1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
32#include "i915_trace.h"
33#include "intel_drv.h"
34#include <linux/slab.h>
35#include <linux/swap.h>
36#include <linux/pci.h>
37#include <linux/intel-gtt.h>
38
39static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv);
40static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv);
41
42static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
43 bool pipelined);
44static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
45static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
46static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
47 int write);
48static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
49 uint64_t offset,
50 uint64_t size);
51static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
52static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
53 bool interruptible);
de151cf6 54static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
55 unsigned alignment,
56 bool mappable,
57 bool need_fence);
de151cf6 58static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
59static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
60 struct drm_i915_gem_pwrite *args,
61 struct drm_file *file_priv);
be72615b 62static void i915_gem_free_object_tail(struct drm_gem_object *obj);
673a394b 63
64static int i915_gem_inactive_shrink(struct shrinker *shrinker,
65 int nr_to_scan,
66 gfp_t gfp_mask);
67
31169714 68
69/* some bookkeeping */
70static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
71 size_t size)
72{
73 dev_priv->mm.object_count++;
74 dev_priv->mm.object_memory += size;
75}
76
77static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
78 size_t size)
79{
80 dev_priv->mm.object_count--;
81 dev_priv->mm.object_memory -= size;
82}
83
84static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
a00b10c3 85 struct drm_i915_gem_object *obj)
86{
87 dev_priv->mm.gtt_count++;
88 dev_priv->mm.gtt_memory += obj->gtt_space->size;
89 if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
fb7d516a 90 dev_priv->mm.mappable_gtt_used +=
91 min_t(size_t, obj->gtt_space->size,
92 dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
fb7d516a 93 }
94}
95
96static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
a00b10c3 97 struct drm_i915_gem_object *obj)
98{
99 dev_priv->mm.gtt_count--;
100 dev_priv->mm.gtt_memory -= obj->gtt_space->size;
101 if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
fb7d516a 102 dev_priv->mm.mappable_gtt_used -=
103 min_t(size_t, obj->gtt_space->size,
104 dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
105 }
106}
107
108/**
109 * Update the mappable working set counters. Call _only_ when there is a change
110 * in one of (pin|fault)_mappable and update *_mappable _before_ calling.
111 * @mappable: new state of the changed mappable flag (either pin_ or fault_).
112 */
113static void
114i915_gem_info_update_mappable(struct drm_i915_private *dev_priv,
a00b10c3 115 struct drm_i915_gem_object *obj,
116 bool mappable)
117{
fb7d516a 118 if (mappable) {
a00b10c3 119 if (obj->pin_mappable && obj->fault_mappable)
120 /* Combined state was already mappable. */
121 return;
122 dev_priv->mm.gtt_mappable_count++;
a00b10c3 123 dev_priv->mm.gtt_mappable_memory += obj->gtt_space->size;
fb7d516a 124 } else {
a00b10c3 125 if (obj->pin_mappable || obj->fault_mappable)
126 /* Combined state still mappable. */
127 return;
128 dev_priv->mm.gtt_mappable_count--;
a00b10c3 129 dev_priv->mm.gtt_mappable_memory -= obj->gtt_space->size;
fb7d516a 130 }
131}
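/* Usage note (describes the callers in this file): flip obj->pin_mappable or
 * obj->fault_mappable first and only then call i915_gem_info_update_mappable(),
 * so the early returns above see the combined state including the change;
 * see i915_gem_info_add_pin() below and the fault handler i915_gem_fault(). */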
132
133static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
a00b10c3 134 struct drm_i915_gem_object *obj,
fb7d516a 135 bool mappable)
136{
137 dev_priv->mm.pin_count++;
a00b10c3 138 dev_priv->mm.pin_memory += obj->gtt_space->size;
fb7d516a 139 if (mappable) {
a00b10c3 140 obj->pin_mappable = true;
141 i915_gem_info_update_mappable(dev_priv, obj, true);
142 }
143}
144
145static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
a00b10c3 146 struct drm_i915_gem_object *obj)
147{
148 dev_priv->mm.pin_count--;
149 dev_priv->mm.pin_memory -= obj->gtt_space->size;
150 if (obj->pin_mappable) {
151 obj->pin_mappable = false;
152 i915_gem_info_update_mappable(dev_priv, obj, false);
153 }
154}
155
156int
157i915_gem_check_is_wedged(struct drm_device *dev)
158{
159 struct drm_i915_private *dev_priv = dev->dev_private;
160 struct completion *x = &dev_priv->error_completion;
161 unsigned long flags;
162 int ret;
163
164 if (!atomic_read(&dev_priv->mm.wedged))
165 return 0;
166
167 ret = wait_for_completion_interruptible(x);
168 if (ret)
169 return ret;
170
171 /* Success, we reset the GPU! */
172 if (!atomic_read(&dev_priv->mm.wedged))
173 return 0;
174
175 /* GPU is hung, bump the completion count to account for
176 * the token we just consumed so that we never hit zero and
177 * end up waiting upon a subsequent completion event that
178 * will never happen.
179 */
180 spin_lock_irqsave(&x->wait.lock, flags);
181 x->done++;
182 spin_unlock_irqrestore(&x->wait.lock, flags);
183 return -EIO;
184}
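/* Return contract of the function above: 0 when the GPU is usable (not wedged,
 * or a completed reset brought it back), -EIO when it is still hung after the
 * wait, or the error from an interrupted wait on error_completion. */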
185
186static int i915_mutex_lock_interruptible(struct drm_device *dev)
187{
188 struct drm_i915_private *dev_priv = dev->dev_private;
189 int ret;
190
191 ret = i915_gem_check_is_wedged(dev);
192 if (ret)
193 return ret;
194
195 ret = mutex_lock_interruptible(&dev->struct_mutex);
196 if (ret)
197 return ret;
198
199 if (atomic_read(&dev_priv->mm.wedged)) {
200 mutex_unlock(&dev->struct_mutex);
201 return -EAGAIN;
202 }
203
23bc5982 204 WARN_ON(i915_verify_lists(dev));
205 return 0;
206}
30dbf0c0 207
208static inline bool
209i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
210{
211 return obj_priv->gtt_space &&
212 !obj_priv->active &&
213 obj_priv->pin_count == 0;
214}
215
216int i915_gem_do_init(struct drm_device *dev,
217 unsigned long start,
53984635 218 unsigned long mappable_end,
79e53945 219 unsigned long end)
220{
221 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b 222
223 if (start >= end ||
224 (start & (PAGE_SIZE - 1)) != 0 ||
225 (end & (PAGE_SIZE - 1)) != 0) {
226 return -EINVAL;
227 }
228
229 drm_mm_init(&dev_priv->mm.gtt_space, start,
230 end - start);
673a394b 231
73aa808f 232 dev_priv->mm.gtt_total = end - start;
fb7d516a 233 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
53984635 234 dev_priv->mm.gtt_mappable_end = mappable_end;
235
236 return 0;
237}
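/* Note: i915_gem_init_ioctl() below passes args->gtt_end as both mappable_end
 * and end, i.e. the whole managed range is treated as CPU-mappable aperture. */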
673a394b 238
239int
240i915_gem_init_ioctl(struct drm_device *dev, void *data,
241 struct drm_file *file_priv)
242{
243 struct drm_i915_gem_init *args = data;
244 int ret;
245
246 mutex_lock(&dev->struct_mutex);
53984635 247 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
248 mutex_unlock(&dev->struct_mutex);
249
79e53945 250 return ret;
251}
252
253int
254i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
255 struct drm_file *file_priv)
256{
73aa808f 257 struct drm_i915_private *dev_priv = dev->dev_private;
5a125c3c 258 struct drm_i915_gem_get_aperture *args = data;
259
260 if (!(dev->driver->driver_features & DRIVER_GEM))
261 return -ENODEV;
262
263 mutex_lock(&dev->struct_mutex);
264 args->aper_size = dev_priv->mm.gtt_total;
265 args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
266 mutex_unlock(&dev->struct_mutex);
267
268 return 0;
269}
270
271
272/**
273 * Creates a new mm object and returns a handle to it.
274 */
275int
276i915_gem_create_ioctl(struct drm_device *dev, void *data,
277 struct drm_file *file_priv)
278{
279 struct drm_i915_gem_create *args = data;
280 struct drm_gem_object *obj;
281 int ret;
282 u32 handle;
283
284 args->size = roundup(args->size, PAGE_SIZE);
285
286 /* Allocate the new object */
ac52bc56 287 obj = i915_gem_alloc_object(dev, args->size);
288 if (obj == NULL)
289 return -ENOMEM;
290
291 ret = drm_gem_handle_create(file_priv, obj, &handle);
1dfd9754 292 if (ret) {
293 drm_gem_object_release(obj);
294 i915_gem_info_remove_obj(dev->dev_private, obj->size);
295 kfree(obj);
673a394b 296 return ret;
1dfd9754 297 }
673a394b 298
299 /* drop reference from allocate - handle holds it now */
300 drm_gem_object_unreference(obj);
301 trace_i915_gem_object_create(obj);
302
1dfd9754 303 args->handle = handle;
304 return 0;
305}
306
307static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
308{
309 drm_i915_private_t *dev_priv = obj->dev->dev_private;
23010e43 310 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
311
312 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
313 obj_priv->tiling_mode != I915_TILING_NONE;
314}
315
99a03df5 316static inline void
317slow_shmem_copy(struct page *dst_page,
318 int dst_offset,
319 struct page *src_page,
320 int src_offset,
321 int length)
322{
323 char *dst_vaddr, *src_vaddr;
324
325 dst_vaddr = kmap(dst_page);
326 src_vaddr = kmap(src_page);
327
328 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
329
330 kunmap(src_page);
331 kunmap(dst_page);
332}
333
99a03df5 334static inline void
335slow_shmem_bit17_copy(struct page *gpu_page,
336 int gpu_offset,
337 struct page *cpu_page,
338 int cpu_offset,
339 int length,
340 int is_read)
341{
342 char *gpu_vaddr, *cpu_vaddr;
343
344 /* Use the unswizzled path if this page isn't affected. */
345 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
346 if (is_read)
347 return slow_shmem_copy(cpu_page, cpu_offset,
348 gpu_page, gpu_offset, length);
349 else
350 return slow_shmem_copy(gpu_page, gpu_offset,
351 cpu_page, cpu_offset, length);
352 }
353
354 gpu_vaddr = kmap(gpu_page);
355 cpu_vaddr = kmap(cpu_page);
356
357 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
358 * XORing with the other bits (A9 for Y, A9 and A10 for X)
359 */
360 while (length > 0) {
361 int cacheline_end = ALIGN(gpu_offset + 1, 64);
362 int this_length = min(cacheline_end - gpu_offset, length);
363 int swizzled_gpu_offset = gpu_offset ^ 64;
364
365 if (is_read) {
366 memcpy(cpu_vaddr + cpu_offset,
367 gpu_vaddr + swizzled_gpu_offset,
368 this_length);
369 } else {
370 memcpy(gpu_vaddr + swizzled_gpu_offset,
371 cpu_vaddr + cpu_offset,
372 this_length);
373 }
374 cpu_offset += this_length;
375 gpu_offset += this_length;
376 length -= this_length;
377 }
378
379 kunmap(cpu_page);
380 kunmap(gpu_page);
381}
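/* Worked example of the swizzle handled above: on a page whose physical
 * address has bit 17 set, bytes 0..63 of the GPU view live at CPU offsets
 * 64..127 and vice versa, so the loop copies in 64-byte cacheline chunks
 * using gpu_offset ^ 64 while leaving the A9/A10 XOR to userspace. */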
382
383/**
384 * This is the fast shmem pread path, which attempts to copy_from_user directly
385 * from the backing pages of the object to the user's address space. On a
386 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
387 */
388static int
389i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
390 struct drm_i915_gem_pread *args,
391 struct drm_file *file_priv)
392{
23010e43 393 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
e5281ccd 394 struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
eb01459f 395 ssize_t remain;
e5281ccd 396 loff_t offset;
397 char __user *user_data;
398 int page_offset, page_length;
399
400 user_data = (char __user *) (uintptr_t) args->data_ptr;
401 remain = args->size;
402
23010e43 403 obj_priv = to_intel_bo(obj);
404 offset = args->offset;
405
406 while (remain > 0) {
407 struct page *page;
408 char *vaddr;
409 int ret;
410
411 /* Operation in this page
412 *
413 * page_offset = offset within page
414 * page_length = bytes to copy for this page
415 */
416 page_offset = offset & (PAGE_SIZE-1);
417 page_length = remain;
418 if ((page_offset + remain) > PAGE_SIZE)
419 page_length = PAGE_SIZE - page_offset;
420
421 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
422 GFP_HIGHUSER | __GFP_RECLAIMABLE);
423 if (IS_ERR(page))
424 return PTR_ERR(page);
425
426 vaddr = kmap_atomic(page);
427 ret = __copy_to_user_inatomic(user_data,
428 vaddr + page_offset,
429 page_length);
430 kunmap_atomic(vaddr);
431
432 mark_page_accessed(page);
433 page_cache_release(page);
434 if (ret)
4f27b75d 435 return -EFAULT;
436
437 remain -= page_length;
438 user_data += page_length;
439 offset += page_length;
440 }
441
4f27b75d 442 return 0;
443}
444
445/**
446 * This is the fallback shmem pread path, which allocates temporary storage
447 * in kernel space to copy_to_user into outside of the struct_mutex, so we
448 * can copy out of the object's backing pages while holding the struct mutex
449 * and not take page faults.
450 */
451static int
452i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
453 struct drm_i915_gem_pread *args,
454 struct drm_file *file_priv)
455{
e5281ccd 456 struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
23010e43 457 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
458 struct mm_struct *mm = current->mm;
459 struct page **user_pages;
460 ssize_t remain;
461 loff_t offset, pinned_pages, i;
462 loff_t first_data_page, last_data_page, num_pages;
463 int shmem_page_offset;
464 int data_page_index, data_page_offset;
465 int page_length;
466 int ret;
467 uint64_t data_ptr = args->data_ptr;
280b713b 468 int do_bit17_swizzling;
469
470 remain = args->size;
471
472 /* Pin the user pages containing the data. We can't fault while
473 * holding the struct mutex, yet we want to hold it while
474 * dereferencing the user data.
475 */
476 first_data_page = data_ptr / PAGE_SIZE;
477 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
478 num_pages = last_data_page - first_data_page + 1;
479
4f27b75d 480 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
481 if (user_pages == NULL)
482 return -ENOMEM;
483
4f27b75d 484 mutex_unlock(&dev->struct_mutex);
485 down_read(&mm->mmap_sem);
486 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
e5e9ecde 487 num_pages, 1, 0, user_pages, NULL);
eb01459f 488 up_read(&mm->mmap_sem);
4f27b75d 489 mutex_lock(&dev->struct_mutex);
490 if (pinned_pages < num_pages) {
491 ret = -EFAULT;
4f27b75d 492 goto out;
493 }
494
495 ret = i915_gem_object_set_cpu_read_domain_range(obj,
496 args->offset,
497 args->size);
07f73f69 498 if (ret)
4f27b75d 499 goto out;
eb01459f 500
4f27b75d 501 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
eb01459f 502
23010e43 503 obj_priv = to_intel_bo(obj);
504 offset = args->offset;
505
506 while (remain > 0) {
507 struct page *page;
508
509 /* Operation in this page
510 *
511 * shmem_page_offset = offset within page in shmem file
512 * data_page_index = page number in get_user_pages return
513 * data_page_offset = offset with data_page_index page.
514 * page_length = bytes to copy for this page
515 */
516 shmem_page_offset = offset & ~PAGE_MASK;
517 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
518 data_page_offset = data_ptr & ~PAGE_MASK;
519
520 page_length = remain;
521 if ((shmem_page_offset + page_length) > PAGE_SIZE)
522 page_length = PAGE_SIZE - shmem_page_offset;
523 if ((data_page_offset + page_length) > PAGE_SIZE)
524 page_length = PAGE_SIZE - data_page_offset;
525
526 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
527 GFP_HIGHUSER | __GFP_RECLAIMABLE);
528 if (IS_ERR(page))
529 return PTR_ERR(page);
530
280b713b 531 if (do_bit17_swizzling) {
e5281ccd 532 slow_shmem_bit17_copy(page,
280b713b 533 shmem_page_offset,
534 user_pages[data_page_index],
535 data_page_offset,
536 page_length,
537 1);
538 } else {
539 slow_shmem_copy(user_pages[data_page_index],
540 data_page_offset,
e5281ccd 541 page,
542 shmem_page_offset,
543 page_length);
280b713b 544 }
eb01459f 545
546 mark_page_accessed(page);
547 page_cache_release(page);
548
549 remain -= page_length;
550 data_ptr += page_length;
551 offset += page_length;
552 }
553
4f27b75d 554out:
555 for (i = 0; i < pinned_pages; i++) {
556 SetPageDirty(user_pages[i]);
e5281ccd 557 mark_page_accessed(user_pages[i]);
558 page_cache_release(user_pages[i]);
559 }
8e7d2b2c 560 drm_free_large(user_pages);
561
562 return ret;
563}
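/* pread follows the usual two-path pattern: i915_gem_pread_ioctl() below tries
 * the atomic fast path first and falls back to this slow path when the fast
 * path returns -EFAULT or when the object needs bit-17 swizzling. */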
564
565/**
566 * Reads data from the object referenced by handle.
567 *
568 * On error, the contents of *data are undefined.
569 */
570int
571i915_gem_pread_ioctl(struct drm_device *dev, void *data,
572 struct drm_file *file_priv)
573{
574 struct drm_i915_gem_pread *args = data;
575 struct drm_gem_object *obj;
576 struct drm_i915_gem_object *obj_priv;
35b62a89 577 int ret = 0;
673a394b 578
4f27b75d 579 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 580 if (ret)
4f27b75d 581 return ret;
582
583 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
584 if (obj == NULL) {
585 ret = -ENOENT;
586 goto unlock;
4f27b75d 587 }
23010e43 588 obj_priv = to_intel_bo(obj);
673a394b 589
590 /* Bounds check source. */
591 if (args->offset > obj->size || args->size > obj->size - args->offset) {
ce9d419d 592 ret = -EINVAL;
35b62a89 593 goto out;
594 }
595
596 if (args->size == 0)
597 goto out;
598
599 if (!access_ok(VERIFY_WRITE,
600 (char __user *)(uintptr_t)args->data_ptr,
601 args->size)) {
602 ret = -EFAULT;
35b62a89 603 goto out;
604 }
605
606 ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
607 args->size);
608 if (ret) {
609 ret = -EFAULT;
610 goto out;
280b713b 611 }
673a394b 612
613 ret = i915_gem_object_set_cpu_read_domain_range(obj,
614 args->offset,
615 args->size);
616 if (ret)
e5281ccd 617 goto out;
618
619 ret = -EFAULT;
620 if (!i915_gem_object_needs_bit17_swizzle(obj))
280b713b 621 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
622 if (ret == -EFAULT)
623 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
673a394b 624
35b62a89 625out:
4f27b75d 626 drm_gem_object_unreference(obj);
1d7cfea1 627unlock:
4f27b75d 628 mutex_unlock(&dev->struct_mutex);
eb01459f 629 return ret;
630}
631
632/* This is the fast write path which cannot handle
633 * page faults in the source data
9b7530cc 634 */
635
636static inline int
637fast_user_write(struct io_mapping *mapping,
638 loff_t page_base, int page_offset,
639 char __user *user_data,
640 int length)
9b7530cc 641{
9b7530cc 642 char *vaddr_atomic;
0839ccb8 643 unsigned long unwritten;
9b7530cc 644
3e4d3af5 645 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
646 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
647 user_data, length);
3e4d3af5 648 io_mapping_unmap_atomic(vaddr_atomic);
fbd5a26d 649 return unwritten;
650}
651
652/* Here's the write path which can sleep for
653 * page faults
654 */
655
ab34c226 656static inline void
657slow_kernel_write(struct io_mapping *mapping,
658 loff_t gtt_base, int gtt_offset,
659 struct page *user_page, int user_offset,
660 int length)
0839ccb8 661{
662 char __iomem *dst_vaddr;
663 char *src_vaddr;
0839ccb8 664
665 dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
666 src_vaddr = kmap(user_page);
667
668 memcpy_toio(dst_vaddr + gtt_offset,
669 src_vaddr + user_offset,
670 length);
671
672 kunmap(user_page);
673 io_mapping_unmap(dst_vaddr);
674}
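/* Unlike fast_user_write() above, this helper may sleep: the user page has
 * already been pinned with get_user_pages(), so it can simply be kmap()ed and
 * copied into a WC mapping of the aperture without faulting on user memory. */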
675
676/**
677 * This is the fast pwrite path, where we copy the data directly from the
678 * user into the GTT, uncached.
679 */
673a394b 680static int
681i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
682 struct drm_i915_gem_pwrite *args,
683 struct drm_file *file_priv)
673a394b 684{
23010e43 685 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
0839ccb8 686 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b 687 ssize_t remain;
0839ccb8 688 loff_t offset, page_base;
673a394b 689 char __user *user_data;
0839ccb8 690 int page_offset, page_length;
691
692 user_data = (char __user *) (uintptr_t) args->data_ptr;
693 remain = args->size;
673a394b 694
23010e43 695 obj_priv = to_intel_bo(obj);
673a394b 696 offset = obj_priv->gtt_offset + args->offset;
697
698 while (remain > 0) {
699 /* Operation in this page
700 *
701 * page_base = page offset within aperture
702 * page_offset = offset within page
703 * page_length = bytes to copy for this page
673a394b 704 */
705 page_base = (offset & ~(PAGE_SIZE-1));
706 page_offset = offset & (PAGE_SIZE-1);
707 page_length = remain;
708 if ((page_offset + remain) > PAGE_SIZE)
709 page_length = PAGE_SIZE - page_offset;
710
0839ccb8 711 /* If we get a fault while copying data, then (presumably) our
712 * source page isn't available. Return the error and we'll
713 * retry in the slow path.
0839ccb8 714 */
715 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
716 page_offset, user_data, page_length))
717
718 return -EFAULT;
673a394b 719
720 remain -= page_length;
721 user_data += page_length;
722 offset += page_length;
673a394b 723 }
673a394b 724
fbd5a26d 725 return 0;
726}
727
728/**
729 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
730 * the memory and maps it using kmap_atomic for copying.
731 *
732 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
733 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
734 */
3043c60c 735static int
736i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
737 struct drm_i915_gem_pwrite *args,
738 struct drm_file *file_priv)
673a394b 739{
23010e43 740 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
741 drm_i915_private_t *dev_priv = dev->dev_private;
742 ssize_t remain;
743 loff_t gtt_page_base, offset;
744 loff_t first_data_page, last_data_page, num_pages;
745 loff_t pinned_pages, i;
746 struct page **user_pages;
747 struct mm_struct *mm = current->mm;
748 int gtt_page_offset, data_page_offset, data_page_index, page_length;
673a394b 749 int ret;
750 uint64_t data_ptr = args->data_ptr;
751
752 remain = args->size;
753
754 /* Pin the user pages containing the data. We can't fault while
755 * holding the struct mutex, and all of the pwrite implementations
756 * want to hold it while dereferencing the user data.
757 */
758 first_data_page = data_ptr / PAGE_SIZE;
759 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
760 num_pages = last_data_page - first_data_page + 1;
761
fbd5a26d 762 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
763 if (user_pages == NULL)
764 return -ENOMEM;
765
fbd5a26d 766 mutex_unlock(&dev->struct_mutex);
767 down_read(&mm->mmap_sem);
768 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
769 num_pages, 0, 0, user_pages, NULL);
770 up_read(&mm->mmap_sem);
fbd5a26d 771 mutex_lock(&dev->struct_mutex);
772 if (pinned_pages < num_pages) {
773 ret = -EFAULT;
774 goto out_unpin_pages;
775 }
673a394b 776
777 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
778 if (ret)
fbd5a26d 779 goto out_unpin_pages;
3de09aa3 780
23010e43 781 obj_priv = to_intel_bo(obj);
3de09aa3
EA
782 offset = obj_priv->gtt_offset + args->offset;
783
784 while (remain > 0) {
785 /* Operation in this page
786 *
787 * gtt_page_base = page offset within aperture
788 * gtt_page_offset = offset within page in aperture
789 * data_page_index = page number in get_user_pages return
790 * data_page_offset = offset with data_page_index page.
791 * page_length = bytes to copy for this page
792 */
793 gtt_page_base = offset & PAGE_MASK;
794 gtt_page_offset = offset & ~PAGE_MASK;
795 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
796 data_page_offset = data_ptr & ~PAGE_MASK;
797
798 page_length = remain;
799 if ((gtt_page_offset + page_length) > PAGE_SIZE)
800 page_length = PAGE_SIZE - gtt_page_offset;
801 if ((data_page_offset + page_length) > PAGE_SIZE)
802 page_length = PAGE_SIZE - data_page_offset;
803
804 slow_kernel_write(dev_priv->mm.gtt_mapping,
805 gtt_page_base, gtt_page_offset,
806 user_pages[data_page_index],
807 data_page_offset,
808 page_length);
809
810 remain -= page_length;
811 offset += page_length;
812 data_ptr += page_length;
813 }
814
815out_unpin_pages:
816 for (i = 0; i < pinned_pages; i++)
817 page_cache_release(user_pages[i]);
8e7d2b2c 818 drm_free_large(user_pages);
819
820 return ret;
821}
822
823/**
824 * This is the fast shmem pwrite path, which attempts to directly
825 * copy_from_user into the kmapped pages backing the object.
826 */
3043c60c 827static int
828i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
829 struct drm_i915_gem_pwrite *args,
830 struct drm_file *file_priv)
673a394b 831{
e5281ccd 832 struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
23010e43 833 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
40123c1f 834 ssize_t remain;
e5281ccd 835 loff_t offset;
836 char __user *user_data;
837 int page_offset, page_length;
838
839 user_data = (char __user *) (uintptr_t) args->data_ptr;
840 remain = args->size;
673a394b 841
23010e43 842 obj_priv = to_intel_bo(obj);
843 offset = args->offset;
844 obj_priv->dirty = 1;
845
846 while (remain > 0) {
847 struct page *page;
848 char *vaddr;
849 int ret;
850
851 /* Operation in this page
852 *
853 * page_offset = offset within page
854 * page_length = bytes to copy for this page
855 */
856 page_offset = offset & (PAGE_SIZE-1);
857 page_length = remain;
858 if ((page_offset + remain) > PAGE_SIZE)
859 page_length = PAGE_SIZE - page_offset;
860
861 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
862 GFP_HIGHUSER | __GFP_RECLAIMABLE);
863 if (IS_ERR(page))
864 return PTR_ERR(page);
865
866 vaddr = kmap_atomic(page, KM_USER0);
867 ret = __copy_from_user_inatomic(vaddr + page_offset,
868 user_data,
869 page_length);
870 kunmap_atomic(vaddr, KM_USER0);
871
872 set_page_dirty(page);
873 mark_page_accessed(page);
874 page_cache_release(page);
875
876 /* If we get a fault while copying data, then (presumably) our
877 * source page isn't available. Return the error and we'll
878 * retry in the slow path.
879 */
880 if (ret)
fbd5a26d 881 return -EFAULT;
882
883 remain -= page_length;
884 user_data += page_length;
885 offset += page_length;
886 }
887
fbd5a26d 888 return 0;
889}
890
891/**
892 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
893 * the memory and maps it using kmap_atomic for copying.
894 *
895 * This avoids taking mmap_sem for faulting on the user's address while the
896 * struct_mutex is held.
897 */
898static int
899i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
900 struct drm_i915_gem_pwrite *args,
901 struct drm_file *file_priv)
902{
e5281ccd 903 struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
23010e43 904 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
905 struct mm_struct *mm = current->mm;
906 struct page **user_pages;
907 ssize_t remain;
908 loff_t offset, pinned_pages, i;
909 loff_t first_data_page, last_data_page, num_pages;
e5281ccd 910 int shmem_page_offset;
911 int data_page_index, data_page_offset;
912 int page_length;
913 int ret;
914 uint64_t data_ptr = args->data_ptr;
280b713b 915 int do_bit17_swizzling;
916
917 remain = args->size;
918
919 /* Pin the user pages containing the data. We can't fault while
920 * holding the struct mutex, and all of the pwrite implementations
921 * want to hold it while dereferencing the user data.
922 */
923 first_data_page = data_ptr / PAGE_SIZE;
924 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
925 num_pages = last_data_page - first_data_page + 1;
926
4f27b75d 927 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
928 if (user_pages == NULL)
929 return -ENOMEM;
930
fbd5a26d 931 mutex_unlock(&dev->struct_mutex);
932 down_read(&mm->mmap_sem);
933 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
934 num_pages, 0, 0, user_pages, NULL);
935 up_read(&mm->mmap_sem);
fbd5a26d 936 mutex_lock(&dev->struct_mutex);
937 if (pinned_pages < num_pages) {
938 ret = -EFAULT;
fbd5a26d 939 goto out;
940 }
941
fbd5a26d 942 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
07f73f69 943 if (ret)
fbd5a26d 944 goto out;
40123c1f 945
fbd5a26d 946 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
40123c1f 947
23010e43 948 obj_priv = to_intel_bo(obj);
673a394b 949 offset = args->offset;
40123c1f 950 obj_priv->dirty = 1;
673a394b 951
40123c1f 952 while (remain > 0) {
e5281ccd
CW
953 struct page *page;
954
40123c1f
EA
955 /* Operation in this page
956 *
40123c1f
EA
957 * shmem_page_offset = offset within page in shmem file
958 * data_page_index = page number in get_user_pages return
959 * data_page_offset = offset with data_page_index page.
960 * page_length = bytes to copy for this page
961 */
962 shmem_page_offset = offset & ~PAGE_MASK;
963 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
964 data_page_offset = data_ptr & ~PAGE_MASK;
965
966 page_length = remain;
967 if ((shmem_page_offset + page_length) > PAGE_SIZE)
968 page_length = PAGE_SIZE - shmem_page_offset;
969 if ((data_page_offset + page_length) > PAGE_SIZE)
970 page_length = PAGE_SIZE - data_page_offset;
971
972 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
973 GFP_HIGHUSER | __GFP_RECLAIMABLE);
974 if (IS_ERR(page)) {
975 ret = PTR_ERR(page);
976 goto out;
977 }
978
280b713b 979 if (do_bit17_swizzling) {
e5281ccd 980 slow_shmem_bit17_copy(page,
981 shmem_page_offset,
982 user_pages[data_page_index],
983 data_page_offset,
984 page_length,
985 0);
986 } else {
e5281ccd 987 slow_shmem_copy(page,
988 shmem_page_offset,
989 user_pages[data_page_index],
990 data_page_offset,
991 page_length);
280b713b 992 }
40123c1f 993
994 set_page_dirty(page);
995 mark_page_accessed(page);
996 page_cache_release(page);
997
998 remain -= page_length;
999 data_ptr += page_length;
1000 offset += page_length;
1001 }
1002
fbd5a26d 1003out:
1004 for (i = 0; i < pinned_pages; i++)
1005 page_cache_release(user_pages[i]);
8e7d2b2c 1006 drm_free_large(user_pages);
673a394b 1007
40123c1f 1008 return ret;
1009}
1010
1011/**
1012 * Writes data to the object referenced by handle.
1013 *
1014 * On error, the contents of the buffer that were to be modified are undefined.
1015 */
1016int
1017i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
fbd5a26d 1018 struct drm_file *file)
1019{
1020 struct drm_i915_gem_pwrite *args = data;
1021 struct drm_gem_object *obj;
1022 struct drm_i915_gem_object *obj_priv;
1023 int ret = 0;
1024
fbd5a26d 1025 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 1026 if (ret)
fbd5a26d 1027 return ret;
1028
1029 obj = drm_gem_object_lookup(dev, file, args->handle);
1030 if (obj == NULL) {
1031 ret = -ENOENT;
1032 goto unlock;
fbd5a26d 1033 }
23010e43 1034 obj_priv = to_intel_bo(obj);
673a394b 1035
fbd5a26d 1036
1037 /* Bounds check destination. */
1038 if (args->offset > obj->size || args->size > obj->size - args->offset) {
ce9d419d 1039 ret = -EINVAL;
35b62a89 1040 goto out;
1041 }
1042
1043 if (args->size == 0)
1044 goto out;
1045
1046 if (!access_ok(VERIFY_READ,
1047 (char __user *)(uintptr_t)args->data_ptr,
1048 args->size)) {
1049 ret = -EFAULT;
35b62a89 1050 goto out;
1051 }
1052
1053 ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
1054 args->size);
1055 if (ret) {
1056 ret = -EFAULT;
1057 goto out;
1058 }
1059
1060 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1061 * it would end up going through the fenced access, and we'll get
1062 * different detiling behavior between reading and writing.
1063 * pread/pwrite currently are reading and writing from the CPU
1064 * perspective, requiring manual detiling by the client.
1065 */
71acb5eb 1066 if (obj_priv->phys_obj)
fbd5a26d 1067 ret = i915_gem_phys_pwrite(dev, obj, args, file);
71acb5eb 1068 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
5cdf5881 1069 obj_priv->gtt_space &&
9b8c4a0b 1070 obj->write_domain != I915_GEM_DOMAIN_CPU) {
a00b10c3 1071 ret = i915_gem_object_pin(obj, 0, true, false);
1072 if (ret)
1073 goto out;
1074
1075 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
1076 if (ret)
1077 goto out_unpin;
1078
1079 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1080 if (ret == -EFAULT)
1081 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
1082
1083out_unpin:
1084 i915_gem_object_unpin(obj);
40123c1f 1085 } else {
1086 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1087 if (ret)
e5281ccd 1088 goto out;
673a394b 1089
1090 ret = -EFAULT;
1091 if (!i915_gem_object_needs_bit17_swizzle(obj))
1092 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1093 if (ret == -EFAULT)
1094 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
fbd5a26d 1095 }
673a394b 1096
35b62a89 1097out:
fbd5a26d 1098 drm_gem_object_unreference(obj);
1d7cfea1 1099unlock:
fbd5a26d 1100 mutex_unlock(&dev->struct_mutex);
1101 return ret;
1102}
1103
1104/**
1105 * Called when user space prepares to use an object with the CPU, either
1106 * through the mmap ioctl's mapping or a GTT mapping.
1107 */
1108int
1109i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1110 struct drm_file *file_priv)
1111{
a09ba7fa 1112 struct drm_i915_private *dev_priv = dev->dev_private;
1113 struct drm_i915_gem_set_domain *args = data;
1114 struct drm_gem_object *obj;
652c393a 1115 struct drm_i915_gem_object *obj_priv;
1116 uint32_t read_domains = args->read_domains;
1117 uint32_t write_domain = args->write_domain;
1118 int ret;
1119
1120 if (!(dev->driver->driver_features & DRIVER_GEM))
1121 return -ENODEV;
1122
2ef7eeaa 1123 /* Only handle setting domains to types used by the CPU. */
21d509e3 1124 if (write_domain & I915_GEM_GPU_DOMAINS)
1125 return -EINVAL;
1126
21d509e3 1127 if (read_domains & I915_GEM_GPU_DOMAINS)
1128 return -EINVAL;
1129
1130 /* Having something in the write domain implies it's in the read
1131 * domain, and only that read domain. Enforce that in the request.
1132 */
1133 if (write_domain != 0 && read_domains != write_domain)
1134 return -EINVAL;
1135
76c1dec1 1136 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 1137 if (ret)
76c1dec1 1138 return ret;
1d7cfea1 1139
673a394b 1140 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1141 if (obj == NULL) {
1142 ret = -ENOENT;
1143 goto unlock;
76c1dec1 1144 }
23010e43 1145 obj_priv = to_intel_bo(obj);
673a394b 1146
1147 intel_mark_busy(dev, obj);
1148
1149 if (read_domains & I915_GEM_DOMAIN_GTT) {
1150 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
02354392 1151
1152 /* Update the LRU on the fence for the CPU access that's
1153 * about to occur.
1154 */
1155 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1156 struct drm_i915_fence_reg *reg =
1157 &dev_priv->fence_regs[obj_priv->fence_reg];
1158 list_move_tail(&reg->lru_list,
1159 &dev_priv->mm.fence_list);
1160 }
1161
1162 /* Silently promote "you're not bound, there was nothing to do"
1163 * to success, since the client was just asking us to
1164 * make sure everything was done.
1165 */
1166 if (ret == -EINVAL)
1167 ret = 0;
2ef7eeaa 1168 } else {
e47c68e9 1169 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1170 }
1171
1172 /* Maintain LRU order of "inactive" objects */
1173 if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
69dc4987 1174 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
7d1c4804 1175
673a394b 1176 drm_gem_object_unreference(obj);
1d7cfea1 1177unlock:
1178 mutex_unlock(&dev->struct_mutex);
1179 return ret;
1180}
1181
1182/**
1183 * Called when user space has done writes to this buffer
1184 */
1185int
1186i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1187 struct drm_file *file_priv)
1188{
1189 struct drm_i915_gem_sw_finish *args = data;
1190 struct drm_gem_object *obj;
673a394b
EA
1191 int ret = 0;
1192
1193 if (!(dev->driver->driver_features & DRIVER_GEM))
1194 return -ENODEV;
1195
76c1dec1 1196 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 1197 if (ret)
76c1dec1 1198 return ret;
1d7cfea1 1199
1200 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1201 if (obj == NULL) {
1202 ret = -ENOENT;
1203 goto unlock;
1204 }
1205
673a394b 1206 /* Pinned buffers may be scanout, so flush the cache */
3d2a812a 1207 if (to_intel_bo(obj)->pin_count)
1208 i915_gem_object_flush_cpu_write_domain(obj);
1209
673a394b 1210 drm_gem_object_unreference(obj);
1d7cfea1 1211unlock:
1212 mutex_unlock(&dev->struct_mutex);
1213 return ret;
1214}
1215
1216/**
1217 * Maps the contents of an object, returning the address it is mapped
1218 * into.
1219 *
1220 * While the mapping holds a reference on the contents of the object, it doesn't
1221 * imply a ref on the object itself.
1222 */
1223int
1224i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1225 struct drm_file *file_priv)
1226{
da761a6e 1227 struct drm_i915_private *dev_priv = dev->dev_private;
1228 struct drm_i915_gem_mmap *args = data;
1229 struct drm_gem_object *obj;
1230 loff_t offset;
1231 unsigned long addr;
1232
1233 if (!(dev->driver->driver_features & DRIVER_GEM))
1234 return -ENODEV;
1235
1236 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1237 if (obj == NULL)
bf79cb91 1238 return -ENOENT;
673a394b 1239
1240 if (obj->size > dev_priv->mm.gtt_mappable_end) {
1241 drm_gem_object_unreference_unlocked(obj);
1242 return -E2BIG;
1243 }
1244
1245 offset = args->offset;
1246
1247 down_write(&current->mm->mmap_sem);
1248 addr = do_mmap(obj->filp, 0, args->size,
1249 PROT_READ | PROT_WRITE, MAP_SHARED,
1250 args->offset);
1251 up_write(&current->mm->mmap_sem);
bc9025bd 1252 drm_gem_object_unreference_unlocked(obj);
1253 if (IS_ERR((void *)addr))
1254 return addr;
1255
1256 args->addr_ptr = (uint64_t) addr;
1257
1258 return 0;
1259}
1260
1261/**
1262 * i915_gem_fault - fault a page into the GTT
1263 * vma: VMA in question
1264 * vmf: fault info
1265 *
1266 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1267 * from userspace. The fault handler takes care of binding the object to
1268 * the GTT (if needed), allocating and programming a fence register (again,
1269 * only if needed based on whether the old reg is still valid or the object
1270 * is tiled) and inserting a new PTE into the faulting process.
1271 *
1272 * Note that the faulting process may involve evicting existing objects
1273 * from the GTT and/or fence registers to make room. So performance may
1274 * suffer if the GTT working set is large or there are few fence registers
1275 * left.
1276 */
1277int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1278{
1279 struct drm_gem_object *obj = vma->vm_private_data;
1280 struct drm_device *dev = obj->dev;
7d1c4804 1281 drm_i915_private_t *dev_priv = dev->dev_private;
23010e43 1282 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1283 pgoff_t page_offset;
1284 unsigned long pfn;
1285 int ret = 0;
0f973f27 1286 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1287
1288 /* We don't use vmf->pgoff since that has the fake offset */
1289 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1290 PAGE_SHIFT;
1291
1292 /* Now bind it into the GTT if needed */
1293 mutex_lock(&dev->struct_mutex);
fb7d516a 1294 BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable);
1295
1296 if (obj_priv->gtt_space) {
1297 if (!obj_priv->mappable ||
1298 (obj_priv->tiling_mode && !obj_priv->fenceable)) {
1299 ret = i915_gem_object_unbind(obj);
1300 if (ret)
1301 goto unlock;
1302 }
1303 }
16e809ac 1304
de151cf6 1305 if (!obj_priv->gtt_space) {
1306 ret = i915_gem_object_bind_to_gtt(obj, 0,
1307 true, obj_priv->tiling_mode);
1308 if (ret)
1309 goto unlock;
1310 }
1311
1312 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1313 if (ret)
1314 goto unlock;
1315
1316 if (!obj_priv->fault_mappable) {
1317 obj_priv->fault_mappable = true;
a00b10c3 1318 i915_gem_info_update_mappable(dev_priv, obj_priv, true);
1319 }
1320
de151cf6 1321 /* Need a new fence register? */
a09ba7fa 1322 if (obj_priv->tiling_mode != I915_TILING_NONE) {
2cf34d7b 1323 ret = i915_gem_object_get_fence_reg(obj, true);
1324 if (ret)
1325 goto unlock;
d9ddcb96 1326 }
de151cf6 1327
7d1c4804 1328 if (i915_gem_object_is_inactive(obj_priv))
69dc4987 1329 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
7d1c4804 1330
1331 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1332 page_offset;
1333
1334 /* Finally, remap it using the new GTT offset */
1335 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
c715089f 1336unlock:
1337 mutex_unlock(&dev->struct_mutex);
1338
1339 switch (ret) {
1340 case 0:
1341 case -ERESTARTSYS:
1342 return VM_FAULT_NOPAGE;
1343 case -ENOMEM:
1344 case -EAGAIN:
1345 return VM_FAULT_OOM;
de151cf6 1346 default:
c715089f 1347 return VM_FAULT_SIGBUS;
1348 }
1349}
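/* Summary of the fault path above: rebind the object if its current GTT slot is
 * not mappable or fenceable, move it to the GTT domain, take a fence register
 * for tiled objects, refresh the inactive LRU, and finally insert the aperture
 * PFN into the faulting VMA. */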
1350
1351/**
1352 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1353 * @obj: obj in question
1354 *
1355 * GEM memory mapping works by handing back to userspace a fake mmap offset
1356 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1357 * up the object based on the offset and sets up the various memory mapping
1358 * structures.
1359 *
1360 * This routine allocates and attaches a fake offset for @obj.
1361 */
1362static int
1363i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1364{
1365 struct drm_device *dev = obj->dev;
1366 struct drm_gem_mm *mm = dev->mm_private;
de151cf6 1367 struct drm_map_list *list;
f77d390c 1368 struct drm_local_map *map;
1369 int ret = 0;
1370
1371 /* Set the object up for mmap'ing */
1372 list = &obj->map_list;
9a298b2a 1373 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1374 if (!list->map)
1375 return -ENOMEM;
1376
1377 map = list->map;
1378 map->type = _DRM_GEM;
1379 map->size = obj->size;
1380 map->handle = obj;
1381
1382 /* Get a DRM GEM mmap offset allocated... */
1383 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1384 obj->size / PAGE_SIZE, 0, 0);
1385 if (!list->file_offset_node) {
1386 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
9e0ae534 1387 ret = -ENOSPC;
1388 goto out_free_list;
1389 }
1390
1391 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1392 obj->size / PAGE_SIZE, 0);
1393 if (!list->file_offset_node) {
1394 ret = -ENOMEM;
1395 goto out_free_list;
1396 }
1397
1398 list->hash.key = list->file_offset_node->start;
1399 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
1400 if (ret) {
1401 DRM_ERROR("failed to add to map hash\n");
1402 goto out_free_mm;
1403 }
1404
1405 return 0;
1406
1407out_free_mm:
1408 drm_mm_put_block(list->file_offset_node);
1409out_free_list:
9a298b2a 1410 kfree(list->map);
39a01d1f 1411 list->map = NULL;
1412
1413 return ret;
1414}
1415
1416/**
1417 * i915_gem_release_mmap - remove physical page mappings
1418 * @obj: obj in question
1419 *
af901ca1 1420 * Preserve the reservation of the mmapping with the DRM core code, but
1421 * relinquish ownership of the pages back to the system.
1422 *
1423 * It is vital that we remove the page mapping if we have mapped a tiled
1424 * object through the GTT and then lose the fence register due to
1425 * resource pressure. Similarly if the object has been moved out of the
1426 * aperture, then pages mapped into userspace must be revoked. Removing the
1427 * mapping will then trigger a page fault on the next user access, allowing
1428 * fixup by i915_gem_fault().
1429 */
d05ca301 1430void
1431i915_gem_release_mmap(struct drm_gem_object *obj)
1432{
1433 struct drm_device *dev = obj->dev;
fb7d516a 1434 struct drm_i915_private *dev_priv = dev->dev_private;
23010e43 1435 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
901782b2 1436
39a01d1f 1437 if (unlikely(obj->map_list.map && dev->dev_mapping))
901782b2 1438 unmap_mapping_range(dev->dev_mapping,
1439 (loff_t)obj->map_list.hash.key<<PAGE_SHIFT,
1440 obj->size, 1);
1441
1442 if (obj_priv->fault_mappable) {
1443 obj_priv->fault_mappable = false;
a00b10c3 1444 i915_gem_info_update_mappable(dev_priv, obj_priv, false);
fb7d516a 1445 }
1446}
1447
1448static void
1449i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1450{
1451 struct drm_device *dev = obj->dev;
ab00b3e5 1452 struct drm_gem_mm *mm = dev->mm_private;
39a01d1f 1453 struct drm_map_list *list = &obj->map_list;
ab00b3e5 1454
ab00b3e5 1455 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1456 drm_mm_put_block(list->file_offset_node);
1457 kfree(list->map);
1458 list->map = NULL;
1459}
1460
1461/**
1462 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1463 * @obj: object to check
1464 *
1465 * Return the required GTT alignment for an object, taking into account
1466 * potential fence register mapping if needed.
1467 */
1468static uint32_t
a00b10c3 1469i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv)
de151cf6 1470{
a00b10c3 1471 struct drm_device *dev = obj_priv->base.dev;
1472
1473 /*
1474 * Minimum alignment is 4k (GTT page size), but might be greater
1475 * if a fence register is needed for the object.
1476 */
1477 if (INTEL_INFO(dev)->gen >= 4 ||
1478 obj_priv->tiling_mode == I915_TILING_NONE)
1479 return 4096;
1480
1481 /*
1482 * Previous chips need to be aligned to the size of the smallest
1483 * fence register that can contain the object.
1484 */
1485 return i915_gem_get_gtt_size(obj_priv);
1486}
1487
1488static uint32_t
1489i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv)
1490{
1491 struct drm_device *dev = obj_priv->base.dev;
1492 uint32_t size;
1493
1494 /*
1495 * Minimum alignment is 4k (GTT page size), but might be greater
1496 * if a fence register is needed for the object.
1497 */
1498 if (INTEL_INFO(dev)->gen >= 4)
1499 return obj_priv->base.size;
1500
1501 /*
1502 * Previous chips need to be aligned to the size of the smallest
1503 * fence register that can contain the object.
1504 */
a6c45cf0 1505 if (INTEL_INFO(dev)->gen == 3)
a00b10c3 1506 size = 1024*1024;
de151cf6 1507 else
a00b10c3 1508 size = 512*1024;
de151cf6 1509
1510 while (size < obj_priv->base.size)
1511 size <<= 1;
de151cf6 1512
a00b10c3 1513 return size;
1514}
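/* Example of the sizing above: on gen3 a 1.5MiB tiled object needs a 2MiB
 * fence area (smallest power of two >= the object size, starting from 1MiB);
 * older chips start from 512KiB, while gen4+ can fence at the object's size. */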
1515
1516/**
1517 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1518 * @dev: DRM device
1519 * @data: GTT mapping ioctl data
1520 * @file_priv: GEM object info
1521 *
1522 * Simply returns the fake offset to userspace so it can mmap it.
1523 * The mmap call will end up in drm_gem_mmap(), which will set things
1524 * up so we can get faults in the handler above.
1525 *
1526 * The fault handler will take care of binding the object into the GTT
1527 * (since it may have been evicted to make room for something), allocating
1528 * a fence register, and mapping the appropriate aperture address into
1529 * userspace.
1530 */
1531int
1532i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1533 struct drm_file *file_priv)
1534{
da761a6e 1535 struct drm_i915_private *dev_priv = dev->dev_private;
de151cf6 1536 struct drm_i915_gem_mmap_gtt *args = data;
1537 struct drm_gem_object *obj;
1538 struct drm_i915_gem_object *obj_priv;
1539 int ret;
1540
1541 if (!(dev->driver->driver_features & DRIVER_GEM))
1542 return -ENODEV;
1543
76c1dec1 1544 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 1545 if (ret)
76c1dec1 1546 return ret;
de151cf6 1547
1548 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1549 if (obj == NULL) {
1550 ret = -ENOENT;
1551 goto unlock;
1552 }
23010e43 1553 obj_priv = to_intel_bo(obj);
de151cf6 1554
1555 if (obj->size > dev_priv->mm.gtt_mappable_end) {
1556 ret = -E2BIG;
1557 goto unlock;
1558 }
1559
1560 if (obj_priv->madv != I915_MADV_WILLNEED) {
1561 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1562 ret = -EINVAL;
1563 goto out;
1564 }
1565
39a01d1f 1566 if (!obj->map_list.map) {
de151cf6 1567 ret = i915_gem_create_mmap_offset(obj);
1568 if (ret)
1569 goto out;
1570 }
1571
39a01d1f 1572 args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
de151cf6 1573
1d7cfea1 1574out:
de151cf6 1575 drm_gem_object_unreference(obj);
1d7cfea1 1576unlock:
de151cf6 1577 mutex_unlock(&dev->struct_mutex);
1d7cfea1 1578 return ret;
1579}
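/* Typical userspace usage (a sketch, not part of this file): the offset
 * returned by this ioctl is passed straight to mmap() on the DRM fd, e.g.
 *   ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, args.offset);
 * and the resulting VMA is then populated on demand by i915_gem_fault(). */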
1580
1581static int
1582i915_gem_object_get_pages_gtt(struct drm_gem_object *obj,
1583 gfp_t gfpmask)
1584{
1585 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1586 int page_count, i;
1587 struct address_space *mapping;
1588 struct inode *inode;
1589 struct page *page;
1590
1591 /* Get the list of pages out of our struct file. They'll be pinned
1592 * at this point until we release them.
1593 */
1594 page_count = obj->size / PAGE_SIZE;
1595 BUG_ON(obj_priv->pages != NULL);
1596 obj_priv->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1597 if (obj_priv->pages == NULL)
1598 return -ENOMEM;
1599
1600 inode = obj->filp->f_path.dentry->d_inode;
1601 mapping = inode->i_mapping;
1602 for (i = 0; i < page_count; i++) {
1603 page = read_cache_page_gfp(mapping, i,
1604 GFP_HIGHUSER |
1605 __GFP_COLD |
1606 __GFP_RECLAIMABLE |
1607 gfpmask);
1608 if (IS_ERR(page))
1609 goto err_pages;
1610
1611 obj_priv->pages[i] = page;
1612 }
1613
1614 if (obj_priv->tiling_mode != I915_TILING_NONE)
1615 i915_gem_object_do_bit_17_swizzle(obj);
1616
1617 return 0;
1618
1619err_pages:
1620 while (i--)
1621 page_cache_release(obj_priv->pages[i]);
1622
1623 drm_free_large(obj_priv->pages);
1624 obj_priv->pages = NULL;
1625 return PTR_ERR(page);
1626}
1627
5cdf5881 1628static void
e5281ccd 1629i915_gem_object_put_pages_gtt(struct drm_gem_object *obj)
673a394b 1630{
23010e43 1631 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1632 int page_count = obj->size / PAGE_SIZE;
1633 int i;
1634
bb6baf76 1635 BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
673a394b 1636
1637 if (obj_priv->tiling_mode != I915_TILING_NONE)
1638 i915_gem_object_save_bit_17_swizzle(obj);
1639
3ef94daa 1640 if (obj_priv->madv == I915_MADV_DONTNEED)
13a05fd9 1641 obj_priv->dirty = 0;
1642
1643 for (i = 0; i < page_count; i++) {
1644 if (obj_priv->dirty)
1645 set_page_dirty(obj_priv->pages[i]);
1646
1647 if (obj_priv->madv == I915_MADV_WILLNEED)
856fa198 1648 mark_page_accessed(obj_priv->pages[i]);
1649
1650 page_cache_release(obj_priv->pages[i]);
1651 }
1652 obj_priv->dirty = 0;
1653
8e7d2b2c 1654 drm_free_large(obj_priv->pages);
856fa198 1655 obj_priv->pages = NULL;
1656}
1657
1658static uint32_t
1659i915_gem_next_request_seqno(struct drm_device *dev,
1660 struct intel_ring_buffer *ring)
1661{
1662 drm_i915_private_t *dev_priv = dev->dev_private;
1663
1664 ring->outstanding_lazy_request = true;
1665 return dev_priv->next_seqno;
1666}
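/* This marks the ring as having an outstanding lazy request: the seqno
 * returned here only becomes real once a matching i915_add_request() is
 * emitted, which clears outstanding_lazy_request again (see below). */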
1667
673a394b 1668static void
617dbe27 1669i915_gem_object_move_to_active(struct drm_gem_object *obj,
852835f3 1670 struct intel_ring_buffer *ring)
1671{
1672 struct drm_device *dev = obj->dev;
69dc4987 1673 struct drm_i915_private *dev_priv = dev->dev_private;
23010e43 1674 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
a56ba56c 1675 uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
617dbe27 1676
1677 BUG_ON(ring == NULL);
1678 obj_priv->ring = ring;
1679
1680 /* Add a reference if we're newly entering the active list. */
1681 if (!obj_priv->active) {
1682 drm_gem_object_reference(obj);
1683 obj_priv->active = 1;
1684 }
e35a41de 1685
673a394b 1686 /* Move from whatever list we were on to the tail of execution. */
1687 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
1688 list_move_tail(&obj_priv->ring_list, &ring->active_list);
ce44b0ea 1689 obj_priv->last_rendering_seqno = seqno;
1690}
1691
1692static void
1693i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1694{
1695 struct drm_device *dev = obj->dev;
1696 drm_i915_private_t *dev_priv = dev->dev_private;
23010e43 1697 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1698
1699 BUG_ON(!obj_priv->active);
1700 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
1701 list_del_init(&obj_priv->ring_list);
1702 obj_priv->last_rendering_seqno = 0;
1703}
673a394b 1704
1705/* Immediately discard the backing storage */
1706static void
1707i915_gem_object_truncate(struct drm_gem_object *obj)
1708{
23010e43 1709 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
bb6baf76 1710 struct inode *inode;
963b4836 1711
1712 /* Our goal here is to return as much of the memory as
1713 * possible back to the system, as we are called from OOM.

1714 * To do this we must instruct the shmfs to drop all of its
1715 * backing pages, *now*. Here we mirror the actions taken
1716 * by shmem_delete_inode() to release the backing store.
1717 */
bb6baf76 1718 inode = obj->filp->f_path.dentry->d_inode;
ae9fed6b
CW
1719 truncate_inode_pages(inode->i_mapping, 0);
1720 if (inode->i_op->truncate_range)
1721 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
bb6baf76
CW
1722
1723 obj_priv->madv = __I915_MADV_PURGED;
963b4836
CW
1724}
1725
1726static inline int
1727i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1728{
1729 return obj_priv->madv == I915_MADV_DONTNEED;
1730}
1731
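/* Retire the object from the GPU: park it on the pinned or inactive list,
 * forget its ring and last rendering seqno, and drop the reference taken
 * when it was moved to the active list.
 */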
673a394b
EA
1732static void
1733i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1734{
1735 struct drm_device *dev = obj->dev;
1736 drm_i915_private_t *dev_priv = dev->dev_private;
23010e43 1737 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
673a394b 1738
673a394b 1739 if (obj_priv->pin_count != 0)
69dc4987 1740 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
673a394b 1741 else
69dc4987
CW
1742 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1743 list_del_init(&obj_priv->ring_list);
673a394b 1744
99fcb766
DV
1745 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1746
ce44b0ea 1747 obj_priv->last_rendering_seqno = 0;
852835f3 1748 obj_priv->ring = NULL;
673a394b
EA
1749 if (obj_priv->active) {
1750 obj_priv->active = 0;
1751 drm_gem_object_unreference(obj);
1752 }
23bc5982 1753 WARN_ON(i915_verify_lists(dev));
673a394b
EA
1754}
1755
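/* After a flush has been emitted on @ring, complete the write for every
 * object whose pending GPU write lies in @flush_domains: clear its write
 * domain, move it to the ring's active list so the flush is tracked, and
 * refresh its position on the fence LRU.
 */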
63560396
DV
1756static void
1757i915_gem_process_flushing_list(struct drm_device *dev,
8a1a49f9 1758 uint32_t flush_domains,
852835f3 1759 struct intel_ring_buffer *ring)
63560396
DV
1760{
1761 drm_i915_private_t *dev_priv = dev->dev_private;
1762 struct drm_i915_gem_object *obj_priv, *next;
1763
1764 list_for_each_entry_safe(obj_priv, next,
64193406 1765 &ring->gpu_write_list,
63560396 1766 gpu_write_list) {
a8089e84 1767 struct drm_gem_object *obj = &obj_priv->base;
63560396 1768
64193406 1769 if (obj->write_domain & flush_domains) {
63560396
DV
1770 uint32_t old_write_domain = obj->write_domain;
1771
1772 obj->write_domain = 0;
1773 list_del_init(&obj_priv->gpu_write_list);
617dbe27 1774 i915_gem_object_move_to_active(obj, ring);
63560396
DV
1775
1776 /* update the fence lru list */
007cc8ac
DV
1777 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1778 struct drm_i915_fence_reg *reg =
1779 &dev_priv->fence_regs[obj_priv->fence_reg];
1780 list_move_tail(&reg->lru_list,
63560396 1781 &dev_priv->mm.fence_list);
007cc8ac 1782 }
63560396
DV
1783
1784 trace_i915_gem_object_change_domain(obj,
1785 obj->read_domains,
1786 old_write_domain);
1787 }
1788 }
1789}
8187a2b7 1790
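/* Emit a request-complete notification on @ring and book-keep it: record the
 * returned seqno and timestamp, queue the request on the ring's (and, if
 * given, the file's) request list, and arm the hangcheck timer and retire
 * work.
 */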
3cce469c 1791int
8a1a49f9 1792i915_add_request(struct drm_device *dev,
f787a5f5 1793 struct drm_file *file,
8dc5d147 1794 struct drm_i915_gem_request *request,
8a1a49f9 1795 struct intel_ring_buffer *ring)
673a394b
EA
1796{
1797 drm_i915_private_t *dev_priv = dev->dev_private;
f787a5f5 1798 struct drm_i915_file_private *file_priv = NULL;
673a394b
EA
1799 uint32_t seqno;
1800 int was_empty;
3cce469c
CW
1801 int ret;
1802
1803 BUG_ON(request == NULL);
673a394b 1804
f787a5f5
CW
1805 if (file != NULL)
1806 file_priv = file->driver_priv;
b962442e 1807
3cce469c
CW
1808 ret = ring->add_request(ring, &seqno);
1809 if (ret)
1810 return ret;
673a394b 1811
a56ba56c 1812 ring->outstanding_lazy_request = false;
673a394b
EA
1813
1814 request->seqno = seqno;
852835f3 1815 request->ring = ring;
673a394b 1816 request->emitted_jiffies = jiffies;
852835f3
ZN
1817 was_empty = list_empty(&ring->request_list);
1818 list_add_tail(&request->list, &ring->request_list);
1819
f787a5f5 1820 if (file_priv) {
1c25595f 1821 spin_lock(&file_priv->mm.lock);
f787a5f5 1822 request->file_priv = file_priv;
b962442e 1823 list_add_tail(&request->client_list,
f787a5f5 1824 &file_priv->mm.request_list);
1c25595f 1825 spin_unlock(&file_priv->mm.lock);
b962442e 1826 }
673a394b 1827
f65d9421 1828 if (!dev_priv->mm.suspended) {
b3b079db
CW
1829 mod_timer(&dev_priv->hangcheck_timer,
1830 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
f65d9421 1831 if (was_empty)
b3b079db
CW
1832 queue_delayed_work(dev_priv->wq,
1833 &dev_priv->mm.retire_work, HZ);
f65d9421 1834 }
3cce469c 1835 return 0;
673a394b
EA
1836}
1837
1838/**
1839 * Command execution barrier
1840 *
1841 * Ensures that all commands in the ring are finished
1842 * before signalling the CPU
1843 */
8a1a49f9 1844static void
852835f3 1845i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
673a394b 1846{
673a394b 1847 uint32_t flush_domains = 0;
673a394b
EA
1848
1849 /* The sampler always gets flushed on i965 (sigh) */
a6c45cf0 1850 if (INTEL_INFO(dev)->gen >= 4)
673a394b 1851 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
852835f3 1852
78501eac 1853 ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
673a394b
EA
1854}
1855
f787a5f5
CW
1856static inline void
1857i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
673a394b 1858{
1c25595f 1859 struct drm_i915_file_private *file_priv = request->file_priv;
673a394b 1860
1c25595f
CW
1861 if (!file_priv)
1862 return;
1c5d22f7 1863
1c25595f
CW
1864 spin_lock(&file_priv->mm.lock);
1865 list_del(&request->client_list);
1866 request->file_priv = NULL;
1867 spin_unlock(&file_priv->mm.lock);
673a394b 1868}
673a394b 1869
dfaae392
CW
1870static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1871 struct intel_ring_buffer *ring)
9375e446 1872{
dfaae392
CW
1873 while (!list_empty(&ring->request_list)) {
1874 struct drm_i915_gem_request *request;
673a394b 1875
dfaae392
CW
1876 request = list_first_entry(&ring->request_list,
1877 struct drm_i915_gem_request,
1878 list);
de151cf6 1879
dfaae392 1880 list_del(&request->list);
f787a5f5 1881 i915_gem_request_remove_from_client(request);
dfaae392
CW
1882 kfree(request);
1883 }
673a394b 1884
dfaae392 1885 while (!list_empty(&ring->active_list)) {
9375e446
CW
1886 struct drm_i915_gem_object *obj_priv;
1887
dfaae392 1888 obj_priv = list_first_entry(&ring->active_list,
9375e446 1889 struct drm_i915_gem_object,
69dc4987 1890 ring_list);
9375e446
CW
1891
1892 obj_priv->base.write_domain = 0;
dfaae392 1893 list_del_init(&obj_priv->gpu_write_list);
9375e446 1894 i915_gem_object_move_to_inactive(&obj_priv->base);
673a394b
EA
1895 }
1896}
1897
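/* Clean up after a GPU reset: drop every request still queued on the rings,
 * move buffers stranded on the active and flushing lists back to inactive
 * (their contents are lost anyway), strip GPU read domains from inactive
 * buffers and clear all fence registers.
 */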
069efc1d 1898void i915_gem_reset(struct drm_device *dev)
673a394b 1899{
77f01230
CW
1900 struct drm_i915_private *dev_priv = dev->dev_private;
1901 struct drm_i915_gem_object *obj_priv;
069efc1d 1902 int i;
673a394b 1903
dfaae392 1904 i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
87acb0a5 1905 i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
549f7365 1906 i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
dfaae392
CW
1907
1908 /* Remove anything from the flushing lists. The GPU cache is likely
1909 * to be lost on reset along with the data, so simply move the
1910 * lost bo to the inactive list.
1911 */
1912 while (!list_empty(&dev_priv->mm.flushing_list)) {
1913 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1914 struct drm_i915_gem_object,
69dc4987 1915 mm_list);
dfaae392
CW
1916
1917 obj_priv->base.write_domain = 0;
1918 list_del_init(&obj_priv->gpu_write_list);
1919 i915_gem_object_move_to_inactive(&obj_priv->base);
1920 }
1921
1922 /* Move everything out of the GPU domains to ensure we do any
1923 * necessary invalidation upon reuse.
1924 */
77f01230
CW
1925 list_for_each_entry(obj_priv,
1926 &dev_priv->mm.inactive_list,
69dc4987 1927 mm_list)
77f01230
CW
1928 {
1929 obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1930 }
069efc1d
CW
1931
1932 /* The fence registers are invalidated so clear them out */
1933 for (i = 0; i < 16; i++) {
1934 struct drm_i915_fence_reg *reg;
1935
1936 reg = &dev_priv->fence_regs[i];
1937 if (!reg->obj)
1938 continue;
1939
1940 i915_gem_clear_fence_reg(reg->obj);
1941 }
673a394b
EA
1942}
1943
1944/**
1945 * This function clears the request list as sequence numbers are passed.
1946 */
b09a1fec
CW
1947static void
1948i915_gem_retire_requests_ring(struct drm_device *dev,
1949 struct intel_ring_buffer *ring)
673a394b
EA
1950{
1951 drm_i915_private_t *dev_priv = dev->dev_private;
1952 uint32_t seqno;
1953
b84d5f0c
CW
1954 if (!ring->status_page.page_addr ||
1955 list_empty(&ring->request_list))
6c0594a3
KW
1956 return;
1957
23bc5982 1958 WARN_ON(i915_verify_lists(dev));
673a394b 1959
78501eac 1960 seqno = ring->get_seqno(ring);
852835f3 1961 while (!list_empty(&ring->request_list)) {
673a394b 1962 struct drm_i915_gem_request *request;
673a394b 1963
852835f3 1964 request = list_first_entry(&ring->request_list,
673a394b
EA
1965 struct drm_i915_gem_request,
1966 list);
673a394b 1967
dfaae392 1968 if (!i915_seqno_passed(seqno, request->seqno))
b84d5f0c
CW
1969 break;
1970
1971 trace_i915_gem_request_retire(dev, request->seqno);
1972
1973 list_del(&request->list);
f787a5f5 1974 i915_gem_request_remove_from_client(request);
b84d5f0c
CW
1975 kfree(request);
1976 }
673a394b 1977
b84d5f0c
CW
1978 /* Move any buffers on the active list that are no longer referenced
1979 * by the ringbuffer to the flushing/inactive lists as appropriate.
1980 */
1981 while (!list_empty(&ring->active_list)) {
1982 struct drm_gem_object *obj;
1983 struct drm_i915_gem_object *obj_priv;
1984
1985 obj_priv = list_first_entry(&ring->active_list,
1986 struct drm_i915_gem_object,
69dc4987 1987 ring_list);
673a394b 1988
dfaae392 1989 if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
673a394b 1990 break;
b84d5f0c
CW
1991
1992 obj = &obj_priv->base;
b84d5f0c
CW
1993 if (obj->write_domain != 0)
1994 i915_gem_object_move_to_flushing(obj);
1995 else
1996 i915_gem_object_move_to_inactive(obj);
673a394b 1997 }
9d34e5db
CW
1998
1999 if (unlikely(dev_priv->trace_irq_seqno &&
2000 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
78501eac 2001 ring->user_irq_put(ring);
9d34e5db
CW
2002 dev_priv->trace_irq_seqno = 0;
2003 }
23bc5982
CW
2004
2005 WARN_ON(i915_verify_lists(dev));
673a394b
EA
2006}
2007
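/* Retire completed requests on every ring, after first freeing any objects
 * whose destruction was deferred because the GPU was still using them.
 */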
b09a1fec
CW
2008void
2009i915_gem_retire_requests(struct drm_device *dev)
2010{
2011 drm_i915_private_t *dev_priv = dev->dev_private;
2012
be72615b
CW
2013 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
2014 struct drm_i915_gem_object *obj_priv, *tmp;
2015
2016 /* We must be careful that during unbind() we do not
2017 * accidentally infinitely recurse into retire requests.
2018 * Currently:
2019 * retire -> free -> unbind -> wait -> retire_ring
2020 */
2021 list_for_each_entry_safe(obj_priv, tmp,
2022 &dev_priv->mm.deferred_free_list,
69dc4987 2023 mm_list)
be72615b
CW
2024 i915_gem_free_object_tail(&obj_priv->base);
2025 }
2026
b09a1fec 2027 i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
87acb0a5 2028 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
549f7365 2029 i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
b09a1fec
CW
2030}
2031
75ef9da2 2032static void
673a394b
EA
2033i915_gem_retire_work_handler(struct work_struct *work)
2034{
2035 drm_i915_private_t *dev_priv;
2036 struct drm_device *dev;
2037
2038 dev_priv = container_of(work, drm_i915_private_t,
2039 mm.retire_work.work);
2040 dev = dev_priv->dev;
2041
891b48cf
CW
2042 /* Come back later if the device is busy... */
2043 if (!mutex_trylock(&dev->struct_mutex)) {
2044 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2045 return;
2046 }
2047
b09a1fec 2048 i915_gem_retire_requests(dev);
d1b851fc 2049
6dbe2772 2050 if (!dev_priv->mm.suspended &&
d1b851fc 2051 (!list_empty(&dev_priv->render_ring.request_list) ||
549f7365
CW
2052 !list_empty(&dev_priv->bsd_ring.request_list) ||
2053 !list_empty(&dev_priv->blt_ring.request_list)))
9c9fe1f8 2054 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
673a394b
EA
2055 mutex_unlock(&dev->struct_mutex);
2056}
2057
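/* Wait for @seqno to pass on @ring. Any outstanding lazy request is emitted
 * first so that the seqno will actually be signalled; the ring's user
 * interrupt is then enabled and we sleep (interruptibly if requested) until
 * the seqno passes or the GPU is declared wedged. Completed requests are
 * retired on success.
 */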
5a5a0c64 2058int
852835f3 2059i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
8a1a49f9 2060 bool interruptible, struct intel_ring_buffer *ring)
673a394b
EA
2061{
2062 drm_i915_private_t *dev_priv = dev->dev_private;
802c7eb6 2063 u32 ier;
673a394b
EA
2064 int ret = 0;
2065
2066 BUG_ON(seqno == 0);
2067
ba1234d1 2068 if (atomic_read(&dev_priv->mm.wedged))
30dbf0c0
CW
2069 return -EAGAIN;
2070
a56ba56c 2071 if (ring->outstanding_lazy_request) {
3cce469c
CW
2072 struct drm_i915_gem_request *request;
2073
2074 request = kzalloc(sizeof(*request), GFP_KERNEL);
2075 if (request == NULL)
e35a41de 2076 return -ENOMEM;
3cce469c
CW
2077
2078 ret = i915_add_request(dev, NULL, request, ring);
2079 if (ret) {
2080 kfree(request);
2081 return ret;
2082 }
2083
2084 seqno = request->seqno;
e35a41de 2085 }
a56ba56c 2086 BUG_ON(seqno == dev_priv->next_seqno);
ffed1d09 2087
78501eac 2088 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
bad720ff 2089 if (HAS_PCH_SPLIT(dev))
036a4a7d
ZW
2090 ier = I915_READ(DEIER) | I915_READ(GTIER);
2091 else
2092 ier = I915_READ(IER);
802c7eb6
JB
2093 if (!ier) {
2094 DRM_ERROR("something (likely vbetool) disabled "
2095 "interrupts, re-enabling\n");
2096 i915_driver_irq_preinstall(dev);
2097 i915_driver_irq_postinstall(dev);
2098 }
2099
1c5d22f7
CW
2100 trace_i915_gem_request_wait_begin(dev, seqno);
2101
b2223497 2102 ring->waiting_seqno = seqno;
78501eac 2103 ring->user_irq_get(ring);
48764bf4 2104 if (interruptible)
852835f3 2105 ret = wait_event_interruptible(ring->irq_queue,
78501eac 2106 i915_seqno_passed(ring->get_seqno(ring), seqno)
852835f3 2107 || atomic_read(&dev_priv->mm.wedged));
48764bf4 2108 else
852835f3 2109 wait_event(ring->irq_queue,
78501eac 2110 i915_seqno_passed(ring->get_seqno(ring), seqno)
852835f3 2111 || atomic_read(&dev_priv->mm.wedged));
48764bf4 2112
78501eac 2113 ring->user_irq_put(ring);
b2223497 2114 ring->waiting_seqno = 0;
1c5d22f7
CW
2115
2116 trace_i915_gem_request_wait_end(dev, seqno);
673a394b 2117 }
ba1234d1 2118 if (atomic_read(&dev_priv->mm.wedged))
30dbf0c0 2119 ret = -EAGAIN;
673a394b
EA
2120
2121 if (ret && ret != -ERESTARTSYS)
8bff917c 2122 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
78501eac 2123 __func__, ret, seqno, ring->get_seqno(ring),
8bff917c 2124 dev_priv->next_seqno);
673a394b
EA
2125
2126 /* Directly dispatch request retiring. While we have the work queue
2127 * to handle this, the waiter on a request often wants an associated
2128 * buffer to have made it to the inactive list, and we would need
2129 * a separate wait queue to handle that.
2130 */
2131 if (ret == 0)
b09a1fec 2132 i915_gem_retire_requests_ring(dev, ring);
673a394b
EA
2133
2134 return ret;
2135}
2136
48764bf4
DV
2137/**
2138 * Waits for a sequence number to be signaled, and cleans up the
2139 * request and object lists appropriately for that event.
2140 */
2141static int
852835f3 2142i915_wait_request(struct drm_device *dev, uint32_t seqno,
a56ba56c 2143 struct intel_ring_buffer *ring)
48764bf4 2144{
852835f3 2145 return i915_do_wait_request(dev, seqno, 1, ring);
48764bf4
DV
2146}
2147
20f0cd55 2148static void
9220434a 2149i915_gem_flush_ring(struct drm_device *dev,
c78ec30b 2150 struct drm_file *file_priv,
9220434a
CW
2151 struct intel_ring_buffer *ring,
2152 uint32_t invalidate_domains,
2153 uint32_t flush_domains)
2154{
78501eac 2155 ring->flush(ring, invalidate_domains, flush_domains);
9220434a
CW
2156 i915_gem_process_flushing_list(dev, flush_domains, ring);
2157}
2158
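/* Emit the flushes accumulated for this submission: a chipset flush when the
 * CPU domain is involved, plus a ring flush (with flushing-list processing)
 * on each ring selected in @flush_rings.
 */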
8187a2b7
ZN
2159static void
2160i915_gem_flush(struct drm_device *dev,
c78ec30b 2161 struct drm_file *file_priv,
8187a2b7 2162 uint32_t invalidate_domains,
9220434a
CW
2163 uint32_t flush_domains,
2164 uint32_t flush_rings)
8187a2b7
ZN
2165{
2166 drm_i915_private_t *dev_priv = dev->dev_private;
8bff917c 2167
8187a2b7
ZN
2168 if (flush_domains & I915_GEM_DOMAIN_CPU)
2169 drm_agp_chipset_flush(dev);
8bff917c 2170
9220434a
CW
2171 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
2172 if (flush_rings & RING_RENDER)
c78ec30b 2173 i915_gem_flush_ring(dev, file_priv,
9220434a
CW
2174 &dev_priv->render_ring,
2175 invalidate_domains, flush_domains);
2176 if (flush_rings & RING_BSD)
c78ec30b 2177 i915_gem_flush_ring(dev, file_priv,
9220434a
CW
2178 &dev_priv->bsd_ring,
2179 invalidate_domains, flush_domains);
549f7365
CW
2180 if (flush_rings & RING_BLT)
2181 i915_gem_flush_ring(dev, file_priv,
2182 &dev_priv->blt_ring,
2183 invalidate_domains, flush_domains);
9220434a 2184 }
8187a2b7
ZN
2185}
2186
673a394b
EA
2187/**
2188 * Ensures that all rendering to the object has completed and the object is
2189 * safe to unbind from the GTT or access from the CPU.
2190 */
2191static int
2cf34d7b
CW
2192i915_gem_object_wait_rendering(struct drm_gem_object *obj,
2193 bool interruptible)
673a394b
EA
2194{
2195 struct drm_device *dev = obj->dev;
23010e43 2196 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
673a394b
EA
2197 int ret;
2198
e47c68e9
EA
2199 /* This function only exists to support waiting for existing rendering,
2200 * not for emitting required flushes.
673a394b 2201 */
e47c68e9 2202 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
673a394b
EA
2203
2204 /* If there is rendering queued on the buffer being evicted, wait for
2205 * it.
2206 */
2207 if (obj_priv->active) {
2cf34d7b
CW
2208 ret = i915_do_wait_request(dev,
2209 obj_priv->last_rendering_seqno,
2210 interruptible,
2211 obj_priv->ring);
2212 if (ret)
673a394b
EA
2213 return ret;
2214 }
2215
2216 return 0;
2217}
2218
2219/**
2220 * Unbinds an object from the GTT aperture.
2221 */
0f973f27 2222int
673a394b
EA
2223i915_gem_object_unbind(struct drm_gem_object *obj)
2224{
2225 struct drm_device *dev = obj->dev;
73aa808f 2226 struct drm_i915_private *dev_priv = dev->dev_private;
23010e43 2227 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
673a394b
EA
2228 int ret = 0;
2229
673a394b
EA
2230 if (obj_priv->gtt_space == NULL)
2231 return 0;
2232
2233 if (obj_priv->pin_count != 0) {
2234 DRM_ERROR("Attempting to unbind pinned buffer\n");
2235 return -EINVAL;
2236 }
2237
5323fd04
EA
2238 /* blow away mappings if mapped through GTT */
2239 i915_gem_release_mmap(obj);
2240
673a394b
EA
2241 /* Move the object to the CPU domain to ensure that
2242 * any possible CPU writes while it's not in the GTT
2243 * are flushed when we go to remap it. This will
2244 * also ensure that all pending GPU writes are finished
2245 * before we unbind.
2246 */
e47c68e9 2247 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
8dc1775d 2248 if (ret == -ERESTARTSYS)
673a394b 2249 return ret;
8dc1775d
CW
2250 /* Continue on if we fail due to EIO, the GPU is hung so we
2251 * should be safe and we need to cleanup or else we might
2252 * cause memory corruption through use-after-free.
2253 */
812ed492
CW
2254 if (ret) {
2255 i915_gem_clflush_object(obj);
2256 obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
2257 }
673a394b 2258
96b47b65
DV
2259 /* release the fence reg _after_ flushing */
2260 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2261 i915_gem_clear_fence_reg(obj);
2262
73aa808f
CW
2263 drm_unbind_agp(obj_priv->agp_mem);
2264 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
673a394b 2265
e5281ccd 2266 i915_gem_object_put_pages_gtt(obj);
673a394b 2267
a00b10c3 2268 i915_gem_info_remove_gtt(dev_priv, obj_priv);
69dc4987 2269 list_del_init(&obj_priv->mm_list);
a00b10c3
CW
2270 obj_priv->fenceable = true;
2271 obj_priv->mappable = true;
673a394b 2272
73aa808f
CW
2273 drm_mm_put_block(obj_priv->gtt_space);
2274 obj_priv->gtt_space = NULL;
9af90d19 2275 obj_priv->gtt_offset = 0;
673a394b 2276
963b4836
CW
2277 if (i915_gem_object_is_purgeable(obj_priv))
2278 i915_gem_object_truncate(obj);
2279
1c5d22f7
CW
2280 trace_i915_gem_object_unbind(obj);
2281
8dc1775d 2282 return ret;
673a394b
EA
2283}
2284
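/* Drain a single ring: flush any pending GPU writes and then wait for the
 * request covering that flush to complete.
 */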
a56ba56c
CW
2285static int i915_ring_idle(struct drm_device *dev,
2286 struct intel_ring_buffer *ring)
2287{
64193406
CW
2288 if (list_empty(&ring->gpu_write_list))
2289 return 0;
2290
a56ba56c
CW
2291 i915_gem_flush_ring(dev, NULL, ring,
2292 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2293 return i915_wait_request(dev,
2294 i915_gem_next_request_seqno(dev, ring),
2295 ring);
2296}
2297
b47eb4a2 2298int
4df2faf4
DV
2299i915_gpu_idle(struct drm_device *dev)
2300{
2301 drm_i915_private_t *dev_priv = dev->dev_private;
2302 bool lists_empty;
852835f3 2303 int ret;
4df2faf4 2304
d1b851fc
ZN
2305 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2306 list_empty(&dev_priv->render_ring.active_list) &&
549f7365
CW
2307 list_empty(&dev_priv->bsd_ring.active_list) &&
2308 list_empty(&dev_priv->blt_ring.active_list));
4df2faf4
DV
2309 if (lists_empty)
2310 return 0;
2311
2312 /* Flush everything onto the inactive list. */
a56ba56c 2313 ret = i915_ring_idle(dev, &dev_priv->render_ring);
8a1a49f9
DV
2314 if (ret)
2315 return ret;
d1b851fc 2316
87acb0a5
CW
2317 ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
2318 if (ret)
2319 return ret;
d1b851fc 2320
549f7365
CW
2321 ret = i915_ring_idle(dev, &dev_priv->blt_ring);
2322 if (ret)
2323 return ret;
4df2faf4 2324
8a1a49f9 2325 return 0;
4df2faf4
DV
2326}
2327
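/* Fence registers describe how a tiled object is laid out in the aperture so
 * that CPU access through the GTT need not worry about swizzling. Each
 * generation packs the object's start (and, on gen4+, end) address, pitch
 * and tiling mode into the register differently, hence the per-generation
 * helpers below.
 */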
a00b10c3 2328static void sandybridge_write_fence_reg(struct drm_gem_object *obj)
4e901fdc 2329{
4e901fdc
EA
2330 struct drm_device *dev = obj->dev;
2331 drm_i915_private_t *dev_priv = dev->dev_private;
23010e43 2332 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
a00b10c3 2333 u32 size = i915_gem_get_gtt_size(obj_priv);
4e901fdc
EA
2334 int regnum = obj_priv->fence_reg;
2335 uint64_t val;
2336
a00b10c3 2337 val = (uint64_t)((obj_priv->gtt_offset + size - 4096) &
4e901fdc
EA
2338 0xfffff000) << 32;
2339 val |= obj_priv->gtt_offset & 0xfffff000;
2340 val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2341 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2342
2343 if (obj_priv->tiling_mode == I915_TILING_Y)
2344 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2345 val |= I965_FENCE_REG_VALID;
2346
2347 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2348}
2349
a00b10c3 2350static void i965_write_fence_reg(struct drm_gem_object *obj)
de151cf6 2351{
de151cf6
JB
2352 struct drm_device *dev = obj->dev;
2353 drm_i915_private_t *dev_priv = dev->dev_private;
23010e43 2354 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
a00b10c3 2355 u32 size = i915_gem_get_gtt_size(obj_priv);
de151cf6
JB
2356 int regnum = obj_priv->fence_reg;
2357 uint64_t val;
2358
a00b10c3 2359 val = (uint64_t)((obj_priv->gtt_offset + size - 4096) &
de151cf6
JB
2360 0xfffff000) << 32;
2361 val |= obj_priv->gtt_offset & 0xfffff000;
2362 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2363 if (obj_priv->tiling_mode == I915_TILING_Y)
2364 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2365 val |= I965_FENCE_REG_VALID;
2366
2367 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2368}
2369
a00b10c3 2370static void i915_write_fence_reg(struct drm_gem_object *obj)
de151cf6 2371{
de151cf6
JB
2372 struct drm_device *dev = obj->dev;
2373 drm_i915_private_t *dev_priv = dev->dev_private;
23010e43 2374 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
a00b10c3
CW
2375 u32 size = i915_gem_get_gtt_size(obj_priv);
2376 uint32_t fence_reg, val, pitch_val;
0f973f27 2377 int tile_width;
de151cf6
JB
2378
2379 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
a00b10c3
CW
2380 (obj_priv->gtt_offset & (size - 1))) {
2381 WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n",
2382 __func__, obj_priv->gtt_offset, obj_priv->fenceable, size,
2383 obj_priv->gtt_space->start, obj_priv->gtt_space->size);
de151cf6
JB
2384 return;
2385 }
2386
0f973f27
JB
2387 if (obj_priv->tiling_mode == I915_TILING_Y &&
2388 HAS_128_BYTE_Y_TILING(dev))
2389 tile_width = 128;
de151cf6 2390 else
0f973f27
JB
2391 tile_width = 512;
2392
2393 /* Note: pitch better be a power of two tile widths */
2394 pitch_val = obj_priv->stride / tile_width;
2395 pitch_val = ffs(pitch_val) - 1;
de151cf6 2396
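	/* e.g. a 2048 byte stride with 512 byte X-tiles gives
	 * pitch_val = ffs(2048 / 512) - 1 = ffs(4) - 1 = 2,
	 * i.e. log2 of the pitch measured in tiles.
	 */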
c36a2a6d
DV
2397 if (obj_priv->tiling_mode == I915_TILING_Y &&
2398 HAS_128_BYTE_Y_TILING(dev))
2399 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2400 else
2401 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2402
de151cf6
JB
2403 val = obj_priv->gtt_offset;
2404 if (obj_priv->tiling_mode == I915_TILING_Y)
2405 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
a00b10c3 2406 val |= I915_FENCE_SIZE_BITS(size);
de151cf6
JB
2407 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2408 val |= I830_FENCE_REG_VALID;
2409
a00b10c3
CW
2410 fence_reg = obj_priv->fence_reg;
2411 if (fence_reg < 8)
2412 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
dc529a4f 2413 else
a00b10c3 2414 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
dc529a4f 2415 I915_WRITE(fence_reg, val);
de151cf6
JB
2416}
2417
a00b10c3 2418static void i830_write_fence_reg(struct drm_gem_object *obj)
de151cf6 2419{
de151cf6
JB
2420 struct drm_device *dev = obj->dev;
2421 drm_i915_private_t *dev_priv = dev->dev_private;
23010e43 2422 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
a00b10c3 2423 u32 size = i915_gem_get_gtt_size(obj_priv);
de151cf6
JB
2424 int regnum = obj_priv->fence_reg;
2425 uint32_t val;
2426 uint32_t pitch_val;
8d7773a3 2427 uint32_t fence_size_bits;
de151cf6 2428
8d7773a3 2429 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
de151cf6 2430 (obj_priv->gtt_offset & (obj->size - 1))) {
8d7773a3 2431 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
0f973f27 2432 __func__, obj_priv->gtt_offset);
de151cf6
JB
2433 return;
2434 }
2435
e76a16de
EA
2436 pitch_val = obj_priv->stride / 128;
2437 pitch_val = ffs(pitch_val) - 1;
2438 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2439
de151cf6
JB
2440 val = obj_priv->gtt_offset;
2441 if (obj_priv->tiling_mode == I915_TILING_Y)
2442 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
a00b10c3 2443 fence_size_bits = I830_FENCE_SIZE_BITS(size);
8d7773a3
DV
2444 WARN_ON(fence_size_bits & ~0x00000f00);
2445 val |= fence_size_bits;
de151cf6
JB
2446 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2447 val |= I830_FENCE_REG_VALID;
2448
2449 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
de151cf6
JB
2450}
2451
2cf34d7b
CW
2452static int i915_find_fence_reg(struct drm_device *dev,
2453 bool interruptible)
ae3db24a 2454{
ae3db24a 2455 struct drm_i915_private *dev_priv = dev->dev_private;
a00b10c3
CW
2456 struct drm_i915_fence_reg *reg;
2457 struct drm_i915_gem_object *obj_priv = NULL;
ae3db24a
DV
2458 int i, avail, ret;
2459
2460 /* First try to find a free reg */
2461 avail = 0;
2462 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2463 reg = &dev_priv->fence_regs[i];
2464 if (!reg->obj)
2465 return i;
2466
23010e43 2467 obj_priv = to_intel_bo(reg->obj);
ae3db24a
DV
2468 if (!obj_priv->pin_count)
2469 avail++;
2470 }
2471
2472 if (avail == 0)
2473 return -ENOSPC;
2474
2475 /* None available, try to steal one or wait for a user to finish */
a00b10c3 2476 avail = I915_FENCE_REG_NONE;
007cc8ac
DV
2477 list_for_each_entry(reg, &dev_priv->mm.fence_list,
2478 lru_list) {
a00b10c3 2479 obj_priv = to_intel_bo(reg->obj);
ae3db24a
DV
2480 if (obj_priv->pin_count)
2481 continue;
2482
2483 /* found one! */
a00b10c3 2484 avail = obj_priv->fence_reg;
ae3db24a
DV
2485 break;
2486 }
2487
a00b10c3 2488 BUG_ON(avail == I915_FENCE_REG_NONE);
ae3db24a
DV
2489
2490 /* We only have a reference on obj from the active list. put_fence_reg
2491 * might drop that one, causing a use-after-free in it. So hold a
2492 * private reference to obj like the other callers of put_fence_reg
2493 * (set_tiling ioctl) do. */
a00b10c3
CW
2494 drm_gem_object_reference(&obj_priv->base);
2495 ret = i915_gem_object_put_fence_reg(&obj_priv->base, interruptible);
2496 drm_gem_object_unreference(&obj_priv->base);
ae3db24a
DV
2497 if (ret != 0)
2498 return ret;
2499
a00b10c3 2500 return avail;
ae3db24a
DV
2501}
2502
de151cf6
JB
2503/**
2504 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2505 * @obj: object to map through a fence reg
2506 *
2507 * When mapping objects through the GTT, userspace wants to be able to write
2508 * to them without having to worry about swizzling if the object is tiled.
2509 *
2510 * This function walks the fence regs looking for a free one for @obj,
2511 * stealing one if it can't find any.
2512 *
2513 * It then sets up the reg based on the object's properties: address, pitch
2514 * and tiling format.
2515 */
8c4b8c3f 2516int
2cf34d7b
CW
2517i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
2518 bool interruptible)
de151cf6
JB
2519{
2520 struct drm_device *dev = obj->dev;
79e53945 2521 struct drm_i915_private *dev_priv = dev->dev_private;
23010e43 2522 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
de151cf6 2523 struct drm_i915_fence_reg *reg = NULL;
ae3db24a 2524 int ret;
de151cf6 2525
a09ba7fa
EA
2526 /* Just update our place in the LRU if our fence is getting used. */
2527 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
007cc8ac
DV
2528 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2529 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
a09ba7fa
EA
2530 return 0;
2531 }
2532
de151cf6
JB
2533 switch (obj_priv->tiling_mode) {
2534 case I915_TILING_NONE:
2535 WARN(1, "allocating a fence for non-tiled object?\n");
2536 break;
2537 case I915_TILING_X:
0f973f27
JB
2538 if (!obj_priv->stride)
2539 return -EINVAL;
2540 WARN((obj_priv->stride & (512 - 1)),
2541 "object 0x%08x is X tiled but has non-512B pitch\n",
2542 obj_priv->gtt_offset);
de151cf6
JB
2543 break;
2544 case I915_TILING_Y:
0f973f27
JB
2545 if (!obj_priv->stride)
2546 return -EINVAL;
2547 WARN((obj_priv->stride & (128 - 1)),
2548 "object 0x%08x is Y tiled but has non-128B pitch\n",
2549 obj_priv->gtt_offset);
de151cf6
JB
2550 break;
2551 }
2552
2cf34d7b 2553 ret = i915_find_fence_reg(dev, interruptible);
ae3db24a
DV
2554 if (ret < 0)
2555 return ret;
de151cf6 2556
ae3db24a
DV
2557 obj_priv->fence_reg = ret;
2558 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
007cc8ac 2559 list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
a09ba7fa 2560
de151cf6
JB
2561 reg->obj = obj;
2562
e259befd
CW
2563 switch (INTEL_INFO(dev)->gen) {
2564 case 6:
a00b10c3 2565 sandybridge_write_fence_reg(obj);
e259befd
CW
2566 break;
2567 case 5:
2568 case 4:
a00b10c3 2569 i965_write_fence_reg(obj);
e259befd
CW
2570 break;
2571 case 3:
a00b10c3 2572 i915_write_fence_reg(obj);
e259befd
CW
2573 break;
2574 case 2:
a00b10c3 2575 i830_write_fence_reg(obj);
e259befd
CW
2576 break;
2577 }
d9ddcb96 2578
a00b10c3
CW
2579 trace_i915_gem_object_get_fence(obj,
2580 obj_priv->fence_reg,
2581 obj_priv->tiling_mode);
1c5d22f7 2582
d9ddcb96 2583 return 0;
de151cf6
JB
2584}
2585
2586/**
2587 * i915_gem_clear_fence_reg - clear out fence register info
2588 * @obj: object to clear
2589 *
2590 * Zeroes out the fence register itself and clears out the associated
2591 * data structures in dev_priv and obj_priv.
2592 */
2593static void
2594i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2595{
2596 struct drm_device *dev = obj->dev;
79e53945 2597 drm_i915_private_t *dev_priv = dev->dev_private;
23010e43 2598 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
007cc8ac
DV
2599 struct drm_i915_fence_reg *reg =
2600 &dev_priv->fence_regs[obj_priv->fence_reg];
e259befd 2601 uint32_t fence_reg;
de151cf6 2602
e259befd
CW
2603 switch (INTEL_INFO(dev)->gen) {
2604 case 6:
4e901fdc
EA
2605 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2606 (obj_priv->fence_reg * 8), 0);
e259befd
CW
2607 break;
2608 case 5:
2609 case 4:
de151cf6 2610 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
e259befd
CW
2611 break;
2612 case 3:
9b74f734 2613 if (obj_priv->fence_reg >= 8)
e259befd 2614 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
dc529a4f 2615 else
e259befd
CW
2616 case 2:
2617 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
dc529a4f
EA
2618
2619 I915_WRITE(fence_reg, 0);
e259befd 2620 break;
dc529a4f 2621 }
de151cf6 2622
007cc8ac 2623 reg->obj = NULL;
de151cf6 2624 obj_priv->fence_reg = I915_FENCE_REG_NONE;
007cc8ac 2625 list_del_init(&reg->lru_list);
de151cf6
JB
2626}
2627
52dc7d32
CW
2628/**
2629 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2630 * to the buffer to finish, and then resets the fence register.
2631 * @obj: tiled object holding a fence register.
2cf34d7b 2632 * @bool: whether the wait upon the fence is interruptible
52dc7d32
CW
2633 *
2634 * Zeroes out the fence register itself and clears out the associated
2635 * data structures in dev_priv and obj_priv.
2636 */
2637int
2cf34d7b
CW
2638i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
2639 bool interruptible)
52dc7d32
CW
2640{
2641 struct drm_device *dev = obj->dev;
53640e1d 2642 struct drm_i915_private *dev_priv = dev->dev_private;
23010e43 2643 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
53640e1d 2644 struct drm_i915_fence_reg *reg;
52dc7d32
CW
2645
2646 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2647 return 0;
2648
10ae9bd2
DV
2649 /* If we've changed tiling, GTT-mappings of the object
2650 * need to re-fault to ensure that the correct fence register
2651 * setup is in place.
2652 */
2653 i915_gem_release_mmap(obj);
2654
52dc7d32
CW
2655 /* On the i915, GPU access to tiled buffers is via a fence,
2656 * therefore we must wait for any outstanding access to complete
2657 * before clearing the fence.
2658 */
53640e1d
CW
2659 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2660 if (reg->gpu) {
52dc7d32
CW
2661 int ret;
2662
2cf34d7b 2663 ret = i915_gem_object_flush_gpu_write_domain(obj, true);
0bc23aad 2664 if (ret)
2dafb1e0
CW
2665 return ret;
2666
2cf34d7b 2667 ret = i915_gem_object_wait_rendering(obj, interruptible);
0bc23aad 2668 if (ret)
52dc7d32 2669 return ret;
53640e1d
CW
2670
2671 reg->gpu = false;
52dc7d32
CW
2672 }
2673
4a726612 2674 i915_gem_object_flush_gtt_write_domain(obj);
0bc23aad 2675 i915_gem_clear_fence_reg(obj);
52dc7d32
CW
2676
2677 return 0;
2678}
2679
673a394b
EA
2680/**
2681 * Finds free space in the GTT aperture and binds the object there.
2682 */
2683static int
920afa77
DV
2684i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
2685 unsigned alignment,
a00b10c3
CW
2686 bool mappable,
2687 bool need_fence)
673a394b
EA
2688{
2689 struct drm_device *dev = obj->dev;
2690 drm_i915_private_t *dev_priv = dev->dev_private;
23010e43 2691 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
673a394b 2692 struct drm_mm_node *free_space;
a00b10c3
CW
2693 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2694 u32 size, fence_size, fence_alignment;
07f73f69 2695 int ret;
673a394b 2696
bb6baf76 2697 if (obj_priv->madv != I915_MADV_WILLNEED) {
3ef94daa
CW
2698 DRM_ERROR("Attempting to bind a purgeable object\n");
2699 return -EINVAL;
2700 }
2701
a00b10c3
CW
2702 fence_size = i915_gem_get_gtt_size(obj_priv);
2703 fence_alignment = i915_gem_get_gtt_alignment(obj_priv);
2704
673a394b 2705 if (alignment == 0)
a00b10c3
CW
2706 alignment = need_fence ? fence_alignment : 4096;
2707 if (need_fence && alignment & (fence_alignment - 1)) {
673a394b
EA
2708 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2709 return -EINVAL;
2710 }
2711
a00b10c3
CW
2712 size = need_fence ? fence_size : obj->size;
2713
654fc607
CW
2714 /* If the object is bigger than the entire aperture, reject it early
2715 * before evicting everything in a vain attempt to find space.
2716 */
920afa77
DV
2717 if (obj->size >
2718 (mappable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
654fc607
CW
2719 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2720 return -E2BIG;
2721 }
2722
673a394b 2723 search_free:
920afa77
DV
2724 if (mappable)
2725 free_space =
2726 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
a00b10c3 2727 size, alignment, 0,
920afa77
DV
2728 dev_priv->mm.gtt_mappable_end,
2729 0);
2730 else
2731 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
a00b10c3 2732 size, alignment, 0);
920afa77
DV
2733
2734 if (free_space != NULL) {
2735 if (mappable)
2736 obj_priv->gtt_space =
2737 drm_mm_get_block_range_generic(free_space,
a00b10c3 2738 size, alignment, 0,
920afa77
DV
2739 dev_priv->mm.gtt_mappable_end,
2740 0);
2741 else
2742 obj_priv->gtt_space =
a00b10c3 2743 drm_mm_get_block(free_space, size, alignment);
920afa77 2744 }
673a394b
EA
2745 if (obj_priv->gtt_space == NULL) {
2746 /* If the gtt is empty and we're still having trouble
2747 * fitting our object in, we're out of memory.
2748 */
a00b10c3 2749 ret = i915_gem_evict_something(dev, size, alignment, mappable);
9731129c 2750 if (ret)
673a394b 2751 return ret;
9731129c 2752
673a394b
EA
2753 goto search_free;
2754 }
2755
e5281ccd 2756 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
673a394b
EA
2757 if (ret) {
2758 drm_mm_put_block(obj_priv->gtt_space);
2759 obj_priv->gtt_space = NULL;
07f73f69
CW
2760
2761 if (ret == -ENOMEM) {
2762 /* first try to clear up some space from the GTT */
a00b10c3 2763 ret = i915_gem_evict_something(dev, size,
920afa77 2764 alignment, mappable);
07f73f69 2765 if (ret) {
07f73f69 2766 /* now try to shrink everyone else */
4bdadb97
CW
2767 if (gfpmask) {
2768 gfpmask = 0;
2769 goto search_free;
07f73f69
CW
2770 }
2771
2772 return ret;
2773 }
2774
2775 goto search_free;
2776 }
2777
673a394b
EA
2778 return ret;
2779 }
2780
673a394b
EA
2781 /* Create an AGP memory structure pointing at our pages, and bind it
2782 * into the GTT.
2783 */
2784 obj_priv->agp_mem = drm_agp_bind_pages(dev,
856fa198 2785 obj_priv->pages,
07f73f69 2786 obj->size >> PAGE_SHIFT,
9af90d19 2787 obj_priv->gtt_space->start,
ba1eb1d8 2788 obj_priv->agp_type);
673a394b 2789 if (obj_priv->agp_mem == NULL) {
e5281ccd 2790 i915_gem_object_put_pages_gtt(obj);
673a394b
EA
2791 drm_mm_put_block(obj_priv->gtt_space);
2792 obj_priv->gtt_space = NULL;
07f73f69 2793
a00b10c3
CW
2794 ret = i915_gem_evict_something(dev, size,
2795 alignment, mappable);
9731129c 2796 if (ret)
07f73f69 2797 return ret;
07f73f69
CW
2798
2799 goto search_free;
673a394b 2800 }
673a394b 2801
fb7d516a
DV
2802 obj_priv->gtt_offset = obj_priv->gtt_space->start;
2803
bf1a1092 2804 /* keep track of the bound object by adding it to the inactive list */
69dc4987 2805 list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
a00b10c3 2806 i915_gem_info_add_gtt(dev_priv, obj_priv);
bf1a1092 2807
673a394b
EA
2808 /* Assert that the object is not currently in any GPU domain. As it
2809 * wasn't in the GTT, there shouldn't be any way it could have been in
2810 * a GPU cache
2811 */
21d509e3
CW
2812 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2813 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
673a394b 2814
ec57d260 2815 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, mappable);
1c5d22f7 2816
a00b10c3
CW
2817 obj_priv->fenceable =
2818 obj_priv->gtt_space->size == fence_size &&
2819 (obj_priv->gtt_space->start & (fence_alignment - 1)) == 0;
2820
2821 obj_priv->mappable =
2822 obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end;
2823
673a394b
EA
2824 return 0;
2825}
2826
2827void
2828i915_gem_clflush_object(struct drm_gem_object *obj)
2829{
23010e43 2830 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
673a394b
EA
2831
2832 /* If we don't have a page list set up, then we're not pinned
2833 * to GPU, and we can ignore the cache flush because it'll happen
2834 * again at bind time.
2835 */
856fa198 2836 if (obj_priv->pages == NULL)
673a394b
EA
2837 return;
2838
1c5d22f7 2839 trace_i915_gem_object_clflush(obj);
cfa16a0d 2840
856fa198 2841 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
673a394b
EA
2842}
2843
e47c68e9 2844/** Flushes any GPU write domain for the object if it's dirty. */
2dafb1e0 2845static int
ba3d8d74
DV
2846i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
2847 bool pipelined)
e47c68e9
EA
2848{
2849 struct drm_device *dev = obj->dev;
e47c68e9
EA
2850
2851 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2dafb1e0 2852 return 0;
e47c68e9
EA
2853
2854 /* Queue the GPU write cache flushing we need. */
c78ec30b 2855 i915_gem_flush_ring(dev, NULL,
9220434a
CW
2856 to_intel_bo(obj)->ring,
2857 0, obj->write_domain);
48b956c5 2858 BUG_ON(obj->write_domain);
1c5d22f7 2859
ba3d8d74
DV
2860 if (pipelined)
2861 return 0;
2862
2cf34d7b 2863 return i915_gem_object_wait_rendering(obj, true);
e47c68e9
EA
2864}
2865
2866/** Flushes the GTT write domain for the object if it's dirty. */
2867static void
2868i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2869{
1c5d22f7
CW
2870 uint32_t old_write_domain;
2871
e47c68e9
EA
2872 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2873 return;
2874
2875 /* No actual flushing is required for the GTT write domain. Writes
2876 * to it immediately go to main memory as far as we know, so there's
2877 * no chipset flush. It also doesn't land in render cache.
2878 */
4a684a41
CW
2879 i915_gem_release_mmap(obj);
2880
1c5d22f7 2881 old_write_domain = obj->write_domain;
e47c68e9 2882 obj->write_domain = 0;
1c5d22f7
CW
2883
2884 trace_i915_gem_object_change_domain(obj,
2885 obj->read_domains,
2886 old_write_domain);
e47c68e9
EA
2887}
2888
2889/** Flushes the CPU write domain for the object if it's dirty. */
2890static void
2891i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2892{
2893 struct drm_device *dev = obj->dev;
1c5d22f7 2894 uint32_t old_write_domain;
e47c68e9
EA
2895
2896 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2897 return;
2898
2899 i915_gem_clflush_object(obj);
2900 drm_agp_chipset_flush(dev);
1c5d22f7 2901 old_write_domain = obj->write_domain;
e47c68e9 2902 obj->write_domain = 0;
1c5d22f7
CW
2903
2904 trace_i915_gem_object_change_domain(obj,
2905 obj->read_domains,
2906 old_write_domain);
e47c68e9
EA
2907}
2908
2ef7eeaa
EA
2909/**
2910 * Moves a single object to the GTT read, and possibly write domain.
2911 *
2912 * This function returns when the move is complete, including waiting on
2913 * flushes to occur.
2914 */
79e53945 2915int
2ef7eeaa
EA
2916i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2917{
23010e43 2918 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1c5d22f7 2919 uint32_t old_write_domain, old_read_domains;
e47c68e9 2920 int ret;
2ef7eeaa 2921
02354392
EA
2922 /* Not valid to be called on unbound objects. */
2923 if (obj_priv->gtt_space == NULL)
2924 return -EINVAL;
2925
ba3d8d74 2926 ret = i915_gem_object_flush_gpu_write_domain(obj, false);
2dafb1e0
CW
2927 if (ret != 0)
2928 return ret;
2929
7213342d 2930 i915_gem_object_flush_cpu_write_domain(obj);
1c5d22f7 2931
ba3d8d74 2932 if (write) {
2cf34d7b 2933 ret = i915_gem_object_wait_rendering(obj, true);
ba3d8d74
DV
2934 if (ret)
2935 return ret;
ba3d8d74 2936 }
e47c68e9 2937
1c5d22f7
CW
2938 old_write_domain = obj->write_domain;
2939 old_read_domains = obj->read_domains;
2940
e47c68e9
EA
2941 /* It should now be out of any other write domains, and we can update
2942 * the domain values for our changes.
2943 */
2944 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2945 obj->read_domains |= I915_GEM_DOMAIN_GTT;
2946 if (write) {
7213342d 2947 obj->read_domains = I915_GEM_DOMAIN_GTT;
e47c68e9
EA
2948 obj->write_domain = I915_GEM_DOMAIN_GTT;
2949 obj_priv->dirty = 1;
2ef7eeaa
EA
2950 }
2951
1c5d22f7
CW
2952 trace_i915_gem_object_change_domain(obj,
2953 old_read_domains,
2954 old_write_domain);
2955
e47c68e9
EA
2956 return 0;
2957}
2958
b9241ea3
ZW
2959/*
2960 * Prepare buffer for display plane. Use uninterruptible for possible flush
2961 * wait, as in the modesetting process we're not supposed to be interrupted.
2962 */
2963int
48b956c5
CW
2964i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
2965 bool pipelined)
b9241ea3 2966{
23010e43 2967 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
ba3d8d74 2968 uint32_t old_read_domains;
b9241ea3
ZW
2969 int ret;
2970
2971 /* Not valid to be called on unbound objects. */
2972 if (obj_priv->gtt_space == NULL)
2973 return -EINVAL;
2974
ced270fa 2975 ret = i915_gem_object_flush_gpu_write_domain(obj, true);
2dafb1e0
CW
2976 if (ret)
2977 return ret;
b9241ea3 2978
ced270fa
CW
2979 /* Currently, we are always called from a non-interruptible context. */
2980 if (!pipelined) {
2981 ret = i915_gem_object_wait_rendering(obj, false);
2982 if (ret)
b9241ea3
ZW
2983 return ret;
2984 }
2985
b118c1e3
CW
2986 i915_gem_object_flush_cpu_write_domain(obj);
2987
b9241ea3 2988 old_read_domains = obj->read_domains;
c78ec30b 2989 obj->read_domains |= I915_GEM_DOMAIN_GTT;
b9241ea3
ZW
2990
2991 trace_i915_gem_object_change_domain(obj,
2992 old_read_domains,
ba3d8d74 2993 obj->write_domain);
b9241ea3
ZW
2994
2995 return 0;
2996}
2997
e47c68e9
EA
2998/**
2999 * Moves a single object to the CPU read, and possibly write domain.
3000 *
3001 * This function returns when the move is complete, including waiting on
3002 * flushes to occur.
3003 */
3004static int
3005i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
3006{
1c5d22f7 3007 uint32_t old_write_domain, old_read_domains;
e47c68e9
EA
3008 int ret;
3009
ba3d8d74 3010 ret = i915_gem_object_flush_gpu_write_domain(obj, false);
e47c68e9
EA
3011 if (ret != 0)
3012 return ret;
2ef7eeaa 3013
e47c68e9 3014 i915_gem_object_flush_gtt_write_domain(obj);
2ef7eeaa 3015
e47c68e9
EA
3016 /* If we have a partially-valid cache of the object in the CPU,
3017 * finish invalidating it and free the per-page flags.
2ef7eeaa 3018 */
e47c68e9 3019 i915_gem_object_set_to_full_cpu_read_domain(obj);
2ef7eeaa 3020
7213342d 3021 if (write) {
2cf34d7b 3022 ret = i915_gem_object_wait_rendering(obj, true);
7213342d
CW
3023 if (ret)
3024 return ret;
3025 }
3026
1c5d22f7
CW
3027 old_write_domain = obj->write_domain;
3028 old_read_domains = obj->read_domains;
3029
e47c68e9
EA
3030 /* Flush the CPU cache if it's still invalid. */
3031 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2ef7eeaa 3032 i915_gem_clflush_object(obj);
2ef7eeaa 3033
e47c68e9 3034 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2ef7eeaa
EA
3035 }
3036
3037 /* It should now be out of any other write domains, and we can update
3038 * the domain values for our changes.
3039 */
e47c68e9
EA
3040 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3041
3042 /* If we're writing through the CPU, then the GPU read domains will
3043 * need to be invalidated at next use.
3044 */
3045 if (write) {
c78ec30b 3046 obj->read_domains = I915_GEM_DOMAIN_CPU;
e47c68e9
EA
3047 obj->write_domain = I915_GEM_DOMAIN_CPU;
3048 }
2ef7eeaa 3049
1c5d22f7
CW
3050 trace_i915_gem_object_change_domain(obj,
3051 old_read_domains,
3052 old_write_domain);
3053
2ef7eeaa
EA
3054 return 0;
3055}
3056
673a394b
EA
3057/*
3058 * Set the next domain for the specified object. This
3059 * may not actually perform the necessary flushing/invalidating though,
3060 * as that may want to be batched with other set_domain operations
3061 *
3062 * This is (we hope) the only really tricky part of gem. The goal
3063 * is fairly simple -- track which caches hold bits of the object
3064 * and make sure they remain coherent. A few concrete examples may
3065 * help to explain how it works. For shorthand, we use the notation
3066 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
3067 * a pair of read and write domain masks.
3068 *
3069 * Case 1: the batch buffer
3070 *
3071 * 1. Allocated
3072 * 2. Written by CPU
3073 * 3. Mapped to GTT
3074 * 4. Read by GPU
3075 * 5. Unmapped from GTT
3076 * 6. Freed
3077 *
3078 * Let's take these a step at a time
3079 *
3080 * 1. Allocated
3081 * Pages allocated from the kernel may still have
3082 * cache contents, so we set them to (CPU, CPU) always.
3083 * 2. Written by CPU (using pwrite)
3084 * The pwrite function calls set_domain (CPU, CPU) and
3085 * this function does nothing (as nothing changes)
3086 * 3. Mapped by GTT
3087 * This function asserts that the object is not
3088 * currently in any GPU-based read or write domains
3089 * 4. Read by GPU
3090 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
3091 * As write_domain is zero, this function adds in the
3092 * current read domains (CPU+COMMAND, 0).
3093 * flush_domains is set to CPU.
3094 * invalidate_domains is set to COMMAND
3095 * clflush is run to get data out of the CPU caches
3096 * then i915_dev_set_domain calls i915_gem_flush to
3097 * emit an MI_FLUSH and drm_agp_chipset_flush
3098 * 5. Unmapped from GTT
3099 * i915_gem_object_unbind calls set_domain (CPU, CPU)
3100 * flush_domains and invalidate_domains end up both zero
3101 * so no flushing/invalidating happens
3102 * 6. Freed
3103 * yay, done
3104 *
3105 * Case 2: The shared render buffer
3106 *
3107 * 1. Allocated
3108 * 2. Mapped to GTT
3109 * 3. Read/written by GPU
3110 * 4. set_domain to (CPU,CPU)
3111 * 5. Read/written by CPU
3112 * 6. Read/written by GPU
3113 *
3114 * 1. Allocated
3115 * Same as last example, (CPU, CPU)
3116 * 2. Mapped to GTT
3117 * Nothing changes (assertions find that it is not in the GPU)
3118 * 3. Read/written by GPU
3119 * execbuffer calls set_domain (RENDER, RENDER)
3120 * flush_domains gets CPU
3121 * invalidate_domains gets GPU
3122 * clflush (obj)
3123 * MI_FLUSH and drm_agp_chipset_flush
3124 * 4. set_domain (CPU, CPU)
3125 * flush_domains gets GPU
3126 * invalidate_domains gets CPU
3127 * wait_rendering (obj) to make sure all drawing is complete.
3128 * This will include an MI_FLUSH to get the data from GPU
3129 * to memory
3130 * clflush (obj) to invalidate the CPU cache
3131 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
3132 * 5. Read/written by CPU
3133 * cache lines are loaded and dirtied
3135 * 6. Read/written by GPU
3135 * Same as last GPU access
3136 *
3137 * Case 3: The constant buffer
3138 *
3139 * 1. Allocated
3140 * 2. Written by CPU
3141 * 3. Read by GPU
3142 * 4. Updated (written) by CPU again
3143 * 5. Read by GPU
3144 *
3145 * 1. Allocated
3146 * (CPU, CPU)
3147 * 2. Written by CPU
3148 * (CPU, CPU)
3149 * 3. Read by GPU
3150 * (CPU+RENDER, 0)
3151 * flush_domains = CPU
3152 * invalidate_domains = RENDER
3153 * clflush (obj)
3154 * MI_FLUSH
3155 * drm_agp_chipset_flush
3156 * 4. Updated (written) by CPU again
3157 * (CPU, CPU)
3158 * flush_domains = 0 (no previous write domain)
3159 * invalidate_domains = 0 (no new read domains)
3160 * 5. Read by GPU
3161 * (CPU+RENDER, 0)
3162 * flush_domains = CPU
3163 * invalidate_domains = RENDER
3164 * clflush (obj)
3165 * MI_FLUSH
3166 * drm_agp_chipset_flush
3167 */
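/* Accumulate this object's domain transition for the upcoming execbuffer:
 * work out which caches must be flushed and which invalidated, clflush or
 * unmap as needed, and record the result (and the rings involved) in
 * dev->flush_domains / dev->invalidate_domains so one combined flush can be
 * emitted later.
 */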
c0d90829 3168static void
b6651458
CW
3169i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
3170 struct intel_ring_buffer *ring)
673a394b
EA
3171{
3172 struct drm_device *dev = obj->dev;
9220434a 3173 struct drm_i915_private *dev_priv = dev->dev_private;
23010e43 3174 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
673a394b
EA
3175 uint32_t invalidate_domains = 0;
3176 uint32_t flush_domains = 0;
652c393a 3177
673a394b
EA
3178 /*
3179 * If the object isn't moving to a new write domain,
3180 * let the object stay in multiple read domains
3181 */
8b0e378a
EA
3182 if (obj->pending_write_domain == 0)
3183 obj->pending_read_domains |= obj->read_domains;
673a394b
EA
3184
3185 /*
3186 * Flush the current write domain if
3187 * the new read domains don't match. Invalidate
3188 * any read domains which differ from the old
3189 * write domain
3190 */
8b0e378a 3191 if (obj->write_domain &&
13b29289
CW
3192 (obj->write_domain != obj->pending_read_domains ||
3193 obj_priv->ring != ring)) {
673a394b 3194 flush_domains |= obj->write_domain;
8b0e378a
EA
3195 invalidate_domains |=
3196 obj->pending_read_domains & ~obj->write_domain;
673a394b
EA
3197 }
3198 /*
3199 * Invalidate any read caches which may have
3200 * stale data. That is, any new read domains.
3201 */
8b0e378a 3202 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
3d2a812a 3203 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
673a394b 3204 i915_gem_clflush_object(obj);
673a394b 3205
4a684a41
CW
3206 /* blow away mappings if mapped through GTT */
3207 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
3208 i915_gem_release_mmap(obj);
3209
efbeed96
EA
3210 /* The actual obj->write_domain will be updated with
3211 * pending_write_domain after we emit the accumulated flush for all
3212 * of our domain changes in execbuffers (which clears objects'
3213 * write_domains). So if we have a current write domain that we
3214 * aren't changing, set pending_write_domain to that.
3215 */
3216 if (flush_domains == 0 && obj->pending_write_domain == 0)
3217 obj->pending_write_domain = obj->write_domain;
673a394b
EA
3218
3219 dev->invalidate_domains |= invalidate_domains;
3220 dev->flush_domains |= flush_domains;
b6651458 3221 if (flush_domains & I915_GEM_GPU_DOMAINS)
9220434a 3222 dev_priv->mm.flush_rings |= obj_priv->ring->id;
b6651458
CW
3223 if (invalidate_domains & I915_GEM_GPU_DOMAINS)
3224 dev_priv->mm.flush_rings |= ring->id;
673a394b
EA
3225}
3226
3227/**
e47c68e9 3228 * Moves the object from a partially CPU read to a full one.
673a394b 3229 *
e47c68e9
EA
3230 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3231 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
673a394b 3232 */
e47c68e9
EA
3233static void
3234i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
673a394b 3235{
23010e43 3236 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
673a394b 3237
e47c68e9
EA
3238 if (!obj_priv->page_cpu_valid)
3239 return;
3240
3241 /* If we're partially in the CPU read domain, finish moving it in.
3242 */
3243 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3244 int i;
3245
3246 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3247 if (obj_priv->page_cpu_valid[i])
3248 continue;
856fa198 3249 drm_clflush_pages(obj_priv->pages + i, 1);
e47c68e9 3250 }
e47c68e9
EA
3251 }
3252
3253 /* Free the page_cpu_valid mappings which are now stale, whether
3254 * or not we've got I915_GEM_DOMAIN_CPU.
3255 */
9a298b2a 3256 kfree(obj_priv->page_cpu_valid);
e47c68e9
EA
3257 obj_priv->page_cpu_valid = NULL;
3258}
3259
3260/**
3261 * Set the CPU read domain on a range of the object.
3262 *
3263 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3264 * not entirely valid. The page_cpu_valid member of the object flags which
3265 * pages have been flushed, and will be respected by
3266 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3267 * of the whole object.
3268 *
3269 * This function returns when the move is complete, including waiting on
3270 * flushes to occur.
3271 */
3272static int
3273i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3274 uint64_t offset, uint64_t size)
3275{
23010e43 3276 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1c5d22f7 3277 uint32_t old_read_domains;
e47c68e9 3278 int i, ret;
673a394b 3279
e47c68e9
EA
3280 if (offset == 0 && size == obj->size)
3281 return i915_gem_object_set_to_cpu_domain(obj, 0);
673a394b 3282
ba3d8d74 3283 ret = i915_gem_object_flush_gpu_write_domain(obj, false);
e47c68e9 3284 if (ret != 0)
6a47baa6 3285 return ret;
e47c68e9
EA
3286 i915_gem_object_flush_gtt_write_domain(obj);
3287
3288 /* If we're already fully in the CPU read domain, we're done. */
3289 if (obj_priv->page_cpu_valid == NULL &&
3290 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
3291 return 0;
673a394b 3292
e47c68e9
EA
3293 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3294 * newly adding I915_GEM_DOMAIN_CPU
3295 */
673a394b 3296 if (obj_priv->page_cpu_valid == NULL) {
9a298b2a
EA
3297 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3298 GFP_KERNEL);
e47c68e9
EA
3299 if (obj_priv->page_cpu_valid == NULL)
3300 return -ENOMEM;
3301 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3302 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
673a394b
EA
3303
3304 /* Flush the cache on any pages that are still invalid from the CPU's
3305 * perspective.
3306 */
e47c68e9
EA
3307 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3308 i++) {
673a394b
EA
3309 if (obj_priv->page_cpu_valid[i])
3310 continue;
3311
856fa198 3312 drm_clflush_pages(obj_priv->pages + i, 1);
673a394b
EA
3313
3314 obj_priv->page_cpu_valid[i] = 1;
3315 }
3316
e47c68e9
EA
3317 /* It should now be out of any other write domains, and we can update
3318 * the domain values for our changes.
3319 */
3320 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3321
1c5d22f7 3322 old_read_domains = obj->read_domains;
e47c68e9
EA
3323 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3324
1c5d22f7
CW
3325 trace_i915_gem_object_change_domain(obj,
3326 old_read_domains,
3327 obj->write_domain);
3328
673a394b
EA
3329 return 0;
3330}
3331
673a394b
EA
3332/**
3333 * Pin an object to the GTT and evaluate the relocations landing in it.
3334 */
3335static int
9af90d19
CW
3336i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
3337 struct drm_file *file_priv,
3338 struct drm_i915_gem_exec_object2 *entry)
673a394b 3339{
9af90d19 3340 struct drm_device *dev = obj->base.dev;
0839ccb8 3341 drm_i915_private_t *dev_priv = dev->dev_private;
2549d6c2 3342 struct drm_i915_gem_relocation_entry __user *user_relocs;
9af90d19
CW
3343 struct drm_gem_object *target_obj = NULL;
3344 uint32_t target_handle = 0;
3345 int i, ret = 0;
673a394b 3346
2549d6c2 3347 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
673a394b 3348 for (i = 0; i < entry->relocation_count; i++) {
2549d6c2 3349 struct drm_i915_gem_relocation_entry reloc;
9af90d19 3350 uint32_t target_offset;
673a394b 3351
9af90d19
CW
3352 if (__copy_from_user_inatomic(&reloc,
3353 user_relocs+i,
3354 sizeof(reloc))) {
3355 ret = -EFAULT;
3356 break;
76446cac 3357 }
76446cac 3358
9af90d19
CW
3359 if (reloc.target_handle != target_handle) {
3360 drm_gem_object_unreference(target_obj);
673a394b 3361
9af90d19
CW
3362 target_obj = drm_gem_object_lookup(dev, file_priv,
3363 reloc.target_handle);
3364 if (target_obj == NULL) {
3365 ret = -ENOENT;
3366 break;
3367 }
3368
3369 target_handle = reloc.target_handle;
673a394b 3370 }
9af90d19 3371 target_offset = to_intel_bo(target_obj)->gtt_offset;
673a394b 3372
8542a0bb
CW
3373#if WATCH_RELOC
3374 DRM_INFO("%s: obj %p offset %08x target %d "
3375 "read %08x write %08x gtt %08x "
3376 "presumed %08x delta %08x\n",
3377 __func__,
3378 obj,
2549d6c2
CW
3379 (int) reloc.offset,
3380 (int) reloc.target_handle,
3381 (int) reloc.read_domains,
3382 (int) reloc.write_domain,
9af90d19 3383 (int) target_offset,
2549d6c2
CW
3384 (int) reloc.presumed_offset,
3385 reloc.delta);
8542a0bb
CW
3386#endif
3387
673a394b
EA
3388 /* The target buffer should have appeared before us in the
3389 * exec_object list, so it should have a GTT space bound by now.
3390 */
9af90d19 3391 if (target_offset == 0) {
673a394b 3392 DRM_ERROR("No GTT space found for object %d\n",
2549d6c2 3393 reloc.target_handle);
9af90d19
CW
3394 ret = -EINVAL;
3395 break;
673a394b
EA
3396 }
3397
8542a0bb 3398 /* Validate that the target is in a valid r/w GPU domain */
2549d6c2 3399 if (reloc.write_domain & (reloc.write_domain - 1)) {
16edd550
DV
3400 DRM_ERROR("reloc with multiple write domains: "
3401 "obj %p target %d offset %d "
3402 "read %08x write %08x",
2549d6c2
CW
3403 obj, reloc.target_handle,
3404 (int) reloc.offset,
3405 reloc.read_domains,
3406 reloc.write_domain);
9af90d19
CW
3407 ret = -EINVAL;
3408 break;
16edd550 3409 }
2549d6c2
CW
3410 if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
3411 reloc.read_domains & I915_GEM_DOMAIN_CPU) {
e47c68e9
EA
3412 DRM_ERROR("reloc with read/write CPU domains: "
3413 "obj %p target %d offset %d "
3414 "read %08x write %08x",
2549d6c2
CW
3415 obj, reloc.target_handle,
3416 (int) reloc.offset,
3417 reloc.read_domains,
3418 reloc.write_domain);
9af90d19
CW
3419 ret = -EINVAL;
3420 break;
e47c68e9 3421 }
2549d6c2
CW
3422 if (reloc.write_domain && target_obj->pending_write_domain &&
3423 reloc.write_domain != target_obj->pending_write_domain) {
673a394b
EA
3424 DRM_ERROR("Write domain conflict: "
3425 "obj %p target %d offset %d "
3426 "new %08x old %08x\n",
2549d6c2
CW
3427 obj, reloc.target_handle,
3428 (int) reloc.offset,
3429 reloc.write_domain,
673a394b 3430 target_obj->pending_write_domain);
9af90d19
CW
3431 ret = -EINVAL;
3432 break;
673a394b
EA
3433 }
3434
2549d6c2 3435 target_obj->pending_read_domains |= reloc.read_domains;
878a3c37 3436 target_obj->pending_write_domain |= reloc.write_domain;
673a394b
EA
3437
3438 /* If the relocation already has the right value in it, no
3439 * more work needs to be done.
3440 */
9af90d19 3441 if (target_offset == reloc.presumed_offset)
673a394b 3442 continue;
673a394b 3443
8542a0bb 3444 /* Check that the relocation address is valid... */
9af90d19 3445 if (reloc.offset > obj->base.size - 4) {
8542a0bb
CW
3446 DRM_ERROR("Relocation beyond object bounds: "
3447 "obj %p target %d offset %d size %d.\n",
2549d6c2 3448 obj, reloc.target_handle,
9af90d19
CW
3449 (int) reloc.offset, (int) obj->base.size);
3450 ret = -EINVAL;
3451 break;
8542a0bb 3452 }
2549d6c2 3453 if (reloc.offset & 3) {
8542a0bb
CW
3454 DRM_ERROR("Relocation not 4-byte aligned: "
3455 "obj %p target %d offset %d.\n",
2549d6c2
CW
3456 obj, reloc.target_handle,
3457 (int) reloc.offset);
9af90d19
CW
3458 ret = -EINVAL;
3459 break;
8542a0bb
CW
3460 }
3461
3462 /* and points to somewhere within the target object. */
2549d6c2 3463 if (reloc.delta >= target_obj->size) {
8542a0bb
CW
3464 DRM_ERROR("Relocation beyond target object bounds: "
3465 "obj %p target %d delta %d size %d.\n",
2549d6c2
CW
3466 obj, reloc.target_handle,
3467 (int) reloc.delta, (int) target_obj->size);
9af90d19
CW
3468 ret = -EINVAL;
3469 break;
673a394b
EA
3470 }
3471
9af90d19
CW
3472 reloc.delta += target_offset;
3473 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
f0c43d9b
CW
3474 uint32_t page_offset = reloc.offset & ~PAGE_MASK;
3475 char *vaddr;
673a394b 3476
c48c43e4 3477 vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
f0c43d9b 3478 *(uint32_t *)(vaddr + page_offset) = reloc.delta;
c48c43e4 3479 kunmap_atomic(vaddr);
f0c43d9b
CW
3480 } else {
3481 uint32_t __iomem *reloc_entry;
3482 void __iomem *reloc_page;
b962442e 3483
9af90d19
CW
3484 ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
3485 if (ret)
3486 break;
b962442e 3487
f0c43d9b 3488 /* Map the page containing the relocation we're going to perform. */
9af90d19 3489 reloc.offset += obj->gtt_offset;
f0c43d9b 3490 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
c48c43e4 3491 reloc.offset & PAGE_MASK);
f0c43d9b
CW
3492 reloc_entry = (uint32_t __iomem *)
3493 (reloc_page + (reloc.offset & ~PAGE_MASK));
3494 iowrite32(reloc.delta, reloc_entry);
c48c43e4 3495 io_mapping_unmap_atomic(reloc_page);
f0c43d9b 3496 }
b962442e 3497
b5dc608c
CW
3498 /* and update the user's relocation entry */
3499 reloc.presumed_offset = target_offset;
3500 if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
3501 &reloc.presumed_offset,
3502 sizeof(reloc.presumed_offset))) {
3503 ret = -EFAULT;
3504 break;
3505 }
b962442e 3506 }
b962442e 3507
9af90d19 3508 drm_gem_object_unreference(target_obj);
673a394b
EA
3509 return ret;
3510}
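/*
 * For reference, a minimal sketch of one such relocation as userspace would
 * describe it (field names are from i915_drm.h; target_bo_handle,
 * guessed_gtt_offset and the dword-0x40 location are illustrative
 * placeholders):
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle   = target_bo_handle,
 *		.delta           = 0,
 *		.offset          = 0x40 * sizeof(uint32_t),
 *		.presumed_offset = guessed_gtt_offset,
 *		.read_domains    = I915_GEM_DOMAIN_RENDER,
 *		.write_domain    = 0,
 *	};
 *
 * When presumed_offset still matches the target's current gtt_offset, the
 * fast path above skips the rewrite entirely; otherwise the kernel patches
 * the dword at reloc.offset and copies the updated presumed_offset back to
 * the user's array.
 */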
3511
40a5f0de 3512static int
9af90d19
CW
3513i915_gem_execbuffer_pin(struct drm_device *dev,
3514 struct drm_file *file,
3515 struct drm_gem_object **object_list,
3516 struct drm_i915_gem_exec_object2 *exec_list,
3517 int count)
40a5f0de 3518{
9af90d19
CW
3519 struct drm_i915_private *dev_priv = dev->dev_private;
3520 int ret, i, retry;
40a5f0de 3521
9af90d19 3522 /* attempt to pin all of the buffers into the GTT */
5eac3ab4
CW
3523 retry = 0;
3524 do {
9af90d19
CW
3525 ret = 0;
3526 for (i = 0; i < count; i++) {
3527 struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
16e809ac 3528 struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
9af90d19
CW
3529 bool need_fence =
3530 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3531 obj->tiling_mode != I915_TILING_NONE;
3532
16e809ac
DV
3533 /* g33/pnv can't fence buffers in the unmappable part */
3534 bool need_mappable =
3535 entry->relocation_count ? true : need_fence;
3536
9af90d19 3537 /* Check fence reg constraints and rebind if necessary */
a00b10c3
CW
3538 if ((need_fence && !obj->fenceable) ||
3539 (need_mappable && !obj->mappable)) {
9af90d19
CW
3540 ret = i915_gem_object_unbind(&obj->base);
3541 if (ret)
3542 break;
3543 }
40a5f0de 3544
920afa77 3545 ret = i915_gem_object_pin(&obj->base,
16e809ac 3546 entry->alignment,
a00b10c3
CW
3547 need_mappable,
3548 need_fence);
9af90d19
CW
3549 if (ret)
3550 break;
40a5f0de 3551
9af90d19
CW
3552 /*
3553 * Pre-965 chips need a fence register set up in order
3554 * to properly handle blits to/from tiled surfaces.
3555 */
3556 if (need_fence) {
3557 ret = i915_gem_object_get_fence_reg(&obj->base, true);
3558 if (ret) {
3559 i915_gem_object_unpin(&obj->base);
3560 break;
3561 }
40a5f0de 3562
9af90d19
CW
3563 dev_priv->fence_regs[obj->fence_reg].gpu = true;
3564 }
40a5f0de 3565
9af90d19 3566 entry->offset = obj->gtt_offset;
40a5f0de
EA
3567 }
3568
9af90d19
CW
3569 while (i--)
3570 i915_gem_object_unpin(object_list[i]);
3571
5eac3ab4 3572 if (ret != -ENOSPC || retry > 1)
9af90d19
CW
3573 return ret;
3574
5eac3ab4
CW
3575 /* First attempt, just clear anything that is purgeable.
3576 * Second attempt, clear the entire GTT.
3577 */
3578 ret = i915_gem_evict_everything(dev, retry == 0);
9af90d19
CW
3579 if (ret)
3580 return ret;
40a5f0de 3581
5eac3ab4
CW
3582 retry++;
3583 } while (1);
40a5f0de
EA
3584}
3585
13b29289
CW
3586static int
3587i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
3588 struct drm_file *file,
3589 struct intel_ring_buffer *ring,
3590 struct drm_gem_object **objects,
3591 int count)
3592{
3593 struct drm_i915_private *dev_priv = dev->dev_private;
3594 int ret, i;
3595
3596 /* Zero the global flush/invalidate flags. These
3597 * will be modified as new domains are computed
3598 * for each object
3599 */
3600 dev->invalidate_domains = 0;
3601 dev->flush_domains = 0;
3602 dev_priv->mm.flush_rings = 0;
3603 for (i = 0; i < count; i++)
3604 i915_gem_object_set_to_gpu_domain(objects[i], ring);
3605
3606 if (dev->invalidate_domains | dev->flush_domains) {
3607#if WATCH_EXEC
3608 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3609 __func__,
3610 dev->invalidate_domains,
3611 dev->flush_domains);
3612#endif
3613 i915_gem_flush(dev, file,
3614 dev->invalidate_domains,
3615 dev->flush_domains,
3616 dev_priv->mm.flush_rings);
3617 }
3618
3619 for (i = 0; i < count; i++) {
3620 struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
3621 /* XXX replace with semaphores */
3622 if (obj->ring && ring != obj->ring) {
3623 ret = i915_gem_object_wait_rendering(&obj->base, true);
3624 if (ret)
3625 return ret;
3626 }
3627 }
3628
3629 return 0;
3630}
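/*
 * The loop above provides coarse inter-ring synchronisation: any object
 * whose last use was on a different ring is fully waited upon before this
 * batch is dispatched, so cross-ring access is serialised on the CPU.  The
 * XXX marks where hardware semaphores could eventually take over this job.
 */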
3631
673a394b
EA
3632/* Throttle our rendering by waiting until the ring has completed our requests
3633 * emitted over 20 msec ago.
3634 *
b962442e
EA
3635 * Note that if we were to use the current jiffies each time around the loop,
3636 * we wouldn't escape the function with any frames outstanding if the time to
3637 * render a frame was over 20ms.
3638 *
673a394b
EA
3639 * This should get us reasonable parallelism between CPU and GPU but also
3640 * relatively low latency when blocking on a particular request to finish.
3641 */
40a5f0de 3642static int
f787a5f5 3643i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
40a5f0de 3644{
f787a5f5
CW
3645 struct drm_i915_private *dev_priv = dev->dev_private;
3646 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e 3647 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
f787a5f5
CW
3648 struct drm_i915_gem_request *request;
3649 struct intel_ring_buffer *ring = NULL;
3650 u32 seqno = 0;
3651 int ret;
93533c29 3652
1c25595f 3653 spin_lock(&file_priv->mm.lock);
f787a5f5 3654 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
b962442e
EA
3655 if (time_after_eq(request->emitted_jiffies, recent_enough))
3656 break;
40a5f0de 3657
f787a5f5
CW
3658 ring = request->ring;
3659 seqno = request->seqno;
b962442e 3660 }
1c25595f 3661 spin_unlock(&file_priv->mm.lock);
40a5f0de 3662
f787a5f5
CW
3663 if (seqno == 0)
3664 return 0;
2bc43b5c 3665
f787a5f5 3666 ret = 0;
78501eac 3667 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
f787a5f5
CW
3668 /* And wait for the seqno to pass without holding any locks, to avoid
3669 * causing extra latency for others. This is safe as the irq
3670 * generation is designed to be run atomically and so is
3671 * lockless.
3672 */
78501eac 3673 ring->user_irq_get(ring);
f787a5f5 3674 ret = wait_event_interruptible(ring->irq_queue,
78501eac 3675 i915_seqno_passed(ring->get_seqno(ring), seqno)
f787a5f5 3676 || atomic_read(&dev_priv->mm.wedged));
78501eac 3677 ring->user_irq_put(ring);
40a5f0de 3678
f787a5f5
CW
3679 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3680 ret = -EIO;
40a5f0de
EA
3681 }
3682
f787a5f5
CW
3683 if (ret == 0)
3684 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
40a5f0de
EA
3685
3686 return ret;
3687}
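/*
 * A minimal userspace sketch of the matching ioctl (assuming an open DRM fd
 * and libdrm's xf86drm.h / i915_drm.h headers; the throttle ioctl carries
 * no payload):
 *
 *	int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
 *
 * A return of -1 with errno set to EIO indicates a wedged GPU.  Clients
 * such as the X driver typically throttle once per frame, so together with
 * the 20 msec window above this bounds how much unfinished rendering any
 * one client can queue.
 */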
3688
83d60795 3689static int
2549d6c2
CW
3690i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
3691 uint64_t exec_offset)
83d60795
CW
3692{
3693 uint32_t exec_start, exec_len;
3694
3695 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3696 exec_len = (uint32_t) exec->batch_len;
3697
3698 if ((exec_start | exec_len) & 0x7)
3699 return -EINVAL;
3700
3701 if (!exec_start)
3702 return -EINVAL;
3703
3704 return 0;
3705}
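/*
 * Example of the batch sanity check above: with the batch object bound at
 * GTT offset 0x10000, batch_start_offset = 0x100 and batch_len = 0x3f8
 * pass (start 0x10100 and length are both 8-byte aligned and the start is
 * non-zero), whereas batch_start_offset = 0x104 would fail with -EINVAL.
 */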
3706
6b95a207 3707static int
2549d6c2
CW
3708validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
3709 int count)
6b95a207 3710{
2549d6c2 3711 int i;
6b95a207 3712
2549d6c2
CW
3713 for (i = 0; i < count; i++) {
3714 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
3715 size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
6b95a207 3716
2549d6c2
CW
3717 if (!access_ok(VERIFY_READ, ptr, length))
3718 return -EFAULT;
40a5f0de 3719
b5dc608c
CW
3720 /* we may also need to update the presumed offsets */
3721 if (!access_ok(VERIFY_WRITE, ptr, length))
3722 return -EFAULT;
3723
2549d6c2
CW
3724 if (fault_in_pages_readable(ptr, length))
3725 return -EFAULT;
6b95a207 3726 }
6b95a207 3727
83d60795 3728 return 0;
6b95a207
KH
3729}
3730
8dc5d147 3731static int
76446cac 3732i915_gem_do_execbuffer(struct drm_device *dev, void *data,
9af90d19 3733 struct drm_file *file,
76446cac
JB
3734 struct drm_i915_gem_execbuffer2 *args,
3735 struct drm_i915_gem_exec_object2 *exec_list)
673a394b
EA
3736{
3737 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b
EA
3738 struct drm_gem_object **object_list = NULL;
3739 struct drm_gem_object *batch_obj;
201361a5 3740 struct drm_clip_rect *cliprects = NULL;
8dc5d147 3741 struct drm_i915_gem_request *request = NULL;
9af90d19 3742 int ret, i, flips;
673a394b 3743 uint64_t exec_offset;
673a394b 3744
852835f3
ZN
3745 struct intel_ring_buffer *ring = NULL;
3746
30dbf0c0
CW
3747 ret = i915_gem_check_is_wedged(dev);
3748 if (ret)
3749 return ret;
3750
2549d6c2
CW
3751 ret = validate_exec_list(exec_list, args->buffer_count);
3752 if (ret)
3753 return ret;
3754
673a394b
EA
3755#if WATCH_EXEC
3756 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3757 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3758#endif
549f7365
CW
3759 switch (args->flags & I915_EXEC_RING_MASK) {
3760 case I915_EXEC_DEFAULT:
3761 case I915_EXEC_RENDER:
3762 ring = &dev_priv->render_ring;
3763 break;
3764 case I915_EXEC_BSD:
d1b851fc 3765 if (!HAS_BSD(dev)) {
549f7365 3766 DRM_ERROR("execbuf with invalid ring (BSD)\n");
d1b851fc
ZN
3767 return -EINVAL;
3768 }
3769 ring = &dev_priv->bsd_ring;
549f7365
CW
3770 break;
3771 case I915_EXEC_BLT:
3772 if (!HAS_BLT(dev)) {
3773 DRM_ERROR("execbuf with invalid ring (BLT)\n");
3774 return -EINVAL;
3775 }
3776 ring = &dev_priv->blt_ring;
3777 break;
3778 default:
3779 DRM_ERROR("execbuf with unknown ring: %d\n",
3780 (int)(args->flags & I915_EXEC_RING_MASK));
3781 return -EINVAL;
d1b851fc
ZN
3782 }
3783
4f481ed2
EA
3784 if (args->buffer_count < 1) {
3785 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3786 return -EINVAL;
3787 }
c8e0f93a 3788 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
76446cac
JB
3789 if (object_list == NULL) {
3790 DRM_ERROR("Failed to allocate object list for %d buffers\n",
673a394b
EA
3791 args->buffer_count);
3792 ret = -ENOMEM;
3793 goto pre_mutex_err;
3794 }
673a394b 3795
201361a5 3796 if (args->num_cliprects != 0) {
9a298b2a
EA
3797 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3798 GFP_KERNEL);
a40e8d31
OA
3799 if (cliprects == NULL) {
3800 ret = -ENOMEM;
201361a5 3801 goto pre_mutex_err;
a40e8d31 3802 }
201361a5
EA
3803
3804 ret = copy_from_user(cliprects,
3805 (struct drm_clip_rect __user *)
3806 (uintptr_t) args->cliprects_ptr,
3807 sizeof(*cliprects) * args->num_cliprects);
3808 if (ret != 0) {
3809 DRM_ERROR("copy %d cliprects failed: %d\n",
3810 args->num_cliprects, ret);
c877cdce 3811 ret = -EFAULT;
201361a5
EA
3812 goto pre_mutex_err;
3813 }
3814 }
3815
8dc5d147
CW
3816 request = kzalloc(sizeof(*request), GFP_KERNEL);
3817 if (request == NULL) {
3818 ret = -ENOMEM;
40a5f0de 3819 goto pre_mutex_err;
8dc5d147 3820 }
40a5f0de 3821
76c1dec1
CW
3822 ret = i915_mutex_lock_interruptible(dev);
3823 if (ret)
a198bc80 3824 goto pre_mutex_err;
673a394b
EA
3825
3826 if (dev_priv->mm.suspended) {
673a394b 3827 mutex_unlock(&dev->struct_mutex);
a198bc80
CW
3828 ret = -EBUSY;
3829 goto pre_mutex_err;
673a394b
EA
3830 }
3831
ac94a962 3832 /* Look up object handles */
673a394b 3833 for (i = 0; i < args->buffer_count; i++) {
7e318e18
CW
3834 struct drm_i915_gem_object *obj_priv;
3835
9af90d19 3836 object_list[i] = drm_gem_object_lookup(dev, file,
673a394b
EA
3837 exec_list[i].handle);
3838 if (object_list[i] == NULL) {
3839 DRM_ERROR("Invalid object handle %d at index %d\n",
3840 exec_list[i].handle, i);
0ce907f8
CW
3841 /* prevent error path from reading uninitialized data */
3842 args->buffer_count = i + 1;
bf79cb91 3843 ret = -ENOENT;
673a394b
EA
3844 goto err;
3845 }
b70d11da 3846
23010e43 3847 obj_priv = to_intel_bo(object_list[i]);
b70d11da
KH
3848 if (obj_priv->in_execbuffer) {
3849 DRM_ERROR("Object %p appears more than once in object list\n",
3850 object_list[i]);
0ce907f8
CW
3851 /* prevent error path from reading uninitialized data */
3852 args->buffer_count = i + 1;
bf79cb91 3853 ret = -EINVAL;
b70d11da
KH
3854 goto err;
3855 }
3856 obj_priv->in_execbuffer = true;
ac94a962 3857 }
673a394b 3858
9af90d19
CW
3859 /* Move the objects en-masse into the GTT, evicting if necessary. */
3860 ret = i915_gem_execbuffer_pin(dev, file,
3861 object_list, exec_list,
3862 args->buffer_count);
3863 if (ret)
3864 goto err;
ac94a962 3865
9af90d19
CW
3866 /* The objects are in their final locations, apply the relocations. */
3867 for (i = 0; i < args->buffer_count; i++) {
3868 struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3869 obj->base.pending_read_domains = 0;
3870 obj->base.pending_write_domain = 0;
3871 ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
3872 if (ret)
ac94a962 3873 goto err;
673a394b
EA
3874 }
3875
3876 /* Set the pending read domains for the batch buffer to COMMAND */
3877 batch_obj = object_list[args->buffer_count-1];
5f26a2c7
CW
3878 if (batch_obj->pending_write_domain) {
3879 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3880 ret = -EINVAL;
3881 goto err;
3882 }
3883 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
673a394b 3884
9af90d19
CW
3885 /* Sanity check the batch buffer */
3886 exec_offset = to_intel_bo(batch_obj)->gtt_offset;
3887 ret = i915_gem_check_execbuffer(args, exec_offset);
83d60795
CW
3888 if (ret != 0) {
3889 DRM_ERROR("execbuf with invalid offset/length\n");
3890 goto err;
3891 }
3892
13b29289
CW
3893 ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
3894 object_list, args->buffer_count);
3895 if (ret)
3896 goto err;
673a394b 3897
673a394b
EA
3898#if WATCH_COHERENCY
3899 for (i = 0; i < args->buffer_count; i++) {
3900 i915_gem_object_check_coherency(object_list[i],
3901 exec_list[i].handle);
3902 }
3903#endif
3904
673a394b 3905#if WATCH_EXEC
6911a9b8 3906 i915_gem_dump_object(batch_obj,
673a394b
EA
3907 args->batch_len,
3908 __func__,
3909 ~0);
3910#endif
3911
e59f2bac
CW
3912 /* Check for any pending flips. As we only maintain a flip queue depth
3913 * of 1, we can simply insert a WAIT for the next display flip prior
3914 * to executing the batch and avoid stalling the CPU.
3915 */
3916 flips = 0;
3917 for (i = 0; i < args->buffer_count; i++) {
3918 if (object_list[i]->write_domain)
3919 flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
3920 }
3921 if (flips) {
3922 int plane, flip_mask;
3923
3924 for (plane = 0; flips >> plane; plane++) {
3925 if (((flips >> plane) & 1) == 0)
3926 continue;
3927
3928 if (plane)
3929 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
3930 else
3931 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
3932
e1f99ce6
CW
3933 ret = intel_ring_begin(ring, 2);
3934 if (ret)
3935 goto err;
3936
78501eac
CW
3937 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
3938 intel_ring_emit(ring, MI_NOOP);
3939 intel_ring_advance(ring);
e59f2bac
CW
3940 }
3941 }
3942
673a394b 3943 /* Exec the batchbuffer */
78501eac 3944 ret = ring->dispatch_execbuffer(ring, args, cliprects, exec_offset);
673a394b
EA
3945 if (ret) {
3946 DRM_ERROR("dispatch failed %d\n", ret);
3947 goto err;
3948 }
3949
673a394b
EA
3950 for (i = 0; i < args->buffer_count; i++) {
3951 struct drm_gem_object *obj = object_list[i];
673a394b 3952
7e318e18
CW
3953 obj->read_domains = obj->pending_read_domains;
3954 obj->write_domain = obj->pending_write_domain;
3955
617dbe27 3956 i915_gem_object_move_to_active(obj, ring);
7e318e18
CW
3957 if (obj->write_domain) {
3958 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3959 obj_priv->dirty = 1;
3960 list_move_tail(&obj_priv->gpu_write_list,
64193406 3961 &ring->gpu_write_list);
7e318e18
CW
3962 intel_mark_busy(dev, obj);
3963 }
3964
3965 trace_i915_gem_object_change_domain(obj,
3966 obj->read_domains,
3967 obj->write_domain);
673a394b 3968 }
673a394b 3969
7e318e18
CW
3970 /*
3971 * Ensure that the commands in the batch buffer are
3972 * finished before the interrupt fires
3973 */
3974 i915_retire_commands(dev, ring);
3975
3cce469c
CW
3976 if (i915_add_request(dev, file, request, ring))
3977 ring->outstanding_lazy_request = true;
3978 else
3979 request = NULL;
673a394b 3980
673a394b 3981err:
b70d11da 3982 for (i = 0; i < args->buffer_count; i++) {
7e318e18
CW
3983 if (object_list[i] == NULL)
3984 break;
3985
3986 to_intel_bo(object_list[i])->in_execbuffer = false;
aad87dff 3987 drm_gem_object_unreference(object_list[i]);
b70d11da 3988 }
673a394b 3989
673a394b
EA
3990 mutex_unlock(&dev->struct_mutex);
3991
93533c29 3992pre_mutex_err:
8e7d2b2c 3993 drm_free_large(object_list);
9a298b2a 3994 kfree(cliprects);
8dc5d147 3995 kfree(request);
673a394b
EA
3996
3997 return ret;
3998}
3999
76446cac
JB
4000/*
4001 * Legacy execbuffer just creates an exec2 list from the original exec object
4002 * list array and passes it to the real function.
4003 */
4004int
4005i915_gem_execbuffer(struct drm_device *dev, void *data,
4006 struct drm_file *file_priv)
4007{
4008 struct drm_i915_gem_execbuffer *args = data;
4009 struct drm_i915_gem_execbuffer2 exec2;
4010 struct drm_i915_gem_exec_object *exec_list = NULL;
4011 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4012 int ret, i;
4013
4014#if WATCH_EXEC
4015 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4016 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4017#endif
4018
4019 if (args->buffer_count < 1) {
4020 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
4021 return -EINVAL;
4022 }
4023
4024 /* Copy in the exec list from userland */
4025 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
4026 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4027 if (exec_list == NULL || exec2_list == NULL) {
4028 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4029 args->buffer_count);
4030 drm_free_large(exec_list);
4031 drm_free_large(exec2_list);
4032 return -ENOMEM;
4033 }
4034 ret = copy_from_user(exec_list,
4035 (struct drm_i915_relocation_entry __user *)
4036 (uintptr_t) args->buffers_ptr,
4037 sizeof(*exec_list) * args->buffer_count);
4038 if (ret != 0) {
4039 DRM_ERROR("copy %d exec entries failed %d\n",
4040 args->buffer_count, ret);
4041 drm_free_large(exec_list);
4042 drm_free_large(exec2_list);
4043 return -EFAULT;
4044 }
4045
4046 for (i = 0; i < args->buffer_count; i++) {
4047 exec2_list[i].handle = exec_list[i].handle;
4048 exec2_list[i].relocation_count = exec_list[i].relocation_count;
4049 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
4050 exec2_list[i].alignment = exec_list[i].alignment;
4051 exec2_list[i].offset = exec_list[i].offset;
a6c45cf0 4052 if (INTEL_INFO(dev)->gen < 4)
76446cac
JB
4053 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
4054 else
4055 exec2_list[i].flags = 0;
4056 }
4057
4058 exec2.buffers_ptr = args->buffers_ptr;
4059 exec2.buffer_count = args->buffer_count;
4060 exec2.batch_start_offset = args->batch_start_offset;
4061 exec2.batch_len = args->batch_len;
4062 exec2.DR1 = args->DR1;
4063 exec2.DR4 = args->DR4;
4064 exec2.num_cliprects = args->num_cliprects;
4065 exec2.cliprects_ptr = args->cliprects_ptr;
852835f3 4066 exec2.flags = I915_EXEC_RENDER;
76446cac
JB
4067
4068 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
4069 if (!ret) {
4070 /* Copy the new buffer offsets back to the user's exec list. */
4071 for (i = 0; i < args->buffer_count; i++)
4072 exec_list[i].offset = exec2_list[i].offset;
4073 /* ... and back out to userspace */
4074 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4075 (uintptr_t) args->buffers_ptr,
4076 exec_list,
4077 sizeof(*exec_list) * args->buffer_count);
4078 if (ret) {
4079 ret = -EFAULT;
4080 DRM_ERROR("failed to copy %d exec entries "
4081 "back to user (%d)\n",
4082 args->buffer_count, ret);
4083 }
76446cac
JB
4084 }
4085
4086 drm_free_large(exec_list);
4087 drm_free_large(exec2_list);
4088 return ret;
4089}
4090
4091int
4092i915_gem_execbuffer2(struct drm_device *dev, void *data,
4093 struct drm_file *file_priv)
4094{
4095 struct drm_i915_gem_execbuffer2 *args = data;
4096 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4097 int ret;
4098
4099#if WATCH_EXEC
4100 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4101 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4102#endif
4103
4104 if (args->buffer_count < 1) {
4105 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4106 return -EINVAL;
4107 }
4108
4109 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4110 if (exec2_list == NULL) {
4111 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4112 args->buffer_count);
4113 return -ENOMEM;
4114 }
4115 ret = copy_from_user(exec2_list,
4116 (struct drm_i915_relocation_entry __user *)
4117 (uintptr_t) args->buffers_ptr,
4118 sizeof(*exec2_list) * args->buffer_count);
4119 if (ret != 0) {
4120 DRM_ERROR("copy %d exec entries failed %d\n",
4121 args->buffer_count, ret);
4122 drm_free_large(exec2_list);
4123 return -EFAULT;
4124 }
4125
4126 ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4127 if (!ret) {
4128 /* Copy the new buffer offsets back to the user's exec list. */
4129 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4130 (uintptr_t) args->buffers_ptr,
4131 exec2_list,
4132 sizeof(*exec2_list) * args->buffer_count);
4133 if (ret) {
4134 ret = -EFAULT;
4135 DRM_ERROR("failed to copy %d exec entries "
4136 "back to user (%d)\n",
4137 args->buffer_count, ret);
4138 }
4139 }
4140
4141 drm_free_large(exec2_list);
4142 return ret;
4143}
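/*
 * A minimal userspace sketch of driving this ioctl (assuming libdrm, a
 * single GEM handle batch_handle whose contents already end in
 * MI_BATCH_BUFFER_END, a byte count batch_bytes, and no relocations;
 * error handling omitted):
 *
 *	struct drm_i915_gem_exec_object2 exec_obj = {
 *		.handle = batch_handle,
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr  = (uintptr_t)&exec_obj,
 *		.buffer_count = 1,
 *		.batch_len    = batch_bytes,
 *		.flags        = I915_EXEC_RENDER,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * On return the kernel has written each object's final GTT offset back into
 * the offset field, which userspace caches as the next presumed_offset for
 * its relocations.
 */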
4144
673a394b 4145int
920afa77 4146i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
a00b10c3 4147 bool mappable, bool need_fence)
673a394b
EA
4148{
4149 struct drm_device *dev = obj->dev;
f13d3f73 4150 struct drm_i915_private *dev_priv = dev->dev_private;
23010e43 4151 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
673a394b
EA
4152 int ret;
4153
778c3544 4154 BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
23bc5982 4155 WARN_ON(i915_verify_lists(dev));
ac0c6b5a
CW
4156
4157 if (obj_priv->gtt_space != NULL) {
a00b10c3
CW
4158 if ((alignment && obj_priv->gtt_offset & (alignment - 1)) ||
4159 (need_fence && !obj_priv->fenceable) ||
4160 (mappable && !obj_priv->mappable)) {
ae7d49d8
CW
4161 WARN(obj_priv->pin_count,
4162 "bo is already pinned with incorrect alignment:"
a00b10c3
CW
4163 " offset=%x, req.alignment=%x, need_fence=%d, fenceable=%d, mappable=%d, cpu_accessible=%d\n",
4164 obj_priv->gtt_offset, alignment,
4165 need_fence, obj_priv->fenceable,
4166 mappable, obj_priv->mappable);
ac0c6b5a
CW
4167 ret = i915_gem_object_unbind(obj);
4168 if (ret)
4169 return ret;
4170 }
4171 }
4172
673a394b 4173 if (obj_priv->gtt_space == NULL) {
a00b10c3
CW
4174 ret = i915_gem_object_bind_to_gtt(obj, alignment,
4175 mappable, need_fence);
9731129c 4176 if (ret)
673a394b 4177 return ret;
22c344e9 4178 }
76446cac 4179
7465378f 4180 if (obj_priv->pin_count++ == 0) {
a00b10c3 4181 i915_gem_info_add_pin(dev_priv, obj_priv, mappable);
f13d3f73 4182 if (!obj_priv->active)
69dc4987 4183 list_move_tail(&obj_priv->mm_list,
f13d3f73 4184 &dev_priv->mm.pinned_list);
673a394b 4185 }
fb7d516a 4186 BUG_ON(!obj_priv->pin_mappable && mappable);
673a394b 4187
23bc5982 4188 WARN_ON(i915_verify_lists(dev));
673a394b
EA
4189 return 0;
4190}
4191
4192void
4193i915_gem_object_unpin(struct drm_gem_object *obj)
4194{
4195 struct drm_device *dev = obj->dev;
4196 drm_i915_private_t *dev_priv = dev->dev_private;
23010e43 4197 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
673a394b 4198
23bc5982 4199 WARN_ON(i915_verify_lists(dev));
7465378f 4200 BUG_ON(obj_priv->pin_count == 0);
673a394b
EA
4201 BUG_ON(obj_priv->gtt_space == NULL);
4202
7465378f 4203 if (--obj_priv->pin_count == 0) {
f13d3f73 4204 if (!obj_priv->active)
69dc4987 4205 list_move_tail(&obj_priv->mm_list,
673a394b 4206 &dev_priv->mm.inactive_list);
a00b10c3 4207 i915_gem_info_remove_pin(dev_priv, obj_priv);
673a394b 4208 }
23bc5982 4209 WARN_ON(i915_verify_lists(dev));
673a394b
EA
4210}
4211
4212int
4213i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4214 struct drm_file *file_priv)
4215{
4216 struct drm_i915_gem_pin *args = data;
4217 struct drm_gem_object *obj;
4218 struct drm_i915_gem_object *obj_priv;
4219 int ret;
4220
1d7cfea1
CW
4221 ret = i915_mutex_lock_interruptible(dev);
4222 if (ret)
4223 return ret;
673a394b
EA
4224
4225 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4226 if (obj == NULL) {
1d7cfea1
CW
4227 ret = -ENOENT;
4228 goto unlock;
673a394b 4229 }
23010e43 4230 obj_priv = to_intel_bo(obj);
673a394b 4231
bb6baf76
CW
4232 if (obj_priv->madv != I915_MADV_WILLNEED) {
4233 DRM_ERROR("Attempting to pin a purgeable buffer\n");
1d7cfea1
CW
4234 ret = -EINVAL;
4235 goto out;
3ef94daa
CW
4236 }
4237
79e53945
JB
4238 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
4239 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4240 args->handle);
1d7cfea1
CW
4241 ret = -EINVAL;
4242 goto out;
79e53945
JB
4243 }
4244
4245 obj_priv->user_pin_count++;
4246 obj_priv->pin_filp = file_priv;
4247 if (obj_priv->user_pin_count == 1) {
a00b10c3
CW
4248 ret = i915_gem_object_pin(obj, args->alignment,
4249 true, obj_priv->tiling_mode);
1d7cfea1
CW
4250 if (ret)
4251 goto out;
673a394b
EA
4252 }
4253
4254 /* XXX - flush the CPU caches for pinned objects
4255 * as the X server doesn't manage domains yet
4256 */
e47c68e9 4257 i915_gem_object_flush_cpu_write_domain(obj);
673a394b 4258 args->offset = obj_priv->gtt_offset;
1d7cfea1 4259out:
673a394b 4260 drm_gem_object_unreference(obj);
1d7cfea1 4261unlock:
673a394b 4262 mutex_unlock(&dev->struct_mutex);
1d7cfea1 4263 return ret;
673a394b
EA
4264}
4265
4266int
4267i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4268 struct drm_file *file_priv)
4269{
4270 struct drm_i915_gem_pin *args = data;
4271 struct drm_gem_object *obj;
79e53945 4272 struct drm_i915_gem_object *obj_priv;
76c1dec1 4273 int ret;
673a394b 4274
1d7cfea1
CW
4275 ret = i915_mutex_lock_interruptible(dev);
4276 if (ret)
4277 return ret;
673a394b
EA
4278
4279 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4280 if (obj == NULL) {
1d7cfea1
CW
4281 ret = -ENOENT;
4282 goto unlock;
673a394b 4283 }
23010e43 4284 obj_priv = to_intel_bo(obj);
76c1dec1 4285
79e53945
JB
4286 if (obj_priv->pin_filp != file_priv) {
4287 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4288 args->handle);
1d7cfea1
CW
4289 ret = -EINVAL;
4290 goto out;
79e53945
JB
4291 }
4292 obj_priv->user_pin_count--;
4293 if (obj_priv->user_pin_count == 0) {
4294 obj_priv->pin_filp = NULL;
4295 i915_gem_object_unpin(obj);
4296 }
673a394b 4297
1d7cfea1 4298out:
673a394b 4299 drm_gem_object_unreference(obj);
1d7cfea1 4300unlock:
673a394b 4301 mutex_unlock(&dev->struct_mutex);
1d7cfea1 4302 return ret;
673a394b
EA
4303}
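/*
 * These two ioctls are the legacy pinning interface, used chiefly by the
 * UMS X server for buffers that must stay resident (scanout, cursors).  A
 * hypothetical caller, assuming an authenticated DRM fd with sufficient
 * privileges and a GEM handle:
 *
 *	struct drm_i915_gem_pin pin = {
 *		.handle    = handle,
 *		.alignment = 4096,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin);
 *	... pin.offset now holds the object's fixed GTT address ...
 *
 *	struct drm_i915_gem_unpin unpin = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
 */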
4304
4305int
4306i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4307 struct drm_file *file_priv)
4308{
4309 struct drm_i915_gem_busy *args = data;
4310 struct drm_gem_object *obj;
4311 struct drm_i915_gem_object *obj_priv;
30dbf0c0
CW
4312 int ret;
4313
76c1dec1 4314 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 4315 if (ret)
76c1dec1 4316 return ret;
673a394b 4317
673a394b
EA
4318 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4319 if (obj == NULL) {
1d7cfea1
CW
4320 ret = -ENOENT;
4321 goto unlock;
673a394b 4322 }
1d7cfea1 4323 obj_priv = to_intel_bo(obj);
d1b851fc 4324
0be555b6
CW
4325 /* Count all active objects as busy, even if they are currently not used
4326 * by the gpu. Users of this interface expect objects to eventually
4327 * become non-busy without any further actions, so emit any
4328 * necessary flushes here.
c4de0a5d 4329 */
0be555b6
CW
4330 args->busy = obj_priv->active;
4331 if (args->busy) {
4332 /* Unconditionally flush objects, even when the gpu still uses this
4333 * object. Userspace calling this function indicates that it wants to
4334 * use this buffer sooner rather than later, so issuing the required
4335 * flush earlier is beneficial.
4336 */
c78ec30b
CW
4337 if (obj->write_domain & I915_GEM_GPU_DOMAINS)
4338 i915_gem_flush_ring(dev, file_priv,
9220434a
CW
4339 obj_priv->ring,
4340 0, obj->write_domain);
0be555b6
CW
4341
4342 /* Update the active list for the hardware's current position.
4343 * Otherwise this only updates on a delayed timer or when irqs
4344 * are actually unmasked, and our working set ends up being
4345 * larger than required.
4346 */
4347 i915_gem_retire_requests_ring(dev, obj_priv->ring);
4348
4349 args->busy = obj_priv->active;
4350 }
673a394b
EA
4351
4352 drm_gem_object_unreference(obj);
1d7cfea1 4353unlock:
673a394b 4354 mutex_unlock(&dev->struct_mutex);
1d7cfea1 4355 return ret;
673a394b
EA
4356}
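/*
 * A minimal userspace sketch (assuming an open DRM fd and a valid GEM
 * handle), e.g. to decide whether reusing a buffer would stall:
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	if (!busy.busy)
 *		... the object is idle and can be reused without waiting ...
 *
 * libdrm's Intel buffer manager, for example, can use this to pick an idle
 * buffer out of its free cache instead of allocating a new one.
 */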
4357
4358int
4359i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4360 struct drm_file *file_priv)
4361{
4362 return i915_gem_ring_throttle(dev, file_priv);
4363}
4364
3ef94daa
CW
4365int
4366i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4367 struct drm_file *file_priv)
4368{
4369 struct drm_i915_gem_madvise *args = data;
4370 struct drm_gem_object *obj;
4371 struct drm_i915_gem_object *obj_priv;
76c1dec1 4372 int ret;
3ef94daa
CW
4373
4374 switch (args->madv) {
4375 case I915_MADV_DONTNEED:
4376 case I915_MADV_WILLNEED:
4377 break;
4378 default:
4379 return -EINVAL;
4380 }
4381
1d7cfea1
CW
4382 ret = i915_mutex_lock_interruptible(dev);
4383 if (ret)
4384 return ret;
4385
3ef94daa
CW
4386 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4387 if (obj == NULL) {
1d7cfea1
CW
4388 ret = -ENOENT;
4389 goto unlock;
3ef94daa 4390 }
23010e43 4391 obj_priv = to_intel_bo(obj);
3ef94daa
CW
4392
4393 if (obj_priv->pin_count) {
1d7cfea1
CW
4394 ret = -EINVAL;
4395 goto out;
3ef94daa
CW
4396 }
4397
bb6baf76
CW
4398 if (obj_priv->madv != __I915_MADV_PURGED)
4399 obj_priv->madv = args->madv;
3ef94daa 4400
2d7ef395
CW
4401 /* if the object is no longer bound, discard its backing storage */
4402 if (i915_gem_object_is_purgeable(obj_priv) &&
4403 obj_priv->gtt_space == NULL)
4404 i915_gem_object_truncate(obj);
4405
bb6baf76
CW
4406 args->retained = obj_priv->madv != __I915_MADV_PURGED;
4407
1d7cfea1 4408out:
3ef94daa 4409 drm_gem_object_unreference(obj);
1d7cfea1 4410unlock:
3ef94daa 4411 mutex_unlock(&dev->struct_mutex);
1d7cfea1 4412 return ret;
3ef94daa
CW
4413}
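/*
 * A minimal userspace sketch (assuming an open DRM fd and a GEM handle the
 * caller has stopped using, e.g. a buffer parked in a userspace cache):
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv   = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 * Marking the buffer DONTNEED lets the shrinker below reap its backing
 * pages under memory pressure; before reuse the cache switches it back to
 * I915_MADV_WILLNEED and checks madv.retained to learn whether the pages
 * survived or the object must be repopulated.
 */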
4414
ac52bc56
DV
4415struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
4416 size_t size)
4417{
73aa808f 4418 struct drm_i915_private *dev_priv = dev->dev_private;
c397b908 4419 struct drm_i915_gem_object *obj;
ac52bc56 4420
c397b908
DV
4421 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
4422 if (obj == NULL)
4423 return NULL;
673a394b 4424
c397b908
DV
4425 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4426 kfree(obj);
4427 return NULL;
4428 }
673a394b 4429
73aa808f
CW
4430 i915_gem_info_add_obj(dev_priv, size);
4431
c397b908
DV
4432 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4433 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
673a394b 4434
c397b908 4435 obj->agp_type = AGP_USER_MEMORY;
62b8b215 4436 obj->base.driver_private = NULL;
c397b908 4437 obj->fence_reg = I915_FENCE_REG_NONE;
69dc4987
CW
4438 INIT_LIST_HEAD(&obj->mm_list);
4439 INIT_LIST_HEAD(&obj->ring_list);
c397b908 4440 INIT_LIST_HEAD(&obj->gpu_write_list);
c397b908 4441 obj->madv = I915_MADV_WILLNEED;
a00b10c3
CW
4442 obj->fenceable = true;
4443 obj->mappable = true;
de151cf6 4444
c397b908
DV
4445 return &obj->base;
4446}
4447
4448int i915_gem_init_object(struct drm_gem_object *obj)
4449{
4450 BUG();
de151cf6 4451
673a394b
EA
4452 return 0;
4453}
4454
be72615b 4455static void i915_gem_free_object_tail(struct drm_gem_object *obj)
673a394b 4456{
de151cf6 4457 struct drm_device *dev = obj->dev;
be72615b 4458 drm_i915_private_t *dev_priv = dev->dev_private;
23010e43 4459 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
be72615b 4460 int ret;
673a394b 4461
be72615b
CW
4462 ret = i915_gem_object_unbind(obj);
4463 if (ret == -ERESTARTSYS) {
69dc4987 4464 list_move(&obj_priv->mm_list,
be72615b
CW
4465 &dev_priv->mm.deferred_free_list);
4466 return;
4467 }
673a394b 4468
39a01d1f 4469 if (obj->map_list.map)
7e616158 4470 i915_gem_free_mmap_offset(obj);
de151cf6 4471
c397b908 4472 drm_gem_object_release(obj);
73aa808f 4473 i915_gem_info_remove_obj(dev_priv, obj->size);
c397b908 4474
9a298b2a 4475 kfree(obj_priv->page_cpu_valid);
280b713b 4476 kfree(obj_priv->bit_17);
c397b908 4477 kfree(obj_priv);
673a394b
EA
4478}
4479
be72615b
CW
4480void i915_gem_free_object(struct drm_gem_object *obj)
4481{
4482 struct drm_device *dev = obj->dev;
4483 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4484
4485 trace_i915_gem_object_destroy(obj);
4486
4487 while (obj_priv->pin_count > 0)
4488 i915_gem_object_unpin(obj);
4489
4490 if (obj_priv->phys_obj)
4491 i915_gem_detach_phys_object(dev, obj);
4492
4493 i915_gem_free_object_tail(obj);
4494}
4495
29105ccc
CW
4496int
4497i915_gem_idle(struct drm_device *dev)
4498{
4499 drm_i915_private_t *dev_priv = dev->dev_private;
4500 int ret;
28dfe52a 4501
29105ccc 4502 mutex_lock(&dev->struct_mutex);
1c5d22f7 4503
87acb0a5 4504 if (dev_priv->mm.suspended) {
29105ccc
CW
4505 mutex_unlock(&dev->struct_mutex);
4506 return 0;
28dfe52a
EA
4507 }
4508
29105ccc 4509 ret = i915_gpu_idle(dev);
6dbe2772
KP
4510 if (ret) {
4511 mutex_unlock(&dev->struct_mutex);
673a394b 4512 return ret;
6dbe2772 4513 }
673a394b 4514
29105ccc
CW
4515 /* Under UMS, be paranoid and evict. */
4516 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
5eac3ab4 4517 ret = i915_gem_evict_inactive(dev, false);
29105ccc
CW
4518 if (ret) {
4519 mutex_unlock(&dev->struct_mutex);
4520 return ret;
4521 }
4522 }
4523
4524 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4525 * We need to replace this with a semaphore, or something.
4526 * And not confound mm.suspended!
4527 */
4528 dev_priv->mm.suspended = 1;
bc0c7f14 4529 del_timer_sync(&dev_priv->hangcheck_timer);
29105ccc
CW
4530
4531 i915_kernel_lost_context(dev);
6dbe2772 4532 i915_gem_cleanup_ringbuffer(dev);
29105ccc 4533
6dbe2772
KP
4534 mutex_unlock(&dev->struct_mutex);
4535
29105ccc
CW
4536 /* Cancel the retire work handler, which should be idle now. */
4537 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4538
673a394b
EA
4539 return 0;
4540}
4541
e552eb70
JB
4542/*
4543 * 965+ support PIPE_CONTROL commands, which provide finer grained control
4544 * over cache flushing.
4545 */
8187a2b7 4546static int
e552eb70
JB
4547i915_gem_init_pipe_control(struct drm_device *dev)
4548{
4549 drm_i915_private_t *dev_priv = dev->dev_private;
4550 struct drm_gem_object *obj;
4551 struct drm_i915_gem_object *obj_priv;
4552 int ret;
4553
34dc4d44 4554 obj = i915_gem_alloc_object(dev, 4096);
e552eb70
JB
4555 if (obj == NULL) {
4556 DRM_ERROR("Failed to allocate seqno page\n");
4557 ret = -ENOMEM;
4558 goto err;
4559 }
4560 obj_priv = to_intel_bo(obj);
4561 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4562
a00b10c3 4563 ret = i915_gem_object_pin(obj, 4096, true, false);
e552eb70
JB
4564 if (ret)
4565 goto err_unref;
4566
4567 dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4568 dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4569 if (dev_priv->seqno_page == NULL)
4570 goto err_unpin;
4571
4572 dev_priv->seqno_obj = obj;
4573 memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4574
4575 return 0;
4576
4577err_unpin:
4578 i915_gem_object_unpin(obj);
4579err_unref:
4580 drm_gem_object_unreference(obj);
4581err:
4582 return ret;
4583}
4584
8187a2b7
ZN
4585
4586static void
e552eb70
JB
4587i915_gem_cleanup_pipe_control(struct drm_device *dev)
4588{
4589 drm_i915_private_t *dev_priv = dev->dev_private;
4590 struct drm_gem_object *obj;
4591 struct drm_i915_gem_object *obj_priv;
4592
4593 obj = dev_priv->seqno_obj;
4594 obj_priv = to_intel_bo(obj);
4595 kunmap(obj_priv->pages[0]);
4596 i915_gem_object_unpin(obj);
4597 drm_gem_object_unreference(obj);
4598 dev_priv->seqno_obj = NULL;
4599
4600 dev_priv->seqno_page = NULL;
673a394b
EA
4601}
4602
8187a2b7
ZN
4603int
4604i915_gem_init_ringbuffer(struct drm_device *dev)
4605{
4606 drm_i915_private_t *dev_priv = dev->dev_private;
4607 int ret;
68f95ba9 4608
8187a2b7
ZN
4609 if (HAS_PIPE_CONTROL(dev)) {
4610 ret = i915_gem_init_pipe_control(dev);
4611 if (ret)
4612 return ret;
4613 }
68f95ba9 4614
5c1143bb 4615 ret = intel_init_render_ring_buffer(dev);
68f95ba9
CW
4616 if (ret)
4617 goto cleanup_pipe_control;
4618
4619 if (HAS_BSD(dev)) {
5c1143bb 4620 ret = intel_init_bsd_ring_buffer(dev);
68f95ba9
CW
4621 if (ret)
4622 goto cleanup_render_ring;
d1b851fc 4623 }
68f95ba9 4624
549f7365
CW
4625 if (HAS_BLT(dev)) {
4626 ret = intel_init_blt_ring_buffer(dev);
4627 if (ret)
4628 goto cleanup_bsd_ring;
4629 }
4630
6f392d54
CW
4631 dev_priv->next_seqno = 1;
4632
68f95ba9
CW
4633 return 0;
4634
549f7365 4635cleanup_bsd_ring:
78501eac 4636 intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
68f95ba9 4637cleanup_render_ring:
78501eac 4638 intel_cleanup_ring_buffer(&dev_priv->render_ring);
68f95ba9
CW
4639cleanup_pipe_control:
4640 if (HAS_PIPE_CONTROL(dev))
4641 i915_gem_cleanup_pipe_control(dev);
8187a2b7
ZN
4642 return ret;
4643}
4644
4645void
4646i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4647{
4648 drm_i915_private_t *dev_priv = dev->dev_private;
4649
78501eac
CW
4650 intel_cleanup_ring_buffer(&dev_priv->render_ring);
4651 intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
4652 intel_cleanup_ring_buffer(&dev_priv->blt_ring);
8187a2b7
ZN
4653 if (HAS_PIPE_CONTROL(dev))
4654 i915_gem_cleanup_pipe_control(dev);
4655}
4656
673a394b
EA
4657int
4658i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4659 struct drm_file *file_priv)
4660{
4661 drm_i915_private_t *dev_priv = dev->dev_private;
4662 int ret;
4663
79e53945
JB
4664 if (drm_core_check_feature(dev, DRIVER_MODESET))
4665 return 0;
4666
ba1234d1 4667 if (atomic_read(&dev_priv->mm.wedged)) {
673a394b 4668 DRM_ERROR("Reenabling wedged hardware, good luck\n");
ba1234d1 4669 atomic_set(&dev_priv->mm.wedged, 0);
673a394b
EA
4670 }
4671
673a394b 4672 mutex_lock(&dev->struct_mutex);
9bb2d6f9
EA
4673 dev_priv->mm.suspended = 0;
4674
4675 ret = i915_gem_init_ringbuffer(dev);
d816f6ac
WF
4676 if (ret != 0) {
4677 mutex_unlock(&dev->struct_mutex);
9bb2d6f9 4678 return ret;
d816f6ac 4679 }
9bb2d6f9 4680
69dc4987 4681 BUG_ON(!list_empty(&dev_priv->mm.active_list));
852835f3 4682 BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
87acb0a5 4683 BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
549f7365 4684 BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
673a394b
EA
4685 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4686 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
852835f3 4687 BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
87acb0a5 4688 BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
549f7365 4689 BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
673a394b 4690 mutex_unlock(&dev->struct_mutex);
dbb19d30 4691
5f35308b
CW
4692 ret = drm_irq_install(dev);
4693 if (ret)
4694 goto cleanup_ringbuffer;
dbb19d30 4695
673a394b 4696 return 0;
5f35308b
CW
4697
4698cleanup_ringbuffer:
4699 mutex_lock(&dev->struct_mutex);
4700 i915_gem_cleanup_ringbuffer(dev);
4701 dev_priv->mm.suspended = 1;
4702 mutex_unlock(&dev->struct_mutex);
4703
4704 return ret;
673a394b
EA
4705}
4706
4707int
4708i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4709 struct drm_file *file_priv)
4710{
79e53945
JB
4711 if (drm_core_check_feature(dev, DRIVER_MODESET))
4712 return 0;
4713
dbb19d30 4714 drm_irq_uninstall(dev);
e6890f6f 4715 return i915_gem_idle(dev);
673a394b
EA
4716}
4717
4718void
4719i915_gem_lastclose(struct drm_device *dev)
4720{
4721 int ret;
673a394b 4722
e806b495
EA
4723 if (drm_core_check_feature(dev, DRIVER_MODESET))
4724 return;
4725
6dbe2772
KP
4726 ret = i915_gem_idle(dev);
4727 if (ret)
4728 DRM_ERROR("failed to idle hardware: %d\n", ret);
673a394b
EA
4729}
4730
64193406
CW
4731static void
4732init_ring_lists(struct intel_ring_buffer *ring)
4733{
4734 INIT_LIST_HEAD(&ring->active_list);
4735 INIT_LIST_HEAD(&ring->request_list);
4736 INIT_LIST_HEAD(&ring->gpu_write_list);
4737}
4738
673a394b
EA
4739void
4740i915_gem_load(struct drm_device *dev)
4741{
b5aa8a0f 4742 int i;
673a394b
EA
4743 drm_i915_private_t *dev_priv = dev->dev_private;
4744
69dc4987 4745 INIT_LIST_HEAD(&dev_priv->mm.active_list);
673a394b
EA
4746 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4747 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
f13d3f73 4748 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
a09ba7fa 4749 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
be72615b 4750 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
64193406
CW
4751 init_ring_lists(&dev_priv->render_ring);
4752 init_ring_lists(&dev_priv->bsd_ring);
4753 init_ring_lists(&dev_priv->blt_ring);
007cc8ac
DV
4754 for (i = 0; i < 16; i++)
4755 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
673a394b
EA
4756 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4757 i915_gem_retire_work_handler);
30dbf0c0 4758 init_completion(&dev_priv->error_completion);
31169714 4759
94400120
DA
4760 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4761 if (IS_GEN3(dev)) {
4762 u32 tmp = I915_READ(MI_ARB_STATE);
4763 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
4764 /* arb state is a masked write, so set bit + bit in mask */
4765 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
4766 I915_WRITE(MI_ARB_STATE, tmp);
4767 }
4768 }
4769
de151cf6 4770 /* Old X drivers will take 0-2 for front, back, depth buffers */
b397c836
EA
4771 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4772 dev_priv->fence_reg_start = 3;
de151cf6 4773
a6c45cf0 4774 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
de151cf6
JB
4775 dev_priv->num_fence_regs = 16;
4776 else
4777 dev_priv->num_fence_regs = 8;
4778
b5aa8a0f 4779 /* Initialize fence registers to zero */
a6c45cf0
CW
4780 switch (INTEL_INFO(dev)->gen) {
4781 case 6:
4782 for (i = 0; i < 16; i++)
4783 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
4784 break;
4785 case 5:
4786 case 4:
b5aa8a0f
GH
4787 for (i = 0; i < 16; i++)
4788 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
a6c45cf0
CW
4789 break;
4790 case 3:
b5aa8a0f
GH
4791 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4792 for (i = 0; i < 8; i++)
4793 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
a6c45cf0
CW
4794 case 2:
4795 for (i = 0; i < 8; i++)
4796 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4797 break;
b5aa8a0f 4798 }
673a394b 4799 i915_gem_detect_bit_6_swizzle(dev);
6b95a207 4800 init_waitqueue_head(&dev_priv->pending_flip_queue);
17250b71
CW
4801
4802 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4803 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4804 register_shrinker(&dev_priv->mm.inactive_shrinker);
673a394b 4805}
71acb5eb
DA
4806
4807/*
4808 * Create a physically contiguous memory object for this object
4809 * e.g. for cursor + overlay regs
4810 */
995b6762
CW
4811static int i915_gem_init_phys_object(struct drm_device *dev,
4812 int id, int size, int align)
71acb5eb
DA
4813{
4814 drm_i915_private_t *dev_priv = dev->dev_private;
4815 struct drm_i915_gem_phys_object *phys_obj;
4816 int ret;
4817
4818 if (dev_priv->mm.phys_objs[id - 1] || !size)
4819 return 0;
4820
9a298b2a 4821 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
71acb5eb
DA
4822 if (!phys_obj)
4823 return -ENOMEM;
4824
4825 phys_obj->id = id;
4826
6eeefaf3 4827 phys_obj->handle = drm_pci_alloc(dev, size, align);
71acb5eb
DA
4828 if (!phys_obj->handle) {
4829 ret = -ENOMEM;
4830 goto kfree_obj;
4831 }
4832#ifdef CONFIG_X86
4833 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4834#endif
4835
4836 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4837
4838 return 0;
4839kfree_obj:
9a298b2a 4840 kfree(phys_obj);
71acb5eb
DA
4841 return ret;
4842}
4843
995b6762 4844static void i915_gem_free_phys_object(struct drm_device *dev, int id)
71acb5eb
DA
4845{
4846 drm_i915_private_t *dev_priv = dev->dev_private;
4847 struct drm_i915_gem_phys_object *phys_obj;
4848
4849 if (!dev_priv->mm.phys_objs[id - 1])
4850 return;
4851
4852 phys_obj = dev_priv->mm.phys_objs[id - 1];
4853 if (phys_obj->cur_obj) {
4854 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4855 }
4856
4857#ifdef CONFIG_X86
4858 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4859#endif
4860 drm_pci_free(dev, phys_obj->handle);
4861 kfree(phys_obj);
4862 dev_priv->mm.phys_objs[id - 1] = NULL;
4863}
4864
4865void i915_gem_free_all_phys_object(struct drm_device *dev)
4866{
4867 int i;
4868
260883c8 4869 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
71acb5eb
DA
4870 i915_gem_free_phys_object(dev, i);
4871}
4872
4873void i915_gem_detach_phys_object(struct drm_device *dev,
4874 struct drm_gem_object *obj)
4875{
e5281ccd
CW
4876 struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
4877 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4878 char *vaddr;
71acb5eb 4879 int i;
71acb5eb
DA
4880 int page_count;
4881
71acb5eb
DA
4882 if (!obj_priv->phys_obj)
4883 return;
e5281ccd 4884 vaddr = obj_priv->phys_obj->handle->vaddr;
71acb5eb
DA
4885
4886 page_count = obj->size / PAGE_SIZE;
4887
4888 for (i = 0; i < page_count; i++) {
e5281ccd
CW
4889 struct page *page = read_cache_page_gfp(mapping, i,
4890 GFP_HIGHUSER | __GFP_RECLAIMABLE);
4891 if (!IS_ERR(page)) {
4892 char *dst = kmap_atomic(page);
4893 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4894 kunmap_atomic(dst);
4895
4896 drm_clflush_pages(&page, 1);
4897
4898 set_page_dirty(page);
4899 mark_page_accessed(page);
4900 page_cache_release(page);
4901 }
71acb5eb 4902 }
71acb5eb 4903 drm_agp_chipset_flush(dev);
d78b47b9 4904
71acb5eb
DA
4905 obj_priv->phys_obj->cur_obj = NULL;
4906 obj_priv->phys_obj = NULL;
4907}
4908
4909int
4910i915_gem_attach_phys_object(struct drm_device *dev,
6eeefaf3
CW
4911 struct drm_gem_object *obj,
4912 int id,
4913 int align)
71acb5eb 4914{
e5281ccd 4915 struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
71acb5eb
DA
4916 drm_i915_private_t *dev_priv = dev->dev_private;
4917 struct drm_i915_gem_object *obj_priv;
4918 int ret = 0;
4919 int page_count;
4920 int i;
4921
4922 if (id > I915_MAX_PHYS_OBJECT)
4923 return -EINVAL;
4924
23010e43 4925 obj_priv = to_intel_bo(obj);
71acb5eb
DA
4926
4927 if (obj_priv->phys_obj) {
4928 if (obj_priv->phys_obj->id == id)
4929 return 0;
4930 i915_gem_detach_phys_object(dev, obj);
4931 }
4932
71acb5eb
DA
4933 /* create a new object */
4934 if (!dev_priv->mm.phys_objs[id - 1]) {
4935 ret = i915_gem_init_phys_object(dev, id,
6eeefaf3 4936 obj->size, align);
71acb5eb 4937 if (ret) {
aeb565df 4938 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
e5281ccd 4939 return ret;
71acb5eb
DA
4940 }
4941 }
4942
4943 /* bind to the object */
4944 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4945 obj_priv->phys_obj->cur_obj = obj;
4946
71acb5eb
DA
4947 page_count = obj->size / PAGE_SIZE;
4948
4949 for (i = 0; i < page_count; i++) {
e5281ccd
CW
4950 struct page *page;
4951 char *dst, *src;
4952
4953 page = read_cache_page_gfp(mapping, i,
4954 GFP_HIGHUSER | __GFP_RECLAIMABLE);
4955 if (IS_ERR(page))
4956 return PTR_ERR(page);
71acb5eb 4957
ff75b9bc 4958 src = kmap_atomic(page);
e5281ccd 4959 dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
71acb5eb 4960 memcpy(dst, src, PAGE_SIZE);
3e4d3af5 4961 kunmap_atomic(src);
71acb5eb 4962
e5281ccd
CW
4963 mark_page_accessed(page);
4964 page_cache_release(page);
4965 }
d78b47b9 4966
71acb5eb 4967 return 0;
71acb5eb
DA
4968}
4969
4970static int
4971i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4972 struct drm_i915_gem_pwrite *args,
4973 struct drm_file *file_priv)
4974{
23010e43 4975 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
71acb5eb
DA
4976 void *obj_addr;
4977 int ret;
4978 char __user *user_data;
4979
4980 user_data = (char __user *) (uintptr_t) args->data_ptr;
4981 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4982
44d98a61 4983 DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
71acb5eb
DA
4984 ret = copy_from_user(obj_addr, user_data, args->size);
4985 if (ret)
4986 return -EFAULT;
4987
4988 drm_agp_chipset_flush(dev);
4989 return 0;
4990}
b962442e 4991
f787a5f5 4992void i915_gem_release(struct drm_device *dev, struct drm_file *file)
b962442e 4993{
f787a5f5 4994 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e
EA
4995
4996 /* Clean up our request list when the client is going away, so that
4997 * later retire_requests won't dereference our soon-to-be-gone
4998 * file_priv.
4999 */
1c25595f 5000 spin_lock(&file_priv->mm.lock);
f787a5f5
CW
5001 while (!list_empty(&file_priv->mm.request_list)) {
5002 struct drm_i915_gem_request *request;
5003
5004 request = list_first_entry(&file_priv->mm.request_list,
5005 struct drm_i915_gem_request,
5006 client_list);
5007 list_del(&request->client_list);
5008 request->file_priv = NULL;
5009 }
1c25595f 5010 spin_unlock(&file_priv->mm.lock);
b962442e 5011}
31169714 5012
1637ef41
CW
5013static int
5014i915_gpu_is_active(struct drm_device *dev)
5015{
5016 drm_i915_private_t *dev_priv = dev->dev_private;
5017 int lists_empty;
5018
1637ef41 5019 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
17250b71 5020 list_empty(&dev_priv->mm.active_list);
1637ef41
CW
5021
5022 return !lists_empty;
5023}
5024
31169714 5025static int
17250b71
CW
5026i915_gem_inactive_shrink(struct shrinker *shrinker,
5027 int nr_to_scan,
5028 gfp_t gfp_mask)
31169714 5029{
17250b71
CW
5030 struct drm_i915_private *dev_priv =
5031 container_of(shrinker,
5032 struct drm_i915_private,
5033 mm.inactive_shrinker);
5034 struct drm_device *dev = dev_priv->dev;
5035 struct drm_i915_gem_object *obj, *next;
5036 int cnt;
5037
5038 if (!mutex_trylock(&dev->struct_mutex))
bbe2e11a 5039 return 0;
31169714
CW
5040
5041 /* "fast-path" to count number of available objects */
5042 if (nr_to_scan == 0) {
17250b71
CW
5043 cnt = 0;
5044 list_for_each_entry(obj,
5045 &dev_priv->mm.inactive_list,
5046 mm_list)
5047 cnt++;
5048 mutex_unlock(&dev->struct_mutex);
5049 return cnt / 100 * sysctl_vfs_cache_pressure;
31169714
CW
5050 }
5051
1637ef41 5052rescan:
31169714 5053 /* first scan for clean buffers */
17250b71 5054 i915_gem_retire_requests(dev);
31169714 5055
17250b71
CW
5056 list_for_each_entry_safe(obj, next,
5057 &dev_priv->mm.inactive_list,
5058 mm_list) {
5059 if (i915_gem_object_is_purgeable(obj)) {
5060 i915_gem_object_unbind(&obj->base);
5061 if (--nr_to_scan == 0)
5062 break;
31169714 5063 }
31169714
CW
5064 }
5065
5066 /* second pass, evict/count anything still on the inactive list */
17250b71
CW
5067 cnt = 0;
5068 list_for_each_entry_safe(obj, next,
5069 &dev_priv->mm.inactive_list,
5070 mm_list) {
5071 if (nr_to_scan) {
5072 i915_gem_object_unbind(&obj->base);
5073 nr_to_scan--;
5074 } else
5075 cnt++;
5076 }
5077
5078 if (nr_to_scan && i915_gpu_is_active(dev)) {
1637ef41
CW
5079 /*
5080 * We are desperate for pages, so as a last resort, wait
5081 * for the GPU to finish and discard whatever we can.
5082 * This dramatically reduces the number of
5083 * OOM-killer events whilst running the GPU aggressively.
5084 */
17250b71 5085 if (i915_gpu_idle(dev) == 0)
1637ef41
CW
5086 goto rescan;
5087 }
17250b71
CW
5088 mutex_unlock(&dev->struct_mutex);
5089 return cnt / 100 * sysctl_vfs_cache_pressure;
31169714 5090}
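/*
 * The "cnt / 100 * sysctl_vfs_cache_pressure" scaling follows the same
 * convention as the VFS dentry and inode shrinkers, so with the default
 * vfs_cache_pressure of 100 the inactive-object count is reported to the
 * VM unscaled and the GPU's buffers see the same relative pressure as the
 * filesystem caches.
 */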