git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - drivers/gpu/drm/i915/i915_gem.c
drm/i915: tune down DIDL warning about too many outputs
673a394b
EA
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
760285e7
DH
28#include <drm/drmP.h>
29#include <drm/i915_drm.h>
673a394b 30#include "i915_drv.h"
1c5d22f7 31#include "i915_trace.h"
652c393a 32#include "intel_drv.h"
5949eac4 33#include <linux/shmem_fs.h>
5a0e3ad6 34#include <linux/slab.h>
673a394b 35#include <linux/swap.h>
79e53945 36#include <linux/pci.h>
1286ff73 37#include <linux/dma-buf.h>
673a394b 38
05394f39
CW
39static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
40static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
88241785
CW
41static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
42 unsigned alignment,
86a1ee26
CW
43 bool map_and_fenceable,
44 bool nonblocking);
05394f39
CW
45static int i915_gem_phys_pwrite(struct drm_device *dev,
46 struct drm_i915_gem_object *obj,
71acb5eb 47 struct drm_i915_gem_pwrite *args,
05394f39 48 struct drm_file *file);
673a394b 49
61050808
CW
50static void i915_gem_write_fence(struct drm_device *dev, int reg,
51 struct drm_i915_gem_object *obj);
52static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
53 struct drm_i915_fence_reg *fence,
54 bool enable);
55
17250b71 56static int i915_gem_inactive_shrink(struct shrinker *shrinker,
1495f230 57 struct shrink_control *sc);
6c085a72
CW
58static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
59static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
8c59967c 60static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
31169714 61
61050808
CW
62static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
63{
64 if (obj->tiling_mode)
65 i915_gem_release_mmap(obj);
66
67 /* As we do not have an associated fence register, we will force
68 * a tiling change if we ever need to acquire one.
69 */
5d82e3e6 70 obj->fence_dirty = false;
61050808
CW
71 obj->fence_reg = I915_FENCE_REG_NONE;
72}
73
73aa808f
CW
74/* some bookkeeping */
75static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
76 size_t size)
77{
78 dev_priv->mm.object_count++;
79 dev_priv->mm.object_memory += size;
80}
81
82static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
83 size_t size)
84{
85 dev_priv->mm.object_count--;
86 dev_priv->mm.object_memory -= size;
87}
88
21dd3734 89static int
33196ded 90i915_gem_wait_for_error(struct i915_gpu_error *error)
30dbf0c0 91{
30dbf0c0
CW
92 int ret;
93
7abb690a
DV
94#define EXIT_COND (!i915_reset_in_progress(error) || \
95 i915_terminally_wedged(error))
1f83fee0 96 if (EXIT_COND)
30dbf0c0
CW
97 return 0;
98
0a6759c6
DV
99 /*
100 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
101 * userspace. If it takes that long something really bad is going on and
102 * we should simply try to bail out and fail as gracefully as possible.
103 */
1f83fee0
DV
104 ret = wait_event_interruptible_timeout(error->reset_queue,
105 EXIT_COND,
106 10*HZ);
0a6759c6
DV
107 if (ret == 0) {
108 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
109 return -EIO;
110 } else if (ret < 0) {
30dbf0c0 111 return ret;
0a6759c6 112 }
1f83fee0 113#undef EXIT_COND
30dbf0c0 114
21dd3734 115 return 0;
30dbf0c0
CW
116}
117
54cf91dc 118int i915_mutex_lock_interruptible(struct drm_device *dev)
76c1dec1 119{
33196ded 120 struct drm_i915_private *dev_priv = dev->dev_private;
76c1dec1
CW
121 int ret;
122
33196ded 123 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
76c1dec1
CW
124 if (ret)
125 return ret;
126
127 ret = mutex_lock_interruptible(&dev->struct_mutex);
128 if (ret)
129 return ret;
130
23bc5982 131 WARN_ON(i915_verify_lists(dev));
76c1dec1
CW
132 return 0;
133}
30dbf0c0 134
7d1c4804 135static inline bool
05394f39 136i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
7d1c4804 137{
6c085a72 138 return obj->gtt_space && !obj->active;
7d1c4804
CW
139}
140
79e53945
JB
141int
142i915_gem_init_ioctl(struct drm_device *dev, void *data,
05394f39 143 struct drm_file *file)
79e53945 144{
93d18799 145 struct drm_i915_private *dev_priv = dev->dev_private;
79e53945 146 struct drm_i915_gem_init *args = data;
2021746e 147
7bb6fb8d
DV
148 if (drm_core_check_feature(dev, DRIVER_MODESET))
149 return -ENODEV;
150
2021746e
CW
151 if (args->gtt_start >= args->gtt_end ||
152 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
153 return -EINVAL;
79e53945 154
f534bc0b
DV
155 /* GEM with user mode setting was never supported on ilk and later. */
156 if (INTEL_INFO(dev)->gen >= 5)
157 return -ENODEV;
158
79e53945 159 mutex_lock(&dev->struct_mutex);
d7e5008f
BW
160 i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
161 args->gtt_end);
93d18799 162 dev_priv->gtt.mappable_end = args->gtt_end;
673a394b
EA
163 mutex_unlock(&dev->struct_mutex);
164
2021746e 165 return 0;
673a394b
EA
166}
167
5a125c3c
EA
168int
169i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
05394f39 170 struct drm_file *file)
5a125c3c 171{
73aa808f 172 struct drm_i915_private *dev_priv = dev->dev_private;
5a125c3c 173 struct drm_i915_gem_get_aperture *args = data;
6299f992
CW
174 struct drm_i915_gem_object *obj;
175 size_t pinned;
5a125c3c 176
6299f992 177 pinned = 0;
73aa808f 178 mutex_lock(&dev->struct_mutex);
35c20a60 179 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1b50247a
CW
180 if (obj->pin_count)
181 pinned += obj->gtt_space->size;
73aa808f 182 mutex_unlock(&dev->struct_mutex);
5a125c3c 183
5d4545ae 184 args->aper_size = dev_priv->gtt.total;
0206e353 185 args->aper_available_size = args->aper_size - pinned;
6299f992 186
5a125c3c
EA
187 return 0;
188}
189
42dcedd4
CW
190void *i915_gem_object_alloc(struct drm_device *dev)
191{
192 struct drm_i915_private *dev_priv = dev->dev_private;
193 return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
194}
195
196void i915_gem_object_free(struct drm_i915_gem_object *obj)
197{
198 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
199 kmem_cache_free(dev_priv->slab, obj);
200}
201
ff72145b
DA
202static int
203i915_gem_create(struct drm_file *file,
204 struct drm_device *dev,
205 uint64_t size,
206 uint32_t *handle_p)
673a394b 207{
05394f39 208 struct drm_i915_gem_object *obj;
a1a2d1d3
PP
209 int ret;
210 u32 handle;
673a394b 211
ff72145b 212 size = roundup(size, PAGE_SIZE);
8ffc0246
CW
213 if (size == 0)
214 return -EINVAL;
673a394b
EA
215
216 /* Allocate the new object */
ff72145b 217 obj = i915_gem_alloc_object(dev, size);
673a394b
EA
218 if (obj == NULL)
219 return -ENOMEM;
220
05394f39 221 ret = drm_gem_handle_create(file, &obj->base, &handle);
1dfd9754 222 if (ret) {
05394f39
CW
223 drm_gem_object_release(&obj->base);
224 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
42dcedd4 225 i915_gem_object_free(obj);
673a394b 226 return ret;
1dfd9754 227 }
673a394b 228
202f2fef 229 /* drop reference from allocate - handle holds it now */
05394f39 230 drm_gem_object_unreference(&obj->base);
202f2fef
CW
231 trace_i915_gem_object_create(obj);
232
ff72145b 233 *handle_p = handle;
673a394b
EA
234 return 0;
235}
236
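The create path above is reached from userspace through DRM_IOCTL_I915_GEM_CREATE. A minimal sketch of a caller, assuming a DRM file descriptor already opened on the i915 device and libdrm's i915_drm.h on the include path (illustrative only, not part of i915_gem.c):

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Illustrative sketch: ask GEM for a buffer of at least `size` bytes; the
 * kernel rounds the size up to PAGE_SIZE (see i915_gem_create() above) and
 * returns a handle naming the new object. */
static int gem_create(int drm_fd, uint64_t size, uint32_t *handle)
{
        struct drm_i915_gem_create create;

        memset(&create, 0, sizeof(create));
        create.size = size;

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &create))
                return -1;      /* errno: EINVAL for size == 0, ENOMEM, ... */

        *handle = create.handle;
        return 0;
}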
ff72145b
DA
237int
238i915_gem_dumb_create(struct drm_file *file,
239 struct drm_device *dev,
240 struct drm_mode_create_dumb *args)
241{
242 /* have to work out size/pitch and return them */
ed0291fd 243 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
ff72145b
DA
244 args->size = args->pitch * args->height;
245 return i915_gem_create(file, dev,
246 args->size, &args->handle);
247}
248
249int i915_gem_dumb_destroy(struct drm_file *file,
250 struct drm_device *dev,
251 uint32_t handle)
252{
253 return drm_gem_handle_delete(file, handle);
254}
255
256/**
257 * Creates a new mm object and returns a handle to it.
258 */
259int
260i915_gem_create_ioctl(struct drm_device *dev, void *data,
261 struct drm_file *file)
262{
263 struct drm_i915_gem_create *args = data;
63ed2cb2 264
ff72145b
DA
265 return i915_gem_create(file, dev,
266 args->size, &args->handle);
267}
268
8461d226
DV
269static inline int
270__copy_to_user_swizzled(char __user *cpu_vaddr,
271 const char *gpu_vaddr, int gpu_offset,
272 int length)
273{
274 int ret, cpu_offset = 0;
275
276 while (length > 0) {
277 int cacheline_end = ALIGN(gpu_offset + 1, 64);
278 int this_length = min(cacheline_end - gpu_offset, length);
279 int swizzled_gpu_offset = gpu_offset ^ 64;
280
281 ret = __copy_to_user(cpu_vaddr + cpu_offset,
282 gpu_vaddr + swizzled_gpu_offset,
283 this_length);
284 if (ret)
285 return ret + length;
286
287 cpu_offset += this_length;
288 gpu_offset += this_length;
289 length -= this_length;
290 }
291
292 return 0;
293}
294
8c59967c 295static inline int
4f0c7cfb
BW
296__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
297 const char __user *cpu_vaddr,
8c59967c
DV
298 int length)
299{
300 int ret, cpu_offset = 0;
301
302 while (length > 0) {
303 int cacheline_end = ALIGN(gpu_offset + 1, 64);
304 int this_length = min(cacheline_end - gpu_offset, length);
305 int swizzled_gpu_offset = gpu_offset ^ 64;
306
307 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
308 cpu_vaddr + cpu_offset,
309 this_length);
310 if (ret)
311 return ret + length;
312
313 cpu_offset += this_length;
314 gpu_offset += this_length;
315 length -= this_length;
316 }
317
318 return 0;
319}
320
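Both swizzled copy helpers above walk the buffer in chunks that never cross a 64-byte cacheline boundary, and access the kmap'd page at gpu_offset ^ 64 — they are only called for pages whose physical address has bit 17 set (see the page_do_bit17_swizzling checks below). A minimal sketch of that offset transformation, for illustration only:

#include <stdbool.h>

/* Illustrative helper (not part of i915_gem.c): the offset actually touched
 * on a bit-17-swizzled page differs from the nominal GPU offset in bit 6. */
static inline int swizzled_offset(int gpu_offset, bool page_has_bit17_set)
{
        return page_has_bit17_set ? gpu_offset ^ 64 : gpu_offset;
}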
d174bd64
DV
321/* Per-page copy function for the shmem pread fastpath.
322 * Flushes invalid cachelines before reading the target if
323 * needs_clflush is set. */
eb01459f 324static int
d174bd64
DV
325shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
326 char __user *user_data,
327 bool page_do_bit17_swizzling, bool needs_clflush)
328{
329 char *vaddr;
330 int ret;
331
e7e58eb5 332 if (unlikely(page_do_bit17_swizzling))
d174bd64
DV
333 return -EINVAL;
334
335 vaddr = kmap_atomic(page);
336 if (needs_clflush)
337 drm_clflush_virt_range(vaddr + shmem_page_offset,
338 page_length);
339 ret = __copy_to_user_inatomic(user_data,
340 vaddr + shmem_page_offset,
341 page_length);
342 kunmap_atomic(vaddr);
343
f60d7f0c 344 return ret ? -EFAULT : 0;
d174bd64
DV
345}
346
23c18c71
DV
347static void
348shmem_clflush_swizzled_range(char *addr, unsigned long length,
349 bool swizzled)
350{
e7e58eb5 351 if (unlikely(swizzled)) {
23c18c71
DV
352 unsigned long start = (unsigned long) addr;
353 unsigned long end = (unsigned long) addr + length;
354
355 /* For swizzling simply ensure that we always flush both
356 * channels. Lame, but simple and it works. Swizzled
357 * pwrite/pread is far from a hotpath - current userspace
358 * doesn't use it at all. */
359 start = round_down(start, 128);
360 end = round_up(end, 128);
361
362 drm_clflush_virt_range((void *)start, end - start);
363 } else {
364 drm_clflush_virt_range(addr, length);
365 }
366
367}
368
d174bd64
DV
369/* Only difference to the fast-path function is that this can handle bit17
370 * and uses non-atomic copy and kmap functions. */
371static int
372shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
373 char __user *user_data,
374 bool page_do_bit17_swizzling, bool needs_clflush)
375{
376 char *vaddr;
377 int ret;
378
379 vaddr = kmap(page);
380 if (needs_clflush)
23c18c71
DV
381 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
382 page_length,
383 page_do_bit17_swizzling);
d174bd64
DV
384
385 if (page_do_bit17_swizzling)
386 ret = __copy_to_user_swizzled(user_data,
387 vaddr, shmem_page_offset,
388 page_length);
389 else
390 ret = __copy_to_user(user_data,
391 vaddr + shmem_page_offset,
392 page_length);
393 kunmap(page);
394
f60d7f0c 395 return ret ? -EFAULT : 0;
d174bd64
DV
396}
397
eb01459f 398static int
dbf7bff0
DV
399i915_gem_shmem_pread(struct drm_device *dev,
400 struct drm_i915_gem_object *obj,
401 struct drm_i915_gem_pread *args,
402 struct drm_file *file)
eb01459f 403{
8461d226 404 char __user *user_data;
eb01459f 405 ssize_t remain;
8461d226 406 loff_t offset;
eb2c0c81 407 int shmem_page_offset, page_length, ret = 0;
8461d226 408 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
96d79b52 409 int prefaulted = 0;
8489731c 410 int needs_clflush = 0;
67d5a50c 411 struct sg_page_iter sg_iter;
eb01459f 412
2bb4629a 413 user_data = to_user_ptr(args->data_ptr);
eb01459f
EA
414 remain = args->size;
415
8461d226 416 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
eb01459f 417
8489731c
DV
418 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
419 /* If we're not in the cpu read domain, set ourself into the gtt
420 * read domain and manually flush cachelines (if required). This
421 * optimizes for the case when the gpu will dirty the data
422 * anyway again before the next pread happens. */
423 if (obj->cache_level == I915_CACHE_NONE)
424 needs_clflush = 1;
6c085a72
CW
425 if (obj->gtt_space) {
426 ret = i915_gem_object_set_to_gtt_domain(obj, false);
427 if (ret)
428 return ret;
429 }
8489731c 430 }
eb01459f 431
f60d7f0c
CW
432 ret = i915_gem_object_get_pages(obj);
433 if (ret)
434 return ret;
435
436 i915_gem_object_pin_pages(obj);
437
8461d226 438 offset = args->offset;
eb01459f 439
67d5a50c
ID
440 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
441 offset >> PAGE_SHIFT) {
2db76d7c 442 struct page *page = sg_page_iter_page(&sg_iter);
9da3da66
CW
443
444 if (remain <= 0)
445 break;
446
eb01459f
EA
447 /* Operation in this page
448 *
eb01459f 449 * shmem_page_offset = offset within page in shmem file
eb01459f
EA
450 * page_length = bytes to copy for this page
451 */
c8cbbb8b 452 shmem_page_offset = offset_in_page(offset);
eb01459f
EA
453 page_length = remain;
454 if ((shmem_page_offset + page_length) > PAGE_SIZE)
455 page_length = PAGE_SIZE - shmem_page_offset;
eb01459f 456
8461d226
DV
457 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
458 (page_to_phys(page) & (1 << 17)) != 0;
459
d174bd64
DV
460 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
461 user_data, page_do_bit17_swizzling,
462 needs_clflush);
463 if (ret == 0)
464 goto next_page;
dbf7bff0 465
dbf7bff0
DV
466 mutex_unlock(&dev->struct_mutex);
467
96d79b52 468 if (!prefaulted) {
f56f821f 469 ret = fault_in_multipages_writeable(user_data, remain);
96d79b52
DV
470 /* Userspace is tricking us, but we've already clobbered
471 * its pages with the prefault and promised to write the
472 * data up to the first fault. Hence ignore any errors
473 * and just continue. */
474 (void)ret;
475 prefaulted = 1;
476 }
eb01459f 477
d174bd64
DV
478 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
479 user_data, page_do_bit17_swizzling,
480 needs_clflush);
eb01459f 481
dbf7bff0 482 mutex_lock(&dev->struct_mutex);
f60d7f0c 483
dbf7bff0 484next_page:
e5281ccd 485 mark_page_accessed(page);
e5281ccd 486
f60d7f0c 487 if (ret)
8461d226 488 goto out;
8461d226 489
eb01459f 490 remain -= page_length;
8461d226 491 user_data += page_length;
eb01459f
EA
492 offset += page_length;
493 }
494
4f27b75d 495out:
f60d7f0c
CW
496 i915_gem_object_unpin_pages(obj);
497
eb01459f
EA
498 return ret;
499}
500
673a394b
EA
501/**
502 * Reads data from the object referenced by handle.
503 *
504 * On error, the contents of *data are undefined.
505 */
506int
507i915_gem_pread_ioctl(struct drm_device *dev, void *data,
05394f39 508 struct drm_file *file)
673a394b
EA
509{
510 struct drm_i915_gem_pread *args = data;
05394f39 511 struct drm_i915_gem_object *obj;
35b62a89 512 int ret = 0;
673a394b 513
51311d0a
CW
514 if (args->size == 0)
515 return 0;
516
517 if (!access_ok(VERIFY_WRITE,
2bb4629a 518 to_user_ptr(args->data_ptr),
51311d0a
CW
519 args->size))
520 return -EFAULT;
521
4f27b75d 522 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 523 if (ret)
4f27b75d 524 return ret;
673a394b 525
05394f39 526 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 527 if (&obj->base == NULL) {
1d7cfea1
CW
528 ret = -ENOENT;
529 goto unlock;
4f27b75d 530 }
673a394b 531
7dcd2499 532 /* Bounds check source. */
05394f39
CW
533 if (args->offset > obj->base.size ||
534 args->size > obj->base.size - args->offset) {
ce9d419d 535 ret = -EINVAL;
35b62a89 536 goto out;
ce9d419d
CW
537 }
538
1286ff73
DV
539 /* prime objects have no backing filp to GEM pread/pwrite
540 * pages from.
541 */
542 if (!obj->base.filp) {
543 ret = -EINVAL;
544 goto out;
545 }
546
db53a302
CW
547 trace_i915_gem_object_pread(obj, args->offset, args->size);
548
dbf7bff0 549 ret = i915_gem_shmem_pread(dev, obj, args, file);
673a394b 550
35b62a89 551out:
05394f39 552 drm_gem_object_unreference(&obj->base);
1d7cfea1 553unlock:
4f27b75d 554 mutex_unlock(&dev->struct_mutex);
eb01459f 555 return ret;
673a394b
EA
556}
557
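From userspace this path is driven by DRM_IOCTL_I915_GEM_PREAD, which copies part of a GEM object into a user buffer. A hedged sketch of a caller (illustrative only; not part of this file, error handling trimmed):

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Illustrative sketch: read `length` bytes starting at `offset` from the
 * object named by `handle` into `dst`. The kernel bounds-checks offset and
 * size against the object, as seen in i915_gem_pread_ioctl() above. */
static int gem_pread(int drm_fd, uint32_t handle, uint64_t offset,
                     void *dst, uint64_t length)
{
        struct drm_i915_gem_pread pread;

        memset(&pread, 0, sizeof(pread));
        pread.handle = handle;
        pread.offset = offset;
        pread.size = length;
        pread.data_ptr = (uintptr_t)dst;

        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_PREAD, &pread) ? -1 : 0;
}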
0839ccb8
KP
558/* This is the fast write path which cannot handle
559 * page faults in the source data
9b7530cc 560 */
0839ccb8
KP
561
562static inline int
563fast_user_write(struct io_mapping *mapping,
564 loff_t page_base, int page_offset,
565 char __user *user_data,
566 int length)
9b7530cc 567{
4f0c7cfb
BW
568 void __iomem *vaddr_atomic;
569 void *vaddr;
0839ccb8 570 unsigned long unwritten;
9b7530cc 571
3e4d3af5 572 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
4f0c7cfb
BW
573 /* We can use the cpu mem copy function because this is X86. */
574 vaddr = (void __force*)vaddr_atomic + page_offset;
575 unwritten = __copy_from_user_inatomic_nocache(vaddr,
0839ccb8 576 user_data, length);
3e4d3af5 577 io_mapping_unmap_atomic(vaddr_atomic);
fbd5a26d 578 return unwritten;
0839ccb8
KP
579}
580
3de09aa3
EA
581/**
582 * This is the fast pwrite path, where we copy the data directly from the
583 * user into the GTT, uncached.
584 */
673a394b 585static int
05394f39
CW
586i915_gem_gtt_pwrite_fast(struct drm_device *dev,
587 struct drm_i915_gem_object *obj,
3de09aa3 588 struct drm_i915_gem_pwrite *args,
05394f39 589 struct drm_file *file)
673a394b 590{
0839ccb8 591 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b 592 ssize_t remain;
0839ccb8 593 loff_t offset, page_base;
673a394b 594 char __user *user_data;
935aaa69
DV
595 int page_offset, page_length, ret;
596
86a1ee26 597 ret = i915_gem_object_pin(obj, 0, true, true);
935aaa69
DV
598 if (ret)
599 goto out;
600
601 ret = i915_gem_object_set_to_gtt_domain(obj, true);
602 if (ret)
603 goto out_unpin;
604
605 ret = i915_gem_object_put_fence(obj);
606 if (ret)
607 goto out_unpin;
673a394b 608
2bb4629a 609 user_data = to_user_ptr(args->data_ptr);
673a394b 610 remain = args->size;
673a394b 611
05394f39 612 offset = obj->gtt_offset + args->offset;
673a394b
EA
613
614 while (remain > 0) {
615 /* Operation in this page
616 *
0839ccb8
KP
617 * page_base = page offset within aperture
618 * page_offset = offset within page
619 * page_length = bytes to copy for this page
673a394b 620 */
c8cbbb8b
CW
621 page_base = offset & PAGE_MASK;
622 page_offset = offset_in_page(offset);
0839ccb8
KP
623 page_length = remain;
624 if ((page_offset + remain) > PAGE_SIZE)
625 page_length = PAGE_SIZE - page_offset;
626
0839ccb8 627 /* If we get a fault while copying data, then (presumably) our
3de09aa3
EA
628 * source page isn't available. Return the error and we'll
629 * retry in the slow path.
0839ccb8 630 */
5d4545ae 631 if (fast_user_write(dev_priv->gtt.mappable, page_base,
935aaa69
DV
632 page_offset, user_data, page_length)) {
633 ret = -EFAULT;
634 goto out_unpin;
635 }
673a394b 636
0839ccb8
KP
637 remain -= page_length;
638 user_data += page_length;
639 offset += page_length;
673a394b 640 }
673a394b 641
935aaa69
DV
642out_unpin:
643 i915_gem_object_unpin(obj);
644out:
3de09aa3 645 return ret;
673a394b
EA
646}
647
d174bd64
DV
648/* Per-page copy function for the shmem pwrite fastpath.
649 * Flushes invalid cachelines before writing to the target if
650 * needs_clflush_before is set and flushes out any written cachelines after
651 * writing if needs_clflush is set. */
3043c60c 652static int
d174bd64
DV
653shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
654 char __user *user_data,
655 bool page_do_bit17_swizzling,
656 bool needs_clflush_before,
657 bool needs_clflush_after)
673a394b 658{
d174bd64 659 char *vaddr;
673a394b 660 int ret;
3de09aa3 661
e7e58eb5 662 if (unlikely(page_do_bit17_swizzling))
d174bd64 663 return -EINVAL;
3de09aa3 664
d174bd64
DV
665 vaddr = kmap_atomic(page);
666 if (needs_clflush_before)
667 drm_clflush_virt_range(vaddr + shmem_page_offset,
668 page_length);
669 ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
670 user_data,
671 page_length);
672 if (needs_clflush_after)
673 drm_clflush_virt_range(vaddr + shmem_page_offset,
674 page_length);
675 kunmap_atomic(vaddr);
3de09aa3 676
755d2218 677 return ret ? -EFAULT : 0;
3de09aa3
EA
678}
679
d174bd64
DV
680/* Only difference to the fast-path function is that this can handle bit17
681 * and uses non-atomic copy and kmap functions. */
3043c60c 682static int
d174bd64
DV
683shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
684 char __user *user_data,
685 bool page_do_bit17_swizzling,
686 bool needs_clflush_before,
687 bool needs_clflush_after)
673a394b 688{
d174bd64
DV
689 char *vaddr;
690 int ret;
e5281ccd 691
d174bd64 692 vaddr = kmap(page);
e7e58eb5 693 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
23c18c71
DV
694 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
695 page_length,
696 page_do_bit17_swizzling);
d174bd64
DV
697 if (page_do_bit17_swizzling)
698 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
e5281ccd
CW
699 user_data,
700 page_length);
d174bd64
DV
701 else
702 ret = __copy_from_user(vaddr + shmem_page_offset,
703 user_data,
704 page_length);
705 if (needs_clflush_after)
23c18c71
DV
706 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
707 page_length,
708 page_do_bit17_swizzling);
d174bd64 709 kunmap(page);
40123c1f 710
755d2218 711 return ret ? -EFAULT : 0;
40123c1f
EA
712}
713
40123c1f 714static int
e244a443
DV
715i915_gem_shmem_pwrite(struct drm_device *dev,
716 struct drm_i915_gem_object *obj,
717 struct drm_i915_gem_pwrite *args,
718 struct drm_file *file)
40123c1f 719{
40123c1f 720 ssize_t remain;
8c59967c
DV
721 loff_t offset;
722 char __user *user_data;
eb2c0c81 723 int shmem_page_offset, page_length, ret = 0;
8c59967c 724 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
e244a443 725 int hit_slowpath = 0;
58642885
DV
726 int needs_clflush_after = 0;
727 int needs_clflush_before = 0;
67d5a50c 728 struct sg_page_iter sg_iter;
40123c1f 729
2bb4629a 730 user_data = to_user_ptr(args->data_ptr);
40123c1f
EA
731 remain = args->size;
732
8c59967c 733 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
40123c1f 734
58642885
DV
735 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
736 /* If we're not in the cpu write domain, set ourself into the gtt
737 * write domain and manually flush cachelines (if required). This
738 * optimizes for the case when the gpu will use the data
739 * right away and we therefore have to clflush anyway. */
740 if (obj->cache_level == I915_CACHE_NONE)
741 needs_clflush_after = 1;
6c085a72
CW
742 if (obj->gtt_space) {
743 ret = i915_gem_object_set_to_gtt_domain(obj, true);
744 if (ret)
745 return ret;
746 }
58642885
DV
747 }
748 /* The same trick applies to invalidating partially written cachelines before
749 * writing. */
750 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
751 && obj->cache_level == I915_CACHE_NONE)
752 needs_clflush_before = 1;
753
755d2218
CW
754 ret = i915_gem_object_get_pages(obj);
755 if (ret)
756 return ret;
757
758 i915_gem_object_pin_pages(obj);
759
673a394b 760 offset = args->offset;
05394f39 761 obj->dirty = 1;
673a394b 762
67d5a50c
ID
763 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
764 offset >> PAGE_SHIFT) {
2db76d7c 765 struct page *page = sg_page_iter_page(&sg_iter);
58642885 766 int partial_cacheline_write;
e5281ccd 767
9da3da66
CW
768 if (remain <= 0)
769 break;
770
40123c1f
EA
771 /* Operation in this page
772 *
40123c1f 773 * shmem_page_offset = offset within page in shmem file
40123c1f
EA
774 * page_length = bytes to copy for this page
775 */
c8cbbb8b 776 shmem_page_offset = offset_in_page(offset);
40123c1f
EA
777
778 page_length = remain;
779 if ((shmem_page_offset + page_length) > PAGE_SIZE)
780 page_length = PAGE_SIZE - shmem_page_offset;
40123c1f 781
58642885
DV
782 /* If we don't overwrite a cacheline completely we need to be
783 * careful to have up-to-date data by first clflushing. Don't
784 * overcomplicate things and flush the entire page. */
785 partial_cacheline_write = needs_clflush_before &&
786 ((shmem_page_offset | page_length)
787 & (boot_cpu_data.x86_clflush_size - 1));
788
8c59967c
DV
789 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
790 (page_to_phys(page) & (1 << 17)) != 0;
791
d174bd64
DV
792 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
793 user_data, page_do_bit17_swizzling,
794 partial_cacheline_write,
795 needs_clflush_after);
796 if (ret == 0)
797 goto next_page;
e244a443
DV
798
799 hit_slowpath = 1;
e244a443 800 mutex_unlock(&dev->struct_mutex);
d174bd64
DV
801 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
802 user_data, page_do_bit17_swizzling,
803 partial_cacheline_write,
804 needs_clflush_after);
40123c1f 805
e244a443 806 mutex_lock(&dev->struct_mutex);
755d2218 807
e244a443 808next_page:
e5281ccd
CW
809 set_page_dirty(page);
810 mark_page_accessed(page);
e5281ccd 811
755d2218 812 if (ret)
8c59967c 813 goto out;
8c59967c 814
40123c1f 815 remain -= page_length;
8c59967c 816 user_data += page_length;
40123c1f 817 offset += page_length;
673a394b
EA
818 }
819
fbd5a26d 820out:
755d2218
CW
821 i915_gem_object_unpin_pages(obj);
822
e244a443 823 if (hit_slowpath) {
8dcf015e
DV
824 /*
825 * Fixup: Flush cpu caches in case we didn't flush the dirty
826 * cachelines in-line while writing and the object moved
827 * out of the cpu write domain while we've dropped the lock.
828 */
829 if (!needs_clflush_after &&
830 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
e244a443 831 i915_gem_clflush_object(obj);
e76e9aeb 832 i915_gem_chipset_flush(dev);
e244a443 833 }
8c59967c 834 }
673a394b 835
58642885 836 if (needs_clflush_after)
e76e9aeb 837 i915_gem_chipset_flush(dev);
58642885 838
40123c1f 839 return ret;
673a394b
EA
840}
841
842/**
843 * Writes data to the object referenced by handle.
844 *
845 * On error, the contents of the buffer that were to be modified are undefined.
846 */
847int
848i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
fbd5a26d 849 struct drm_file *file)
673a394b
EA
850{
851 struct drm_i915_gem_pwrite *args = data;
05394f39 852 struct drm_i915_gem_object *obj;
51311d0a
CW
853 int ret;
854
855 if (args->size == 0)
856 return 0;
857
858 if (!access_ok(VERIFY_READ,
2bb4629a 859 to_user_ptr(args->data_ptr),
51311d0a
CW
860 args->size))
861 return -EFAULT;
862
2bb4629a 863 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
f56f821f 864 args->size);
51311d0a
CW
865 if (ret)
866 return -EFAULT;
673a394b 867
fbd5a26d 868 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 869 if (ret)
fbd5a26d 870 return ret;
1d7cfea1 871
05394f39 872 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 873 if (&obj->base == NULL) {
1d7cfea1
CW
874 ret = -ENOENT;
875 goto unlock;
fbd5a26d 876 }
673a394b 877
7dcd2499 878 /* Bounds check destination. */
05394f39
CW
879 if (args->offset > obj->base.size ||
880 args->size > obj->base.size - args->offset) {
ce9d419d 881 ret = -EINVAL;
35b62a89 882 goto out;
ce9d419d
CW
883 }
884
1286ff73
DV
885 /* prime objects have no backing filp to GEM pread/pwrite
886 * pages from.
887 */
888 if (!obj->base.filp) {
889 ret = -EINVAL;
890 goto out;
891 }
892
db53a302
CW
893 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
894
935aaa69 895 ret = -EFAULT;
673a394b
EA
896 /* We can only do the GTT pwrite on untiled buffers, as otherwise
897 * it would end up going through the fenced access, and we'll get
898 * different detiling behavior between reading and writing.
899 * pread/pwrite currently are reading and writing from the CPU
900 * perspective, requiring manual detiling by the client.
901 */
5c0480f2 902 if (obj->phys_obj) {
fbd5a26d 903 ret = i915_gem_phys_pwrite(dev, obj, args, file);
5c0480f2
DV
904 goto out;
905 }
906
86a1ee26 907 if (obj->cache_level == I915_CACHE_NONE &&
c07496fa 908 obj->tiling_mode == I915_TILING_NONE &&
5c0480f2 909 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
fbd5a26d 910 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
935aaa69
DV
911 /* Note that the gtt paths might fail with non-page-backed user
912 * pointers (e.g. gtt mappings when moving data between
913 * textures). Fallback to the shmem path in that case. */
fbd5a26d 914 }
673a394b 915
86a1ee26 916 if (ret == -EFAULT || ret == -ENOSPC)
935aaa69 917 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
5c0480f2 918
35b62a89 919out:
05394f39 920 drm_gem_object_unreference(&obj->base);
1d7cfea1 921unlock:
fbd5a26d 922 mutex_unlock(&dev->struct_mutex);
673a394b
EA
923 return ret;
924}
925
b361237b 926int
33196ded 927i915_gem_check_wedge(struct i915_gpu_error *error,
b361237b
CW
928 bool interruptible)
929{
1f83fee0 930 if (i915_reset_in_progress(error)) {
b361237b
CW
931 /* Non-interruptible callers can't handle -EAGAIN, hence return
932 * -EIO unconditionally for these. */
933 if (!interruptible)
934 return -EIO;
935
1f83fee0
DV
936 /* Recovery complete, but the reset failed ... */
937 if (i915_terminally_wedged(error))
b361237b
CW
938 return -EIO;
939
940 return -EAGAIN;
941 }
942
943 return 0;
944}
945
946/*
947 * Compare seqno against outstanding lazy request. Emit a request if they are
948 * equal.
949 */
950static int
951i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
952{
953 int ret;
954
955 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
956
957 ret = 0;
958 if (seqno == ring->outstanding_lazy_request)
0025c077 959 ret = i915_add_request(ring, NULL);
b361237b
CW
960
961 return ret;
962}
963
964/**
965 * __wait_seqno - wait until execution of seqno has finished
966 * @ring: the ring expected to report seqno
967 * @seqno: duh!
f69061be 968 * @reset_counter: reset sequence associated with the given seqno
b361237b
CW
969 * @interruptible: do an interruptible wait (normally yes)
970 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
971 *
f69061be
DV
972 * Note: It is of utmost importance that the passed in seqno and reset_counter
973 * values have been read by the caller in an smp safe manner. Where read-side
974 * locks are involved, it is sufficient to read the reset_counter before
975 * unlocking the lock that protects the seqno. For lockless tricks, the
976 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
977 * inserted.
978 *
b361237b
CW
979 * Returns 0 if the seqno was found within the allotted time. Else returns the
980 * errno with remaining time filled in timeout argument.
981 */
982static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
f69061be 983 unsigned reset_counter,
b361237b
CW
984 bool interruptible, struct timespec *timeout)
985{
986 drm_i915_private_t *dev_priv = ring->dev->dev_private;
987 struct timespec before, now, wait_time={1,0};
988 unsigned long timeout_jiffies;
989 long end;
990 bool wait_forever = true;
991 int ret;
992
993 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
994 return 0;
995
996 trace_i915_gem_request_wait_begin(ring, seqno);
997
998 if (timeout != NULL) {
999 wait_time = *timeout;
1000 wait_forever = false;
1001 }
1002
e054cc39 1003 timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
b361237b
CW
1004
1005 if (WARN_ON(!ring->irq_get(ring)))
1006 return -ENODEV;
1007
1008 /* Record current time in case interrupted by signal, or wedged */
1009 getrawmonotonic(&before);
1010
1011#define EXIT_COND \
1012 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
f69061be
DV
1013 i915_reset_in_progress(&dev_priv->gpu_error) || \
1014 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
b361237b
CW
1015 do {
1016 if (interruptible)
1017 end = wait_event_interruptible_timeout(ring->irq_queue,
1018 EXIT_COND,
1019 timeout_jiffies);
1020 else
1021 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1022 timeout_jiffies);
1023
f69061be
DV
1024 /* We need to check whether any gpu reset happened in between
1025 * the caller grabbing the seqno and now ... */
1026 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1027 end = -EAGAIN;
1028
1029 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
1030 * gone. */
33196ded 1031 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
b361237b
CW
1032 if (ret)
1033 end = ret;
1034 } while (end == 0 && wait_forever);
1035
1036 getrawmonotonic(&now);
1037
1038 ring->irq_put(ring);
1039 trace_i915_gem_request_wait_end(ring, seqno);
1040#undef EXIT_COND
1041
1042 if (timeout) {
1043 struct timespec sleep_time = timespec_sub(now, before);
1044 *timeout = timespec_sub(*timeout, sleep_time);
4f42f4ef
CW
1045 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1046 set_normalized_timespec(timeout, 0, 0);
b361237b
CW
1047 }
1048
1049 switch (end) {
1050 case -EIO:
1051 case -EAGAIN: /* Wedged */
1052 case -ERESTARTSYS: /* Signal */
1053 return (int)end;
1054 case 0: /* Timeout */
b361237b
CW
1055 return -ETIME;
1056 default: /* Completed */
1057 WARN_ON(end < 0); /* We're not aware of other errors */
1058 return 0;
1059 }
1060}
1061
1062/**
1063 * Waits for a sequence number to be signaled, and cleans up the
1064 * request and object lists appropriately for that event.
1065 */
1066int
1067i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1068{
1069 struct drm_device *dev = ring->dev;
1070 struct drm_i915_private *dev_priv = dev->dev_private;
1071 bool interruptible = dev_priv->mm.interruptible;
1072 int ret;
1073
1074 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1075 BUG_ON(seqno == 0);
1076
33196ded 1077 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
b361237b
CW
1078 if (ret)
1079 return ret;
1080
1081 ret = i915_gem_check_olr(ring, seqno);
1082 if (ret)
1083 return ret;
1084
f69061be
DV
1085 return __wait_seqno(ring, seqno,
1086 atomic_read(&dev_priv->gpu_error.reset_counter),
1087 interruptible, NULL);
b361237b
CW
1088}
1089
1090/**
1091 * Ensures that all rendering to the object has completed and the object is
1092 * safe to unbind from the GTT or access from the CPU.
1093 */
1094static __must_check int
1095i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1096 bool readonly)
1097{
1098 struct intel_ring_buffer *ring = obj->ring;
1099 u32 seqno;
1100 int ret;
1101
1102 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1103 if (seqno == 0)
1104 return 0;
1105
1106 ret = i915_wait_seqno(ring, seqno);
1107 if (ret)
1108 return ret;
1109
1110 i915_gem_retire_requests_ring(ring);
1111
1112 /* Manually manage the write flush as we may have not yet
1113 * retired the buffer.
1114 */
1115 if (obj->last_write_seqno &&
1116 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1117 obj->last_write_seqno = 0;
1118 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1119 }
1120
1121 return 0;
1122}
1123
3236f57a
CW
1124/* A nonblocking variant of the above wait. This is a highly dangerous routine
1125 * as the object state may change during this call.
1126 */
1127static __must_check int
1128i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1129 bool readonly)
1130{
1131 struct drm_device *dev = obj->base.dev;
1132 struct drm_i915_private *dev_priv = dev->dev_private;
1133 struct intel_ring_buffer *ring = obj->ring;
f69061be 1134 unsigned reset_counter;
3236f57a
CW
1135 u32 seqno;
1136 int ret;
1137
1138 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1139 BUG_ON(!dev_priv->mm.interruptible);
1140
1141 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1142 if (seqno == 0)
1143 return 0;
1144
33196ded 1145 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
3236f57a
CW
1146 if (ret)
1147 return ret;
1148
1149 ret = i915_gem_check_olr(ring, seqno);
1150 if (ret)
1151 return ret;
1152
f69061be 1153 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3236f57a 1154 mutex_unlock(&dev->struct_mutex);
f69061be 1155 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3236f57a
CW
1156 mutex_lock(&dev->struct_mutex);
1157
1158 i915_gem_retire_requests_ring(ring);
1159
1160 /* Manually manage the write flush as we may have not yet
1161 * retired the buffer.
1162 */
1163 if (obj->last_write_seqno &&
1164 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1165 obj->last_write_seqno = 0;
1166 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1167 }
1168
1169 return ret;
1170}
1171
673a394b 1172/**
2ef7eeaa
EA
1173 * Called when user space prepares to use an object with the CPU, either
1174 * through the mmap ioctl's mapping or a GTT mapping.
673a394b
EA
1175 */
1176int
1177i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
05394f39 1178 struct drm_file *file)
673a394b
EA
1179{
1180 struct drm_i915_gem_set_domain *args = data;
05394f39 1181 struct drm_i915_gem_object *obj;
2ef7eeaa
EA
1182 uint32_t read_domains = args->read_domains;
1183 uint32_t write_domain = args->write_domain;
673a394b
EA
1184 int ret;
1185
2ef7eeaa 1186 /* Only handle setting domains to types used by the CPU. */
21d509e3 1187 if (write_domain & I915_GEM_GPU_DOMAINS)
2ef7eeaa
EA
1188 return -EINVAL;
1189
21d509e3 1190 if (read_domains & I915_GEM_GPU_DOMAINS)
2ef7eeaa
EA
1191 return -EINVAL;
1192
1193 /* Having something in the write domain implies it's in the read
1194 * domain, and only that read domain. Enforce that in the request.
1195 */
1196 if (write_domain != 0 && read_domains != write_domain)
1197 return -EINVAL;
1198
76c1dec1 1199 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 1200 if (ret)
76c1dec1 1201 return ret;
1d7cfea1 1202
05394f39 1203 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 1204 if (&obj->base == NULL) {
1d7cfea1
CW
1205 ret = -ENOENT;
1206 goto unlock;
76c1dec1 1207 }
673a394b 1208
3236f57a
CW
1209 /* Try to flush the object off the GPU without holding the lock.
1210 * We will repeat the flush holding the lock in the normal manner
1211 * to catch cases where we are gazumped.
1212 */
1213 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1214 if (ret)
1215 goto unref;
1216
2ef7eeaa
EA
1217 if (read_domains & I915_GEM_DOMAIN_GTT) {
1218 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
02354392
EA
1219
1220 /* Silently promote "you're not bound, there was nothing to do"
1221 * to success, since the client was just asking us to
1222 * make sure everything was done.
1223 */
1224 if (ret == -EINVAL)
1225 ret = 0;
2ef7eeaa 1226 } else {
e47c68e9 1227 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
2ef7eeaa
EA
1228 }
1229
3236f57a 1230unref:
05394f39 1231 drm_gem_object_unreference(&obj->base);
1d7cfea1 1232unlock:
673a394b
EA
1233 mutex_unlock(&dev->struct_mutex);
1234 return ret;
1235}
1236
1237/**
1238 * Called when user space has done writes to this buffer
1239 */
1240int
1241i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
05394f39 1242 struct drm_file *file)
673a394b
EA
1243{
1244 struct drm_i915_gem_sw_finish *args = data;
05394f39 1245 struct drm_i915_gem_object *obj;
673a394b
EA
1246 int ret = 0;
1247
76c1dec1 1248 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 1249 if (ret)
76c1dec1 1250 return ret;
1d7cfea1 1251
05394f39 1252 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 1253 if (&obj->base == NULL) {
1d7cfea1
CW
1254 ret = -ENOENT;
1255 goto unlock;
673a394b
EA
1256 }
1257
673a394b 1258 /* Pinned buffers may be scanout, so flush the cache */
05394f39 1259 if (obj->pin_count)
e47c68e9
EA
1260 i915_gem_object_flush_cpu_write_domain(obj);
1261
05394f39 1262 drm_gem_object_unreference(&obj->base);
1d7cfea1 1263unlock:
673a394b
EA
1264 mutex_unlock(&dev->struct_mutex);
1265 return ret;
1266}
1267
1268/**
1269 * Maps the contents of an object, returning the address it is mapped
1270 * into.
1271 *
1272 * While the mapping holds a reference on the contents of the object, it doesn't
1273 * imply a ref on the object itself.
1274 */
1275int
1276i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
05394f39 1277 struct drm_file *file)
673a394b
EA
1278{
1279 struct drm_i915_gem_mmap *args = data;
1280 struct drm_gem_object *obj;
673a394b
EA
1281 unsigned long addr;
1282
05394f39 1283 obj = drm_gem_object_lookup(dev, file, args->handle);
673a394b 1284 if (obj == NULL)
bf79cb91 1285 return -ENOENT;
673a394b 1286
1286ff73
DV
1287 /* prime objects have no backing filp to GEM mmap
1288 * pages from.
1289 */
1290 if (!obj->filp) {
1291 drm_gem_object_unreference_unlocked(obj);
1292 return -EINVAL;
1293 }
1294
6be5ceb0 1295 addr = vm_mmap(obj->filp, 0, args->size,
673a394b
EA
1296 PROT_READ | PROT_WRITE, MAP_SHARED,
1297 args->offset);
bc9025bd 1298 drm_gem_object_unreference_unlocked(obj);
673a394b
EA
1299 if (IS_ERR((void *)addr))
1300 return addr;
1301
1302 args->addr_ptr = (uint64_t) addr;
1303
1304 return 0;
1305}
1306
de151cf6
JB
1307/**
1308 * i915_gem_fault - fault a page into the GTT
1309 * vma: VMA in question
1310 * vmf: fault info
1311 *
1312 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1313 * from userspace. The fault handler takes care of binding the object to
1314 * the GTT (if needed), allocating and programming a fence register (again,
1315 * only if needed based on whether the old reg is still valid or the object
1316 * is tiled) and inserting a new PTE into the faulting process.
1317 *
1318 * Note that the faulting process may involve evicting existing objects
1319 * from the GTT and/or fence registers to make room. So performance may
1320 * suffer if the GTT working set is large or there are few fence registers
1321 * left.
1322 */
1323int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1324{
05394f39
CW
1325 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1326 struct drm_device *dev = obj->base.dev;
7d1c4804 1327 drm_i915_private_t *dev_priv = dev->dev_private;
de151cf6
JB
1328 pgoff_t page_offset;
1329 unsigned long pfn;
1330 int ret = 0;
0f973f27 1331 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
de151cf6
JB
1332
1333 /* We don't use vmf->pgoff since that has the fake offset */
1334 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1335 PAGE_SHIFT;
1336
d9bc7e9f
CW
1337 ret = i915_mutex_lock_interruptible(dev);
1338 if (ret)
1339 goto out;
a00b10c3 1340
db53a302
CW
1341 trace_i915_gem_object_fault(obj, page_offset, true, write);
1342
eb119bd6
CW
1343 /* Access to snoopable pages through the GTT is incoherent. */
1344 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1345 ret = -EINVAL;
1346 goto unlock;
1347 }
1348
d9bc7e9f 1349 /* Now bind it into the GTT if needed */
c9839303
CW
1350 ret = i915_gem_object_pin(obj, 0, true, false);
1351 if (ret)
1352 goto unlock;
4a684a41 1353
c9839303
CW
1354 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1355 if (ret)
1356 goto unpin;
74898d7e 1357
06d98131 1358 ret = i915_gem_object_get_fence(obj);
d9e86c0e 1359 if (ret)
c9839303 1360 goto unpin;
7d1c4804 1361
6299f992
CW
1362 obj->fault_mappable = true;
1363
5d4545ae 1364 pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
de151cf6
JB
1365 page_offset;
1366
1367 /* Finally, remap it using the new GTT offset */
1368 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
c9839303
CW
1369unpin:
1370 i915_gem_object_unpin(obj);
c715089f 1371unlock:
de151cf6 1372 mutex_unlock(&dev->struct_mutex);
d9bc7e9f 1373out:
de151cf6 1374 switch (ret) {
d9bc7e9f 1375 case -EIO:
a9340cca
DV
1376 /* If this -EIO is due to a gpu hang, give the reset code a
1377 * chance to clean up the mess. Otherwise return the proper
1378 * SIGBUS. */
1f83fee0 1379 if (i915_terminally_wedged(&dev_priv->gpu_error))
a9340cca 1380 return VM_FAULT_SIGBUS;
045e769a 1381 case -EAGAIN:
d9bc7e9f
CW
1382 /* Give the error handler a chance to run and move the
1383 * objects off the GPU active list. Next time we service the
1384 * fault, we should be able to transition the page into the
1385 * GTT without touching the GPU (and so avoid further
1386 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1387 * with coherency, just lost writes.
1388 */
045e769a 1389 set_need_resched();
c715089f
CW
1390 case 0:
1391 case -ERESTARTSYS:
bed636ab 1392 case -EINTR:
e79e0fe3
DR
1393 case -EBUSY:
1394 /*
1395 * EBUSY is ok: this just means that another thread
1396 * already did the job.
1397 */
c715089f 1398 return VM_FAULT_NOPAGE;
de151cf6 1399 case -ENOMEM:
de151cf6 1400 return VM_FAULT_OOM;
a7c2e1aa
DV
1401 case -ENOSPC:
1402 return VM_FAULT_SIGBUS;
de151cf6 1403 default:
a7c2e1aa 1404 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
c715089f 1405 return VM_FAULT_SIGBUS;
de151cf6
JB
1406 }
1407}
1408
901782b2
CW
1409/**
1410 * i915_gem_release_mmap - remove physical page mappings
1411 * @obj: obj in question
1412 *
af901ca1 1413 * Preserve the reservation of the mmapping with the DRM core code, but
901782b2
CW
1414 * relinquish ownership of the pages back to the system.
1415 *
1416 * It is vital that we remove the page mapping if we have mapped a tiled
1417 * object through the GTT and then lose the fence register due to
1418 * resource pressure. Similarly if the object has been moved out of the
1419 * aperture, then pages mapped into userspace must be revoked. Removing the
1420 * mapping will then trigger a page fault on the next user access, allowing
1421 * fixup by i915_gem_fault().
1422 */
d05ca301 1423void
05394f39 1424i915_gem_release_mmap(struct drm_i915_gem_object *obj)
901782b2 1425{
6299f992
CW
1426 if (!obj->fault_mappable)
1427 return;
901782b2 1428
f6e47884
CW
1429 if (obj->base.dev->dev_mapping)
1430 unmap_mapping_range(obj->base.dev->dev_mapping,
1431 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1432 obj->base.size, 1);
fb7d516a 1433
6299f992 1434 obj->fault_mappable = false;
901782b2
CW
1435}
1436
0fa87796 1437uint32_t
e28f8711 1438i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
92b88aeb 1439{
e28f8711 1440 uint32_t gtt_size;
92b88aeb
CW
1441
1442 if (INTEL_INFO(dev)->gen >= 4 ||
e28f8711
CW
1443 tiling_mode == I915_TILING_NONE)
1444 return size;
92b88aeb
CW
1445
1446 /* Previous chips need a power-of-two fence region when tiling */
1447 if (INTEL_INFO(dev)->gen == 3)
e28f8711 1448 gtt_size = 1024*1024;
92b88aeb 1449 else
e28f8711 1450 gtt_size = 512*1024;
92b88aeb 1451
e28f8711
CW
1452 while (gtt_size < size)
1453 gtt_size <<= 1;
92b88aeb 1454
e28f8711 1455 return gtt_size;
92b88aeb
CW
1456}
1457
de151cf6
JB
1458/**
1459 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1460 * @obj: object to check
1461 *
1462 * Return the required GTT alignment for an object, taking into account
5e783301 1463 * potential fence register mapping.
de151cf6 1464 */
d865110c
ID
1465uint32_t
1466i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1467 int tiling_mode, bool fenced)
de151cf6 1468{
de151cf6
JB
1469 /*
1470 * Minimum alignment is 4k (GTT page size), but might be greater
1471 * if a fence register is needed for the object.
1472 */
d865110c 1473 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
e28f8711 1474 tiling_mode == I915_TILING_NONE)
de151cf6
JB
1475 return 4096;
1476
a00b10c3
CW
1477 /*
1478 * Previous chips need to be aligned to the size of the smallest
1479 * fence register that can contain the object.
1480 */
e28f8711 1481 return i915_gem_get_gtt_size(dev, size, tiling_mode);
a00b10c3
CW
1482}
1483
d8cb5086
CW
1484static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1485{
1486 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1487 int ret;
1488
1489 if (obj->base.map_list.map)
1490 return 0;
1491
da494d7c
DV
1492 dev_priv->mm.shrinker_no_lock_stealing = true;
1493
d8cb5086
CW
1494 ret = drm_gem_create_mmap_offset(&obj->base);
1495 if (ret != -ENOSPC)
da494d7c 1496 goto out;
d8cb5086
CW
1497
1498 /* Badly fragmented mmap space? The only way we can recover
1499 * space is by destroying unwanted objects. We can't randomly release
1500 * mmap_offsets as userspace expects them to be persistent for the
1501 * lifetime of the objects. The closest we can do is to release the
1502 * offsets on purgeable objects by truncating it and marking it purged,
1503 * which prevents userspace from ever using that object again.
1504 */
1505 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1506 ret = drm_gem_create_mmap_offset(&obj->base);
1507 if (ret != -ENOSPC)
da494d7c 1508 goto out;
d8cb5086
CW
1509
1510 i915_gem_shrink_all(dev_priv);
da494d7c
DV
1511 ret = drm_gem_create_mmap_offset(&obj->base);
1512out:
1513 dev_priv->mm.shrinker_no_lock_stealing = false;
1514
1515 return ret;
d8cb5086
CW
1516}
1517
1518static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1519{
1520 if (!obj->base.map_list.map)
1521 return;
1522
1523 drm_gem_free_mmap_offset(&obj->base);
1524}
1525
de151cf6 1526int
ff72145b
DA
1527i915_gem_mmap_gtt(struct drm_file *file,
1528 struct drm_device *dev,
1529 uint32_t handle,
1530 uint64_t *offset)
de151cf6 1531{
da761a6e 1532 struct drm_i915_private *dev_priv = dev->dev_private;
05394f39 1533 struct drm_i915_gem_object *obj;
de151cf6
JB
1534 int ret;
1535
76c1dec1 1536 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 1537 if (ret)
76c1dec1 1538 return ret;
de151cf6 1539
ff72145b 1540 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
c8725226 1541 if (&obj->base == NULL) {
1d7cfea1
CW
1542 ret = -ENOENT;
1543 goto unlock;
1544 }
de151cf6 1545
5d4545ae 1546 if (obj->base.size > dev_priv->gtt.mappable_end) {
da761a6e 1547 ret = -E2BIG;
ff56b0bc 1548 goto out;
da761a6e
CW
1549 }
1550
05394f39 1551 if (obj->madv != I915_MADV_WILLNEED) {
ab18282d 1552 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1d7cfea1
CW
1553 ret = -EINVAL;
1554 goto out;
ab18282d
CW
1555 }
1556
d8cb5086
CW
1557 ret = i915_gem_object_create_mmap_offset(obj);
1558 if (ret)
1559 goto out;
de151cf6 1560
ff72145b 1561 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
de151cf6 1562
1d7cfea1 1563out:
05394f39 1564 drm_gem_object_unreference(&obj->base);
1d7cfea1 1565unlock:
de151cf6 1566 mutex_unlock(&dev->struct_mutex);
1d7cfea1 1567 return ret;
de151cf6
JB
1568}
1569
ff72145b
DA
1570/**
1571 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1572 * @dev: DRM device
1573 * @data: GTT mapping ioctl data
1574 * @file: GEM object info
1575 *
1576 * Simply returns the fake offset to userspace so it can mmap it.
1577 * The mmap call will end up in drm_gem_mmap(), which will set things
1578 * up so we can get faults in the handler above.
1579 *
1580 * The fault handler will take care of binding the object into the GTT
1581 * (since it may have been evicted to make room for something), allocating
1582 * a fence register, and mapping the appropriate aperture address into
1583 * userspace.
1584 */
1585int
1586i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1587 struct drm_file *file)
1588{
1589 struct drm_i915_gem_mmap_gtt *args = data;
1590
ff72145b
DA
1591 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1592}
1593
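As the comment above describes, this ioctl only hands back a fake offset; the actual mapping is created by a subsequent mmap() on the DRM fd at that offset, which routes through drm_gem_mmap() and ultimately i915_gem_fault(). A sketch of the two-step dance from userspace (illustrative only, not part of i915_gem.c):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

/* Illustrative sketch: map a GEM object through the GTT aperture by fetching
 * the fake mmap offset, then mmap'ing the DRM fd at that offset. Returns
 * MAP_FAILED on error. */
static void *gem_mmap_gtt(int drm_fd, uint32_t handle, size_t size)
{
        struct drm_i915_gem_mmap_gtt arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                return MAP_FAILED;

        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    drm_fd, arg.offset);
}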
225067ee
DV
1594/* Immediately discard the backing storage */
1595static void
1596i915_gem_object_truncate(struct drm_i915_gem_object *obj)
e5281ccd 1597{
e5281ccd 1598 struct inode *inode;
e5281ccd 1599
4d6294bf 1600 i915_gem_object_free_mmap_offset(obj);
1286ff73 1601
4d6294bf
CW
1602 if (obj->base.filp == NULL)
1603 return;
e5281ccd 1604
225067ee
DV
1605 /* Our goal here is to return as much of the memory as
1606 * is possible back to the system as we are called from OOM.
1607 * To do this we must instruct the shmfs to drop all of its
1608 * backing pages, *now*.
1609 */
496ad9aa 1610 inode = file_inode(obj->base.filp);
225067ee 1611 shmem_truncate_range(inode, 0, (loff_t)-1);
e5281ccd 1612
225067ee
DV
1613 obj->madv = __I915_MADV_PURGED;
1614}
e5281ccd 1615
225067ee
DV
1616static inline int
1617i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1618{
1619 return obj->madv == I915_MADV_DONTNEED;
e5281ccd
CW
1620}
1621
5cdf5881 1622static void
05394f39 1623i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
673a394b 1624{
90797e6d
ID
1625 struct sg_page_iter sg_iter;
1626 int ret;
1286ff73 1627
05394f39 1628 BUG_ON(obj->madv == __I915_MADV_PURGED);
673a394b 1629
6c085a72
CW
1630 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1631 if (ret) {
1632 /* In the event of a disaster, abandon all caches and
1633 * hope for the best.
1634 */
1635 WARN_ON(ret != -EIO);
1636 i915_gem_clflush_object(obj);
1637 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1638 }
1639
6dacfd2f 1640 if (i915_gem_object_needs_bit17_swizzle(obj))
280b713b
EA
1641 i915_gem_object_save_bit_17_swizzle(obj);
1642
05394f39
CW
1643 if (obj->madv == I915_MADV_DONTNEED)
1644 obj->dirty = 0;
3ef94daa 1645
90797e6d 1646 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
2db76d7c 1647 struct page *page = sg_page_iter_page(&sg_iter);
9da3da66 1648
05394f39 1649 if (obj->dirty)
9da3da66 1650 set_page_dirty(page);
3ef94daa 1651
05394f39 1652 if (obj->madv == I915_MADV_WILLNEED)
9da3da66 1653 mark_page_accessed(page);
3ef94daa 1654
9da3da66 1655 page_cache_release(page);
3ef94daa 1656 }
05394f39 1657 obj->dirty = 0;
673a394b 1658
9da3da66
CW
1659 sg_free_table(obj->pages);
1660 kfree(obj->pages);
37e680a1 1661}
6c085a72 1662
dd624afd 1663int
37e680a1
CW
1664i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1665{
1666 const struct drm_i915_gem_object_ops *ops = obj->ops;
1667
2f745ad3 1668 if (obj->pages == NULL)
37e680a1
CW
1669 return 0;
1670
1671 BUG_ON(obj->gtt_space);
6c085a72 1672
a5570178
CW
1673 if (obj->pages_pin_count)
1674 return -EBUSY;
1675
a2165e31
CW
1676 /* ->put_pages might need to allocate memory for the bit17 swizzle
1677 * array, hence protect them from being reaped by removing them from gtt
1678 * lists early. */
35c20a60 1679 list_del(&obj->global_list);
a2165e31 1680
37e680a1 1681 ops->put_pages(obj);
05394f39 1682 obj->pages = NULL;
37e680a1 1683
6c085a72
CW
1684 if (i915_gem_object_is_purgeable(obj))
1685 i915_gem_object_truncate(obj);
1686
1687 return 0;
1688}
1689
1690static long
93927ca5
DV
1691__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1692 bool purgeable_only)
6c085a72
CW
1693{
1694 struct drm_i915_gem_object *obj, *next;
1695 long count = 0;
1696
1697 list_for_each_entry_safe(obj, next,
1698 &dev_priv->mm.unbound_list,
35c20a60 1699 global_list) {
93927ca5 1700 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
37e680a1 1701 i915_gem_object_put_pages(obj) == 0) {
6c085a72
CW
1702 count += obj->base.size >> PAGE_SHIFT;
1703 if (count >= target)
1704 return count;
1705 }
1706 }
1707
1708 list_for_each_entry_safe(obj, next,
1709 &dev_priv->mm.inactive_list,
1710 mm_list) {
93927ca5 1711 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
6c085a72 1712 i915_gem_object_unbind(obj) == 0 &&
37e680a1 1713 i915_gem_object_put_pages(obj) == 0) {
6c085a72
CW
1714 count += obj->base.size >> PAGE_SHIFT;
1715 if (count >= target)
1716 return count;
1717 }
1718 }
1719
1720 return count;
1721}
1722
93927ca5
DV
1723static long
1724i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1725{
1726 return __i915_gem_shrink(dev_priv, target, true);
1727}
1728
6c085a72
CW
1729static void
1730i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1731{
1732 struct drm_i915_gem_object *obj, *next;
1733
1734 i915_gem_evict_everything(dev_priv->dev);
1735
35c20a60
BW
1736 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1737 global_list)
37e680a1 1738 i915_gem_object_put_pages(obj);
225067ee
DV
1739}
1740
37e680a1 1741static int
6c085a72 1742i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
e5281ccd 1743{
6c085a72 1744 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
e5281ccd
CW
1745 int page_count, i;
1746 struct address_space *mapping;
9da3da66
CW
1747 struct sg_table *st;
1748 struct scatterlist *sg;
90797e6d 1749 struct sg_page_iter sg_iter;
e5281ccd 1750 struct page *page;
90797e6d 1751 unsigned long last_pfn = 0; /* suppress gcc warning */
6c085a72 1752 gfp_t gfp;
e5281ccd 1753
6c085a72
CW
1754 /* Assert that the object is not currently in any GPU domain. As it
1755 * wasn't in the GTT, there shouldn't be any way it could have been in
1756 * a GPU cache
1757 */
1758 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1759 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1760
9da3da66
CW
1761 st = kmalloc(sizeof(*st), GFP_KERNEL);
1762 if (st == NULL)
1763 return -ENOMEM;
1764
05394f39 1765 page_count = obj->base.size / PAGE_SIZE;
9da3da66
CW
1766 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1767 sg_free_table(st);
1768 kfree(st);
e5281ccd 1769 return -ENOMEM;
9da3da66 1770 }
e5281ccd 1771
9da3da66
CW
1772 /* Get the list of pages out of our struct file. They'll be pinned
1773 * at this point until we release them.
1774 *
1775 * Fail silently without starting the shrinker
1776 */
496ad9aa 1777 mapping = file_inode(obj->base.filp)->i_mapping;
6c085a72 1778 gfp = mapping_gfp_mask(mapping);
caf49191 1779 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
6c085a72 1780 gfp &= ~(__GFP_IO | __GFP_WAIT);
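	/* Note: with __GFP_WAIT and __GFP_IO cleared (and __GFP_NORETRY |
	 * __GFP_NOWARN set above), the first allocation attempt below cannot
	 * enter direct reclaim and fails quietly; we then purge our own
	 * buffers, and only as a last resort re-enable __GFP_IO | __GFP_WAIT
	 * and let the VM reclaim aggressively.
	 */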
90797e6d
ID
1781 sg = st->sgl;
1782 st->nents = 0;
1783 for (i = 0; i < page_count; i++) {
6c085a72
CW
1784 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1785 if (IS_ERR(page)) {
1786 i915_gem_purge(dev_priv, page_count);
1787 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1788 }
1789 if (IS_ERR(page)) {
1790 /* We've tried hard to allocate the memory by reaping
1791 * our own buffer, now let the real VM do its job and
1792 * go down in flames if truly OOM.
1793 */
caf49191 1794 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
6c085a72
CW
1795 gfp |= __GFP_IO | __GFP_WAIT;
1796
1797 i915_gem_shrink_all(dev_priv);
1798 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1799 if (IS_ERR(page))
1800 goto err_pages;
1801
caf49191 1802 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
6c085a72
CW
1803 gfp &= ~(__GFP_IO | __GFP_WAIT);
1804 }
1625e7e5
KRW
1805#ifdef CONFIG_SWIOTLB
1806 if (swiotlb_nr_tbl()) {
1807 st->nents++;
1808 sg_set_page(sg, page, PAGE_SIZE, 0);
1809 sg = sg_next(sg);
1810 continue;
1811 }
1812#endif
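		/* Note: the branch below coalesces physically contiguous pages
		 * into a single scatterlist entry by growing sg->length, so
		 * st->nents may end up smaller than page_count.
		 */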
90797e6d
ID
1813 if (!i || page_to_pfn(page) != last_pfn + 1) {
1814 if (i)
1815 sg = sg_next(sg);
1816 st->nents++;
1817 sg_set_page(sg, page, PAGE_SIZE, 0);
1818 } else {
1819 sg->length += PAGE_SIZE;
1820 }
1821 last_pfn = page_to_pfn(page);
e5281ccd 1822 }
1625e7e5
KRW
1823#ifdef CONFIG_SWIOTLB
1824 if (!swiotlb_nr_tbl())
1825#endif
1826 sg_mark_end(sg);
74ce6b6c
CW
1827 obj->pages = st;
1828
6dacfd2f 1829 if (i915_gem_object_needs_bit17_swizzle(obj))
e5281ccd
CW
1830 i915_gem_object_do_bit_17_swizzle(obj);
1831
1832 return 0;
1833
1834err_pages:
90797e6d
ID
1835 sg_mark_end(sg);
1836 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2db76d7c 1837 page_cache_release(sg_page_iter_page(&sg_iter));
9da3da66
CW
1838 sg_free_table(st);
1839 kfree(st);
e5281ccd 1840 return PTR_ERR(page);
673a394b
EA
1841}
1842
37e680a1
CW
1843/* Ensure that the associated pages are gathered from the backing storage
1844 * and pinned into our object. i915_gem_object_get_pages() may be called
1845 * multiple times before they are released by a single call to
1846 * i915_gem_object_put_pages() - once the pages are no longer referenced
1847 * either as a result of memory pressure (reaping pages under the shrinker)
1848 * or as the object is itself released.
1849 */
1850int
1851i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1852{
1853 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1854 const struct drm_i915_gem_object_ops *ops = obj->ops;
1855 int ret;
1856
2f745ad3 1857 if (obj->pages)
37e680a1
CW
1858 return 0;
1859
43e28f09
CW
1860 if (obj->madv != I915_MADV_WILLNEED) {
1861 DRM_ERROR("Attempting to obtain a purgeable object\n");
1862 return -EINVAL;
1863 }
1864
a5570178
CW
1865 BUG_ON(obj->pages_pin_count);
1866
37e680a1
CW
1867 ret = ops->get_pages(obj);
1868 if (ret)
1869 return ret;
1870
35c20a60 1871 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
37e680a1 1872 return 0;
673a394b
EA
1873}
1874
54cf91dc 1875void
05394f39 1876i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
9d773091 1877 struct intel_ring_buffer *ring)
673a394b 1878{
05394f39 1879 struct drm_device *dev = obj->base.dev;
69dc4987 1880 struct drm_i915_private *dev_priv = dev->dev_private;
9d773091 1881 u32 seqno = intel_ring_get_seqno(ring);
617dbe27 1882
852835f3 1883 BUG_ON(ring == NULL);
05394f39 1884 obj->ring = ring;
673a394b
EA
1885
1886 /* Add a reference if we're newly entering the active list. */
05394f39
CW
1887 if (!obj->active) {
1888 drm_gem_object_reference(&obj->base);
1889 obj->active = 1;
673a394b 1890 }
e35a41de 1891
673a394b 1892 /* Move from whatever list we were on to the tail of execution. */
05394f39
CW
1893 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1894 list_move_tail(&obj->ring_list, &ring->active_list);
caea7476 1895
0201f1ec 1896 obj->last_read_seqno = seqno;
caea7476 1897
7dd49065 1898 if (obj->fenced_gpu_access) {
caea7476 1899 obj->last_fenced_seqno = seqno;
caea7476 1900
7dd49065
CW
1901 /* Bump MRU to take account of the delayed flush */
1902 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1903 struct drm_i915_fence_reg *reg;
1904
1905 reg = &dev_priv->fence_regs[obj->fence_reg];
1906 list_move_tail(&reg->lru_list,
1907 &dev_priv->mm.fence_list);
1908 }
caea7476
CW
1909 }
1910}
1911
1912static void
caea7476 1913i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
ce44b0ea 1914{
05394f39 1915 struct drm_device *dev = obj->base.dev;
caea7476 1916 struct drm_i915_private *dev_priv = dev->dev_private;
ce44b0ea 1917
65ce3027 1918 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
05394f39 1919 BUG_ON(!obj->active);
caea7476 1920
1b50247a 1921 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
caea7476 1922
65ce3027 1923 list_del_init(&obj->ring_list);
caea7476
CW
1924 obj->ring = NULL;
1925
65ce3027
CW
1926 obj->last_read_seqno = 0;
1927 obj->last_write_seqno = 0;
1928 obj->base.write_domain = 0;
1929
1930 obj->last_fenced_seqno = 0;
caea7476 1931 obj->fenced_gpu_access = false;
caea7476
CW
1932
1933 obj->active = 0;
1934 drm_gem_object_unreference(&obj->base);
1935
1936 WARN_ON(i915_verify_lists(dev));
ce44b0ea 1937}
673a394b 1938
9d773091 1939static int
fca26bb4 1940i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
53d227f2 1941{
9d773091
CW
1942 struct drm_i915_private *dev_priv = dev->dev_private;
1943 struct intel_ring_buffer *ring;
1944 int ret, i, j;
53d227f2 1945
107f27a5 1946 /* Carefully retire all requests without writing to the rings */
9d773091 1947 for_each_ring(ring, dev_priv, i) {
107f27a5
CW
1948 ret = intel_ring_idle(ring);
1949 if (ret)
1950 return ret;
9d773091 1951 }
9d773091 1952 i915_gem_retire_requests(dev);
107f27a5
CW
1953
1954 /* Finally reset hw state */
9d773091 1955 for_each_ring(ring, dev_priv, i) {
fca26bb4 1956 intel_ring_init_seqno(ring, seqno);
498d2ac1 1957
9d773091
CW
1958 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1959 ring->sync_seqno[j] = 0;
1960 }
53d227f2 1961
9d773091 1962 return 0;
53d227f2
DV
1963}
1964
fca26bb4
MK
1965int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
1966{
1967 struct drm_i915_private *dev_priv = dev->dev_private;
1968 int ret;
1969
1970 if (seqno == 0)
1971 return -EINVAL;
1972
 1973 /* The seqno in the HWS page needs to be set to less than the
 1974 * value we will inject into the ring.
 1975 */
1976 ret = i915_gem_init_seqno(dev, seqno - 1);
1977 if (ret)
1978 return ret;
1979
1980 /* Carefully set the last_seqno value so that wrap
1981 * detection still works
1982 */
1983 dev_priv->next_seqno = seqno;
1984 dev_priv->last_seqno = seqno - 1;
1985 if (dev_priv->last_seqno == 0)
1986 dev_priv->last_seqno--;
1987
1988 return 0;
1989}
1990
9d773091
CW
1991int
1992i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
53d227f2 1993{
9d773091
CW
1994 struct drm_i915_private *dev_priv = dev->dev_private;
1995
1996 /* reserve 0 for non-seqno */
1997 if (dev_priv->next_seqno == 0) {
fca26bb4 1998 int ret = i915_gem_init_seqno(dev, 0);
9d773091
CW
1999 if (ret)
2000 return ret;
53d227f2 2001
9d773091
CW
2002 dev_priv->next_seqno = 1;
2003 }
53d227f2 2004
f72b3435 2005 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
9d773091 2006 return 0;
53d227f2
DV
2007}
2008
0025c077
MK
2009int __i915_add_request(struct intel_ring_buffer *ring,
2010 struct drm_file *file,
7d736f4f 2011 struct drm_i915_gem_object *obj,
0025c077 2012 u32 *out_seqno)
673a394b 2013{
db53a302 2014 drm_i915_private_t *dev_priv = ring->dev->dev_private;
acb868d3 2015 struct drm_i915_gem_request *request;
7d736f4f 2016 u32 request_ring_position, request_start;
673a394b 2017 int was_empty;
3cce469c
CW
2018 int ret;
2019
7d736f4f 2020 request_start = intel_ring_get_tail(ring);
cc889e0f
DV
2021 /*
2022 * Emit any outstanding flushes - execbuf can fail to emit the flush
 2023 * after having emitted the batchbuffer command. Hence we need to fix
 2024 * things up similarly to emitting the lazy request. The difference here
2025 * is that the flush _must_ happen before the next request, no matter
2026 * what.
2027 */
a7b9761d
CW
2028 ret = intel_ring_flush_all_caches(ring);
2029 if (ret)
2030 return ret;
cc889e0f 2031
acb868d3
CW
2032 request = kmalloc(sizeof(*request), GFP_KERNEL);
2033 if (request == NULL)
2034 return -ENOMEM;
cc889e0f 2035
673a394b 2036
a71d8d94
CW
2037 /* Record the position of the start of the request so that
2038 * should we detect the updated seqno part-way through the
2039 * GPU processing the request, we never over-estimate the
2040 * position of the head.
2041 */
2042 request_ring_position = intel_ring_get_tail(ring);
2043
9d773091 2044 ret = ring->add_request(ring);
3bb73aba
CW
2045 if (ret) {
2046 kfree(request);
2047 return ret;
2048 }
673a394b 2049
9d773091 2050 request->seqno = intel_ring_get_seqno(ring);
852835f3 2051 request->ring = ring;
7d736f4f 2052 request->head = request_start;
a71d8d94 2053 request->tail = request_ring_position;
0e50e96b 2054 request->ctx = ring->last_context;
7d736f4f
MK
2055 request->batch_obj = obj;
2056
2057 /* Whilst this request exists, batch_obj will be on the
2058 * active_list, and so will hold the active reference. Only when this
 2059 * request is retired will the batch_obj be moved onto the
2060 * inactive_list and lose its active reference. Hence we do not need
2061 * to explicitly hold another reference here.
2062 */
0e50e96b
MK
2063
2064 if (request->ctx)
2065 i915_gem_context_reference(request->ctx);
2066
673a394b 2067 request->emitted_jiffies = jiffies;
852835f3
ZN
2068 was_empty = list_empty(&ring->request_list);
2069 list_add_tail(&request->list, &ring->request_list);
3bb73aba 2070 request->file_priv = NULL;
852835f3 2071
db53a302
CW
2072 if (file) {
2073 struct drm_i915_file_private *file_priv = file->driver_priv;
2074
1c25595f 2075 spin_lock(&file_priv->mm.lock);
f787a5f5 2076 request->file_priv = file_priv;
b962442e 2077 list_add_tail(&request->client_list,
f787a5f5 2078 &file_priv->mm.request_list);
1c25595f 2079 spin_unlock(&file_priv->mm.lock);
b962442e 2080 }
673a394b 2081
9d773091 2082 trace_i915_gem_request_add(ring, request->seqno);
5391d0cf 2083 ring->outstanding_lazy_request = 0;
db53a302 2084
f65d9421 2085 if (!dev_priv->mm.suspended) {
3e0dc6b0 2086 if (i915_enable_hangcheck) {
99584db3 2087 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
cecc21fe 2088 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
3e0dc6b0 2089 }
f047e395 2090 if (was_empty) {
b3b079db 2091 queue_delayed_work(dev_priv->wq,
bcb45086
CW
2092 &dev_priv->mm.retire_work,
2093 round_jiffies_up_relative(HZ));
f047e395
CW
2094 intel_mark_busy(dev_priv->dev);
2095 }
f65d9421 2096 }
cc889e0f 2097
acb868d3 2098 if (out_seqno)
9d773091 2099 *out_seqno = request->seqno;
3cce469c 2100 return 0;
673a394b
EA
2101}
2102
f787a5f5
CW
2103static inline void
2104i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
673a394b 2105{
1c25595f 2106 struct drm_i915_file_private *file_priv = request->file_priv;
673a394b 2107
1c25595f
CW
2108 if (!file_priv)
2109 return;
1c5d22f7 2110
1c25595f 2111 spin_lock(&file_priv->mm.lock);
09bfa517
HRK
2112 if (request->file_priv) {
2113 list_del(&request->client_list);
2114 request->file_priv = NULL;
2115 }
1c25595f 2116 spin_unlock(&file_priv->mm.lock);
673a394b 2117}
673a394b 2118
aa60c664
MK
2119static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
2120{
2121 if (acthd >= obj->gtt_offset &&
2122 acthd < obj->gtt_offset + obj->base.size)
2123 return true;
2124
2125 return false;
2126}
2127
2128static bool i915_head_inside_request(const u32 acthd_unmasked,
2129 const u32 request_start,
2130 const u32 request_end)
2131{
2132 const u32 acthd = acthd_unmasked & HEAD_ADDR;
2133
2134 if (request_start < request_end) {
2135 if (acthd >= request_start && acthd < request_end)
2136 return true;
2137 } else if (request_start > request_end) {
2138 if (acthd >= request_start || acthd < request_end)
2139 return true;
2140 }
2141
2142 return false;
2143}
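/* Worked example (illustrative, not from the original source): with a ring
 * whose addresses wrap, a request may span request_start = 0xf000 to
 * request_end = 0x0100. An ACTHD of 0xf800 or 0x0040 then falls inside the
 * request, which is the case handled by the request_start > request_end
 * branch above.
 */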
2144
2145static bool i915_request_guilty(struct drm_i915_gem_request *request,
2146 const u32 acthd, bool *inside)
2147{
 2148 /* There is a possibility that the unmasked head address, while
 2149 * pointing inside the ring, matches the batch_obj address range.
 2150 * However, this is extremely unlikely.
2151 */
2152
2153 if (request->batch_obj) {
2154 if (i915_head_inside_object(acthd, request->batch_obj)) {
2155 *inside = true;
2156 return true;
2157 }
2158 }
2159
2160 if (i915_head_inside_request(acthd, request->head, request->tail)) {
2161 *inside = false;
2162 return true;
2163 }
2164
2165 return false;
2166}
2167
2168static void i915_set_reset_status(struct intel_ring_buffer *ring,
2169 struct drm_i915_gem_request *request,
2170 u32 acthd)
2171{
2172 struct i915_ctx_hang_stats *hs = NULL;
2173 bool inside, guilty;
2174
2175 /* Innocent until proven guilty */
2176 guilty = false;
2177
2178 if (ring->hangcheck.action != wait &&
2179 i915_request_guilty(request, acthd, &inside)) {
2180 DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
2181 ring->name,
2182 inside ? "inside" : "flushing",
2183 request->batch_obj ?
2184 request->batch_obj->gtt_offset : 0,
2185 request->ctx ? request->ctx->id : 0,
2186 acthd);
2187
2188 guilty = true;
2189 }
2190
2191 /* If contexts are disabled or this is the default context, use
 2192 * file_priv->hang_stats
2193 */
2194 if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2195 hs = &request->ctx->hang_stats;
2196 else if (request->file_priv)
2197 hs = &request->file_priv->hang_stats;
2198
2199 if (hs) {
2200 if (guilty)
2201 hs->batch_active++;
2202 else
2203 hs->batch_pending++;
2204 }
2205}
2206
0e50e96b
MK
2207static void i915_gem_free_request(struct drm_i915_gem_request *request)
2208{
2209 list_del(&request->list);
2210 i915_gem_request_remove_from_client(request);
2211
2212 if (request->ctx)
2213 i915_gem_context_unreference(request->ctx);
2214
2215 kfree(request);
2216}
2217
dfaae392
CW
2218static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2219 struct intel_ring_buffer *ring)
9375e446 2220{
aa60c664
MK
2221 u32 completed_seqno;
2222 u32 acthd;
2223
2224 acthd = intel_ring_get_active_head(ring);
2225 completed_seqno = ring->get_seqno(ring, false);
2226
dfaae392
CW
2227 while (!list_empty(&ring->request_list)) {
2228 struct drm_i915_gem_request *request;
673a394b 2229
dfaae392
CW
2230 request = list_first_entry(&ring->request_list,
2231 struct drm_i915_gem_request,
2232 list);
de151cf6 2233
aa60c664
MK
2234 if (request->seqno > completed_seqno)
2235 i915_set_reset_status(ring, request, acthd);
2236
0e50e96b 2237 i915_gem_free_request(request);
dfaae392 2238 }
673a394b 2239
dfaae392 2240 while (!list_empty(&ring->active_list)) {
05394f39 2241 struct drm_i915_gem_object *obj;
9375e446 2242
05394f39
CW
2243 obj = list_first_entry(&ring->active_list,
2244 struct drm_i915_gem_object,
2245 ring_list);
9375e446 2246
05394f39 2247 i915_gem_object_move_to_inactive(obj);
673a394b
EA
2248 }
2249}
2250
312817a3
CW
2251static void i915_gem_reset_fences(struct drm_device *dev)
2252{
2253 struct drm_i915_private *dev_priv = dev->dev_private;
2254 int i;
2255
4b9de737 2256 for (i = 0; i < dev_priv->num_fence_regs; i++) {
312817a3 2257 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
7d2cb39c 2258
ada726c7
CW
2259 if (reg->obj)
2260 i915_gem_object_fence_lost(reg->obj);
7d2cb39c 2261
f9c513e9
CW
2262 i915_gem_write_fence(dev, i, NULL);
2263
ada726c7
CW
2264 reg->pin_count = 0;
2265 reg->obj = NULL;
2266 INIT_LIST_HEAD(&reg->lru_list);
312817a3 2267 }
ada726c7
CW
2268
2269 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
312817a3
CW
2270}
2271
069efc1d 2272void i915_gem_reset(struct drm_device *dev)
673a394b 2273{
77f01230 2274 struct drm_i915_private *dev_priv = dev->dev_private;
05394f39 2275 struct drm_i915_gem_object *obj;
b4519513 2276 struct intel_ring_buffer *ring;
1ec14ad3 2277 int i;
673a394b 2278
b4519513
CW
2279 for_each_ring(ring, dev_priv, i)
2280 i915_gem_reset_ring_lists(dev_priv, ring);
dfaae392 2281
dfaae392
CW
2282 /* Move everything out of the GPU domains to ensure we do any
2283 * necessary invalidation upon reuse.
2284 */
05394f39 2285 list_for_each_entry(obj,
77f01230 2286 &dev_priv->mm.inactive_list,
69dc4987 2287 mm_list)
77f01230 2288 {
05394f39 2289 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
77f01230 2290 }
069efc1d
CW
2291
2292 /* The fence registers are invalidated so clear them out */
312817a3 2293 i915_gem_reset_fences(dev);
673a394b
EA
2294}
2295
2296/**
2297 * This function clears the request list as sequence numbers are passed.
2298 */
a71d8d94 2299void
db53a302 2300i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
673a394b 2301{
673a394b
EA
2302 uint32_t seqno;
2303
db53a302 2304 if (list_empty(&ring->request_list))
6c0594a3
KW
2305 return;
2306
db53a302 2307 WARN_ON(i915_verify_lists(ring->dev));
673a394b 2308
b2eadbc8 2309 seqno = ring->get_seqno(ring, true);
1ec14ad3 2310
852835f3 2311 while (!list_empty(&ring->request_list)) {
673a394b 2312 struct drm_i915_gem_request *request;
673a394b 2313
852835f3 2314 request = list_first_entry(&ring->request_list,
673a394b
EA
2315 struct drm_i915_gem_request,
2316 list);
673a394b 2317
dfaae392 2318 if (!i915_seqno_passed(seqno, request->seqno))
b84d5f0c
CW
2319 break;
2320
db53a302 2321 trace_i915_gem_request_retire(ring, request->seqno);
a71d8d94
CW
2322 /* We know the GPU must have read the request to have
2323 * sent us the seqno + interrupt, so use the position
 2324 * of the tail of the request to update the last known position
2325 * of the GPU head.
2326 */
2327 ring->last_retired_head = request->tail;
b84d5f0c 2328
0e50e96b 2329 i915_gem_free_request(request);
b84d5f0c 2330 }
673a394b 2331
b84d5f0c
CW
2332 /* Move any buffers on the active list that are no longer referenced
2333 * by the ringbuffer to the flushing/inactive lists as appropriate.
2334 */
2335 while (!list_empty(&ring->active_list)) {
05394f39 2336 struct drm_i915_gem_object *obj;
b84d5f0c 2337
0206e353 2338 obj = list_first_entry(&ring->active_list,
05394f39
CW
2339 struct drm_i915_gem_object,
2340 ring_list);
673a394b 2341
0201f1ec 2342 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
673a394b 2343 break;
b84d5f0c 2344
65ce3027 2345 i915_gem_object_move_to_inactive(obj);
673a394b 2346 }
9d34e5db 2347
db53a302
CW
2348 if (unlikely(ring->trace_irq_seqno &&
2349 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1ec14ad3 2350 ring->irq_put(ring);
db53a302 2351 ring->trace_irq_seqno = 0;
9d34e5db 2352 }
23bc5982 2353
db53a302 2354 WARN_ON(i915_verify_lists(ring->dev));
673a394b
EA
2355}
2356
b09a1fec
CW
2357void
2358i915_gem_retire_requests(struct drm_device *dev)
2359{
2360 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 2361 struct intel_ring_buffer *ring;
1ec14ad3 2362 int i;
b09a1fec 2363
b4519513
CW
2364 for_each_ring(ring, dev_priv, i)
2365 i915_gem_retire_requests_ring(ring);
b09a1fec
CW
2366}
2367
75ef9da2 2368static void
673a394b
EA
2369i915_gem_retire_work_handler(struct work_struct *work)
2370{
2371 drm_i915_private_t *dev_priv;
2372 struct drm_device *dev;
b4519513 2373 struct intel_ring_buffer *ring;
0a58705b
CW
2374 bool idle;
2375 int i;
673a394b
EA
2376
2377 dev_priv = container_of(work, drm_i915_private_t,
2378 mm.retire_work.work);
2379 dev = dev_priv->dev;
2380
891b48cf
CW
2381 /* Come back later if the device is busy... */
2382 if (!mutex_trylock(&dev->struct_mutex)) {
bcb45086
CW
2383 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2384 round_jiffies_up_relative(HZ));
891b48cf
CW
2385 return;
2386 }
673a394b 2387
b09a1fec 2388 i915_gem_retire_requests(dev);
673a394b 2389
0a58705b
CW
2390 /* Send a periodic flush down the ring so we don't hold onto GEM
2391 * objects indefinitely.
673a394b 2392 */
0a58705b 2393 idle = true;
b4519513 2394 for_each_ring(ring, dev_priv, i) {
3bb73aba 2395 if (ring->gpu_caches_dirty)
0025c077 2396 i915_add_request(ring, NULL);
0a58705b
CW
2397
2398 idle &= list_empty(&ring->request_list);
673a394b
EA
2399 }
2400
0a58705b 2401 if (!dev_priv->mm.suspended && !idle)
bcb45086
CW
2402 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2403 round_jiffies_up_relative(HZ));
f047e395
CW
2404 if (idle)
2405 intel_mark_idle(dev);
0a58705b 2406
673a394b 2407 mutex_unlock(&dev->struct_mutex);
673a394b
EA
2408}
2409
30dfebf3
DV
2410/**
2411 * Ensures that an object will eventually get non-busy by flushing any required
 2412 * write domains, emitting any outstanding lazy request and retiring any
 2413 * completed requests.
2414 */
2415static int
2416i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2417{
2418 int ret;
2419
2420 if (obj->active) {
0201f1ec 2421 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
30dfebf3
DV
2422 if (ret)
2423 return ret;
2424
30dfebf3
DV
2425 i915_gem_retire_requests_ring(obj->ring);
2426 }
2427
2428 return 0;
2429}
2430
23ba4fd0
BW
2431/**
2432 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2433 * @DRM_IOCTL_ARGS: standard ioctl arguments
2434 *
2435 * Returns 0 if successful, else an error is returned with the remaining time in
2436 * the timeout parameter.
2437 * -ETIME: object is still busy after timeout
2438 * -ERESTARTSYS: signal interrupted the wait
 2439 * -ENOENT: object doesn't exist
2440 * Also possible, but rare:
2441 * -EAGAIN: GPU wedged
2442 * -ENOMEM: damn
2443 * -ENODEV: Internal IRQ fail
2444 * -E?: The add request failed
2445 *
2446 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2447 * non-zero timeout parameter the wait ioctl will wait for the given number of
2448 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2449 * without holding struct_mutex the object may become re-busied before this
 2450 * function completes. A similar but shorter race condition exists in the
 2451 * busy ioctl. (An illustrative userspace sketch follows this function.)
2452 */
2453int
2454i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2455{
f69061be 2456 drm_i915_private_t *dev_priv = dev->dev_private;
23ba4fd0
BW
2457 struct drm_i915_gem_wait *args = data;
2458 struct drm_i915_gem_object *obj;
2459 struct intel_ring_buffer *ring = NULL;
eac1f14f 2460 struct timespec timeout_stack, *timeout = NULL;
f69061be 2461 unsigned reset_counter;
23ba4fd0
BW
2462 u32 seqno = 0;
2463 int ret = 0;
2464
eac1f14f
BW
2465 if (args->timeout_ns >= 0) {
2466 timeout_stack = ns_to_timespec(args->timeout_ns);
2467 timeout = &timeout_stack;
2468 }
23ba4fd0
BW
2469
2470 ret = i915_mutex_lock_interruptible(dev);
2471 if (ret)
2472 return ret;
2473
2474 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2475 if (&obj->base == NULL) {
2476 mutex_unlock(&dev->struct_mutex);
2477 return -ENOENT;
2478 }
2479
30dfebf3
DV
2480 /* Need to make sure the object gets inactive eventually. */
2481 ret = i915_gem_object_flush_active(obj);
23ba4fd0
BW
2482 if (ret)
2483 goto out;
2484
2485 if (obj->active) {
0201f1ec 2486 seqno = obj->last_read_seqno;
23ba4fd0
BW
2487 ring = obj->ring;
2488 }
2489
2490 if (seqno == 0)
2491 goto out;
2492
23ba4fd0
BW
2493 /* Do this after OLR check to make sure we make forward progress polling
2494 * on this IOCTL with a 0 timeout (like busy ioctl)
2495 */
2496 if (!args->timeout_ns) {
2497 ret = -ETIME;
2498 goto out;
2499 }
2500
2501 drm_gem_object_unreference(&obj->base);
f69061be 2502 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
23ba4fd0
BW
2503 mutex_unlock(&dev->struct_mutex);
2504
f69061be 2505 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
4f42f4ef 2506 if (timeout)
eac1f14f 2507 args->timeout_ns = timespec_to_ns(timeout);
23ba4fd0
BW
2508 return ret;
2509
2510out:
2511 drm_gem_object_unreference(&obj->base);
2512 mutex_unlock(&dev->struct_mutex);
2513 return ret;
2514}
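/*
 * Illustrative userspace sketch (not part of i915_gem.c): one way a client
 * might drive the wait ioctl documented above, assuming the uapi definitions
 * in include/uapi/drm/i915_drm.h and libdrm's drmIoctl(); "fd" is an open DRM
 * device fd and "handle" a GEM buffer handle.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000 * 1000 * 1000,	// wait up to ~1s
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0) {
 *		// object is idle; wait.timeout_ns holds the remaining time
 *	} else if (errno == ETIME) {
 *		// object was still busy when the timeout expired
 *	}
 */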
2515
5816d648
BW
2516/**
2517 * i915_gem_object_sync - sync an object to a ring.
2518 *
2519 * @obj: object which may be in use on another ring.
2520 * @to: ring we wish to use the object on. May be NULL.
2521 *
2522 * This code is meant to abstract object synchronization with the GPU.
2523 * Calling with NULL implies synchronizing the object with the CPU
2524 * rather than a particular GPU ring.
2525 *
2526 * Returns 0 if successful, else propagates up the lower layer error.
2527 */
2911a35b
BW
2528int
2529i915_gem_object_sync(struct drm_i915_gem_object *obj,
2530 struct intel_ring_buffer *to)
2531{
2532 struct intel_ring_buffer *from = obj->ring;
2533 u32 seqno;
2534 int ret, idx;
2535
2536 if (from == NULL || to == from)
2537 return 0;
2538
5816d648 2539 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
0201f1ec 2540 return i915_gem_object_wait_rendering(obj, false);
2911a35b
BW
2541
2542 idx = intel_ring_sync_index(from, to);
2543
0201f1ec 2544 seqno = obj->last_read_seqno;
2911a35b
BW
2545 if (seqno <= from->sync_seqno[idx])
2546 return 0;
2547
b4aca010
BW
2548 ret = i915_gem_check_olr(obj->ring, seqno);
2549 if (ret)
2550 return ret;
2911a35b 2551
1500f7ea 2552 ret = to->sync_to(to, from, seqno);
e3a5a225 2553 if (!ret)
7b01e260
MK
2554 /* We use last_read_seqno because sync_to()
2555 * might have just caused seqno wrap under
2556 * the radar.
2557 */
2558 from->sync_seqno[idx] = obj->last_read_seqno;
2911a35b 2559
e3a5a225 2560 return ret;
2911a35b
BW
2561}
2562
b5ffc9bc
CW
2563static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2564{
2565 u32 old_write_domain, old_read_domains;
2566
b5ffc9bc
CW
2567 /* Force a pagefault for domain tracking on next user access */
2568 i915_gem_release_mmap(obj);
2569
b97c3d9c
KP
2570 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2571 return;
2572
97c809fd
CW
2573 /* Wait for any direct GTT access to complete */
2574 mb();
2575
b5ffc9bc
CW
2576 old_read_domains = obj->base.read_domains;
2577 old_write_domain = obj->base.write_domain;
2578
2579 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2580 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2581
2582 trace_i915_gem_object_change_domain(obj,
2583 old_read_domains,
2584 old_write_domain);
2585}
2586
673a394b
EA
2587/**
2588 * Unbinds an object from the GTT aperture.
2589 */
0f973f27 2590int
05394f39 2591i915_gem_object_unbind(struct drm_i915_gem_object *obj)
673a394b 2592{
7bddb01f 2593 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
43e28f09 2594 int ret;
673a394b 2595
05394f39 2596 if (obj->gtt_space == NULL)
673a394b
EA
2597 return 0;
2598
31d8d651
CW
2599 if (obj->pin_count)
2600 return -EBUSY;
673a394b 2601
c4670ad0
CW
2602 BUG_ON(obj->pages == NULL);
2603
a8198eea 2604 ret = i915_gem_object_finish_gpu(obj);
1488fc08 2605 if (ret)
a8198eea
CW
2606 return ret;
2607 /* Continue on if we fail due to EIO, the GPU is hung so we
2608 * should be safe and we need to cleanup or else we might
2609 * cause memory corruption through use-after-free.
2610 */
2611
b5ffc9bc 2612 i915_gem_object_finish_gtt(obj);
5323fd04 2613
96b47b65 2614 /* release the fence reg _after_ flushing */
d9e86c0e 2615 ret = i915_gem_object_put_fence(obj);
1488fc08 2616 if (ret)
d9e86c0e 2617 return ret;
96b47b65 2618
db53a302
CW
2619 trace_i915_gem_object_unbind(obj);
2620
74898d7e
DV
2621 if (obj->has_global_gtt_mapping)
2622 i915_gem_gtt_unbind_object(obj);
7bddb01f
DV
2623 if (obj->has_aliasing_ppgtt_mapping) {
2624 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2625 obj->has_aliasing_ppgtt_mapping = 0;
2626 }
74163907 2627 i915_gem_gtt_finish_object(obj);
401c29f6 2628 i915_gem_object_unpin_pages(obj);
7bddb01f 2629
6c085a72 2630 list_del(&obj->mm_list);
35c20a60 2631 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
75e9e915 2632 /* Avoid an unnecessary call to unbind on rebind. */
05394f39 2633 obj->map_and_fenceable = true;
673a394b 2634
05394f39
CW
2635 drm_mm_put_block(obj->gtt_space);
2636 obj->gtt_space = NULL;
2637 obj->gtt_offset = 0;
673a394b 2638
88241785 2639 return 0;
54cf91dc
CW
2640}
2641
b2da9fe5 2642int i915_gpu_idle(struct drm_device *dev)
4df2faf4
DV
2643{
2644 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 2645 struct intel_ring_buffer *ring;
1ec14ad3 2646 int ret, i;
4df2faf4 2647
4df2faf4 2648 /* Flush everything onto the inactive list. */
b4519513 2649 for_each_ring(ring, dev_priv, i) {
b6c7488d
BW
2650 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2651 if (ret)
2652 return ret;
2653
3e960501 2654 ret = intel_ring_idle(ring);
1ec14ad3
CW
2655 if (ret)
2656 return ret;
2657 }
4df2faf4 2658
8a1a49f9 2659 return 0;
4df2faf4
DV
2660}
2661
9ce079e4
CW
2662static void i965_write_fence_reg(struct drm_device *dev, int reg,
2663 struct drm_i915_gem_object *obj)
de151cf6 2664{
de151cf6 2665 drm_i915_private_t *dev_priv = dev->dev_private;
56c844e5
ID
2666 int fence_reg;
2667 int fence_pitch_shift;
de151cf6
JB
2668 uint64_t val;
2669
56c844e5
ID
2670 if (INTEL_INFO(dev)->gen >= 6) {
2671 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2672 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2673 } else {
2674 fence_reg = FENCE_REG_965_0;
2675 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2676 }
2677
9ce079e4
CW
2678 if (obj) {
2679 u32 size = obj->gtt_space->size;
de151cf6 2680
9ce079e4
CW
2681 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2682 0xfffff000) << 32;
2683 val |= obj->gtt_offset & 0xfffff000;
56c844e5 2684 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
9ce079e4
CW
2685 if (obj->tiling_mode == I915_TILING_Y)
2686 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2687 val |= I965_FENCE_REG_VALID;
2688 } else
2689 val = 0;
c6642782 2690
56c844e5
ID
2691 fence_reg += reg * 8;
2692 I915_WRITE64(fence_reg, val);
2693 POSTING_READ(fence_reg);
de151cf6
JB
2694}
2695
9ce079e4
CW
2696static void i915_write_fence_reg(struct drm_device *dev, int reg,
2697 struct drm_i915_gem_object *obj)
de151cf6 2698{
de151cf6 2699 drm_i915_private_t *dev_priv = dev->dev_private;
9ce079e4 2700 u32 val;
de151cf6 2701
9ce079e4
CW
2702 if (obj) {
2703 u32 size = obj->gtt_space->size;
2704 int pitch_val;
2705 int tile_width;
c6642782 2706
9ce079e4
CW
2707 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2708 (size & -size) != size ||
2709 (obj->gtt_offset & (size - 1)),
2710 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2711 obj->gtt_offset, obj->map_and_fenceable, size);
c6642782 2712
9ce079e4
CW
2713 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2714 tile_width = 128;
2715 else
2716 tile_width = 512;
2717
2718 /* Note: pitch better be a power of two tile widths */
2719 pitch_val = obj->stride / tile_width;
2720 pitch_val = ffs(pitch_val) - 1;
2721
2722 val = obj->gtt_offset;
2723 if (obj->tiling_mode == I915_TILING_Y)
2724 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2725 val |= I915_FENCE_SIZE_BITS(size);
2726 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2727 val |= I830_FENCE_REG_VALID;
2728 } else
2729 val = 0;
2730
2731 if (reg < 8)
2732 reg = FENCE_REG_830_0 + reg * 4;
2733 else
2734 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2735
2736 I915_WRITE(reg, val);
2737 POSTING_READ(reg);
de151cf6
JB
2738}
2739
9ce079e4
CW
2740static void i830_write_fence_reg(struct drm_device *dev, int reg,
2741 struct drm_i915_gem_object *obj)
de151cf6 2742{
de151cf6 2743 drm_i915_private_t *dev_priv = dev->dev_private;
de151cf6 2744 uint32_t val;
de151cf6 2745
9ce079e4
CW
2746 if (obj) {
2747 u32 size = obj->gtt_space->size;
2748 uint32_t pitch_val;
de151cf6 2749
9ce079e4
CW
2750 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2751 (size & -size) != size ||
2752 (obj->gtt_offset & (size - 1)),
2753 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2754 obj->gtt_offset, size);
e76a16de 2755
9ce079e4
CW
2756 pitch_val = obj->stride / 128;
2757 pitch_val = ffs(pitch_val) - 1;
de151cf6 2758
9ce079e4
CW
2759 val = obj->gtt_offset;
2760 if (obj->tiling_mode == I915_TILING_Y)
2761 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2762 val |= I830_FENCE_SIZE_BITS(size);
2763 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2764 val |= I830_FENCE_REG_VALID;
2765 } else
2766 val = 0;
c6642782 2767
9ce079e4
CW
2768 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2769 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2770}
2771
d0a57789
CW
2772inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2773{
2774 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2775}
2776
9ce079e4
CW
2777static void i915_gem_write_fence(struct drm_device *dev, int reg,
2778 struct drm_i915_gem_object *obj)
2779{
d0a57789
CW
2780 struct drm_i915_private *dev_priv = dev->dev_private;
2781
2782 /* Ensure that all CPU reads are completed before installing a fence
2783 * and all writes before removing the fence.
2784 */
2785 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2786 mb();
2787
9ce079e4
CW
2788 switch (INTEL_INFO(dev)->gen) {
2789 case 7:
56c844e5 2790 case 6:
9ce079e4
CW
2791 case 5:
2792 case 4: i965_write_fence_reg(dev, reg, obj); break;
2793 case 3: i915_write_fence_reg(dev, reg, obj); break;
2794 case 2: i830_write_fence_reg(dev, reg, obj); break;
7dbf9d6e 2795 default: BUG();
9ce079e4 2796 }
d0a57789
CW
2797
2798 /* And similarly be paranoid that no direct access to this region
2799 * is reordered to before the fence is installed.
2800 */
2801 if (i915_gem_object_needs_mb(obj))
2802 mb();
de151cf6
JB
2803}
2804
61050808
CW
2805static inline int fence_number(struct drm_i915_private *dev_priv,
2806 struct drm_i915_fence_reg *fence)
2807{
2808 return fence - dev_priv->fence_regs;
2809}
2810
2dc8aae0
CW
2811struct write_fence {
2812 struct drm_device *dev;
2813 struct drm_i915_gem_object *obj;
2814 int fence;
2815};
2816
25ff1195
CW
2817static void i915_gem_write_fence__ipi(void *data)
2818{
2dc8aae0
CW
2819 struct write_fence *args = data;
2820
2821 /* Required for SNB+ with LLC */
25ff1195 2822 wbinvd();
2dc8aae0
CW
2823
2824 /* Required for VLV */
2825 i915_gem_write_fence(args->dev, args->fence, args->obj);
25ff1195
CW
2826}
2827
61050808
CW
2828static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2829 struct drm_i915_fence_reg *fence,
2830 bool enable)
2831{
2dc8aae0
CW
2832 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2833 struct write_fence args = {
2834 .dev = obj->base.dev,
2835 .fence = fence_number(dev_priv, fence),
2836 .obj = enable ? obj : NULL,
2837 };
25ff1195
CW
2838
2839 /* In order to fully serialize access to the fenced region and
2840 * the update to the fence register we need to take extreme
2841 * measures on SNB+. In theory, the write to the fence register
2842 * flushes all memory transactions before, and coupled with the
2843 * mb() placed around the register write we serialise all memory
2844 * operations with respect to the changes in the tiler. Yet, on
2845 * SNB+ we need to take a step further and emit an explicit wbinvd()
2846 * on each processor in order to manually flush all memory
2847 * transactions before updating the fence register.
2dc8aae0
CW
2848 *
 2849 * However, Valleyview complicates matters. There the wbinvd is
2850 * insufficient and unlike SNB/IVB requires the serialising
2851 * register write. (Note that that register write by itself is
2852 * conversely not sufficient for SNB+.) To compromise, we do both.
25ff1195 2853 */
2dc8aae0
CW
2854 if (INTEL_INFO(args.dev)->gen >= 6)
2855 on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
2856 else
2857 i915_gem_write_fence(args.dev, args.fence, args.obj);
61050808
CW
2858
2859 if (enable) {
2dc8aae0 2860 obj->fence_reg = args.fence;
61050808
CW
2861 fence->obj = obj;
2862 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2863 } else {
2864 obj->fence_reg = I915_FENCE_REG_NONE;
2865 fence->obj = NULL;
2866 list_del_init(&fence->lru_list);
2867 }
2868}
2869
d9e86c0e 2870static int
d0a57789 2871i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
d9e86c0e 2872{
1c293ea3 2873 if (obj->last_fenced_seqno) {
86d5bc37 2874 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
18991845
CW
2875 if (ret)
2876 return ret;
d9e86c0e
CW
2877
2878 obj->last_fenced_seqno = 0;
d9e86c0e
CW
2879 }
2880
86d5bc37 2881 obj->fenced_gpu_access = false;
d9e86c0e
CW
2882 return 0;
2883}
2884
2885int
2886i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2887{
61050808 2888 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
f9c513e9 2889 struct drm_i915_fence_reg *fence;
d9e86c0e
CW
2890 int ret;
2891
d0a57789 2892 ret = i915_gem_object_wait_fence(obj);
d9e86c0e
CW
2893 if (ret)
2894 return ret;
2895
61050808
CW
2896 if (obj->fence_reg == I915_FENCE_REG_NONE)
2897 return 0;
d9e86c0e 2898
f9c513e9
CW
2899 fence = &dev_priv->fence_regs[obj->fence_reg];
2900
61050808 2901 i915_gem_object_fence_lost(obj);
f9c513e9 2902 i915_gem_object_update_fence(obj, fence, false);
d9e86c0e
CW
2903
2904 return 0;
2905}
2906
2907static struct drm_i915_fence_reg *
a360bb1a 2908i915_find_fence_reg(struct drm_device *dev)
ae3db24a 2909{
ae3db24a 2910 struct drm_i915_private *dev_priv = dev->dev_private;
8fe301ad 2911 struct drm_i915_fence_reg *reg, *avail;
d9e86c0e 2912 int i;
ae3db24a
DV
2913
2914 /* First try to find a free reg */
d9e86c0e 2915 avail = NULL;
ae3db24a
DV
2916 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2917 reg = &dev_priv->fence_regs[i];
2918 if (!reg->obj)
d9e86c0e 2919 return reg;
ae3db24a 2920
1690e1eb 2921 if (!reg->pin_count)
d9e86c0e 2922 avail = reg;
ae3db24a
DV
2923 }
2924
d9e86c0e
CW
2925 if (avail == NULL)
2926 return NULL;
ae3db24a
DV
2927
2928 /* None available, try to steal one or wait for a user to finish */
d9e86c0e 2929 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
1690e1eb 2930 if (reg->pin_count)
ae3db24a
DV
2931 continue;
2932
8fe301ad 2933 return reg;
ae3db24a
DV
2934 }
2935
8fe301ad 2936 return NULL;
ae3db24a
DV
2937}
2938
de151cf6 2939/**
9a5a53b3 2940 * i915_gem_object_get_fence - set up fencing for an object
de151cf6
JB
2941 * @obj: object to map through a fence reg
2942 *
2943 * When mapping objects through the GTT, userspace wants to be able to write
2944 * to them without having to worry about swizzling if the object is tiled.
de151cf6
JB
2945 * This function walks the fence regs looking for a free one for @obj,
2946 * stealing one if it can't find any.
2947 *
2948 * It then sets up the reg based on the object's properties: address, pitch
2949 * and tiling format.
9a5a53b3
CW
2950 *
2951 * For an untiled surface, this removes any existing fence.
de151cf6 2952 */
8c4b8c3f 2953int
06d98131 2954i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
de151cf6 2955{
05394f39 2956 struct drm_device *dev = obj->base.dev;
79e53945 2957 struct drm_i915_private *dev_priv = dev->dev_private;
14415745 2958 bool enable = obj->tiling_mode != I915_TILING_NONE;
d9e86c0e 2959 struct drm_i915_fence_reg *reg;
ae3db24a 2960 int ret;
de151cf6 2961
14415745
CW
2962 /* Have we updated the tiling parameters upon the object and so
2963 * will need to serialise the write to the associated fence register?
2964 */
5d82e3e6 2965 if (obj->fence_dirty) {
d0a57789 2966 ret = i915_gem_object_wait_fence(obj);
14415745
CW
2967 if (ret)
2968 return ret;
2969 }
9a5a53b3 2970
d9e86c0e 2971 /* Just update our place in the LRU if our fence is getting reused. */
05394f39
CW
2972 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2973 reg = &dev_priv->fence_regs[obj->fence_reg];
5d82e3e6 2974 if (!obj->fence_dirty) {
14415745
CW
2975 list_move_tail(&reg->lru_list,
2976 &dev_priv->mm.fence_list);
2977 return 0;
2978 }
2979 } else if (enable) {
2980 reg = i915_find_fence_reg(dev);
2981 if (reg == NULL)
2982 return -EDEADLK;
d9e86c0e 2983
14415745
CW
2984 if (reg->obj) {
2985 struct drm_i915_gem_object *old = reg->obj;
2986
d0a57789 2987 ret = i915_gem_object_wait_fence(old);
29c5a587
CW
2988 if (ret)
2989 return ret;
2990
14415745 2991 i915_gem_object_fence_lost(old);
29c5a587 2992 }
14415745 2993 } else
a09ba7fa 2994 return 0;
a09ba7fa 2995
14415745 2996 i915_gem_object_update_fence(obj, reg, enable);
5d82e3e6 2997 obj->fence_dirty = false;
14415745 2998
9ce079e4 2999 return 0;
de151cf6
JB
3000}
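/* Illustrative caller sketch (an assumption, not part of this file): a
 * typical user such as the scanout setup path pins the object into the
 * mappable GTT and then asks for a fence:
 *
 *	ret = i915_gem_object_pin(obj, alignment, true, false);
 *	if (ret == 0)
 *		ret = i915_gem_object_get_fence(obj);
 */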
3001
42d6ab48
CW
3002static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3003 struct drm_mm_node *gtt_space,
3004 unsigned long cache_level)
3005{
3006 struct drm_mm_node *other;
3007
3008 /* On non-LLC machines we have to be careful when putting differing
3009 * types of snoopable memory together to avoid the prefetcher
4239ca77 3010 * crossing memory domains and dying.
42d6ab48
CW
3011 */
3012 if (HAS_LLC(dev))
3013 return true;
3014
3015 if (gtt_space == NULL)
3016 return true;
3017
3018 if (list_empty(&gtt_space->node_list))
3019 return true;
3020
3021 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3022 if (other->allocated && !other->hole_follows && other->color != cache_level)
3023 return false;
3024
3025 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3026 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3027 return false;
3028
3029 return true;
3030}
3031
3032static void i915_gem_verify_gtt(struct drm_device *dev)
3033{
3034#if WATCH_GTT
3035 struct drm_i915_private *dev_priv = dev->dev_private;
3036 struct drm_i915_gem_object *obj;
3037 int err = 0;
3038
35c20a60 3039 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
42d6ab48
CW
3040 if (obj->gtt_space == NULL) {
3041 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3042 err++;
3043 continue;
3044 }
3045
3046 if (obj->cache_level != obj->gtt_space->color) {
3047 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3048 obj->gtt_space->start,
3049 obj->gtt_space->start + obj->gtt_space->size,
3050 obj->cache_level,
3051 obj->gtt_space->color);
3052 err++;
3053 continue;
3054 }
3055
3056 if (!i915_gem_valid_gtt_space(dev,
3057 obj->gtt_space,
3058 obj->cache_level)) {
3059 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3060 obj->gtt_space->start,
3061 obj->gtt_space->start + obj->gtt_space->size,
3062 obj->cache_level);
3063 err++;
3064 continue;
3065 }
3066 }
3067
3068 WARN_ON(err);
3069#endif
3070}
3071
673a394b
EA
3072/**
3073 * Finds free space in the GTT aperture and binds the object there.
3074 */
3075static int
05394f39 3076i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
920afa77 3077 unsigned alignment,
86a1ee26
CW
3078 bool map_and_fenceable,
3079 bool nonblocking)
673a394b 3080{
05394f39 3081 struct drm_device *dev = obj->base.dev;
673a394b 3082 drm_i915_private_t *dev_priv = dev->dev_private;
dc9dd7a2 3083 struct drm_mm_node *node;
5e783301 3084 u32 size, fence_size, fence_alignment, unfenced_alignment;
75e9e915 3085 bool mappable, fenceable;
0a9ae0d7
BW
3086 size_t gtt_max = map_and_fenceable ?
3087 dev_priv->gtt.mappable_end : dev_priv->gtt.total;
07f73f69 3088 int ret;
673a394b 3089
e28f8711
CW
3090 fence_size = i915_gem_get_gtt_size(dev,
3091 obj->base.size,
3092 obj->tiling_mode);
3093 fence_alignment = i915_gem_get_gtt_alignment(dev,
3094 obj->base.size,
d865110c 3095 obj->tiling_mode, true);
e28f8711 3096 unfenced_alignment =
d865110c 3097 i915_gem_get_gtt_alignment(dev,
e28f8711 3098 obj->base.size,
d865110c 3099 obj->tiling_mode, false);
a00b10c3 3100
673a394b 3101 if (alignment == 0)
5e783301
DV
3102 alignment = map_and_fenceable ? fence_alignment :
3103 unfenced_alignment;
75e9e915 3104 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
673a394b
EA
3105 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3106 return -EINVAL;
3107 }
3108
05394f39 3109 size = map_and_fenceable ? fence_size : obj->base.size;
a00b10c3 3110
654fc607
CW
3111 /* If the object is bigger than the entire aperture, reject it early
3112 * before evicting everything in a vain attempt to find space.
3113 */
0a9ae0d7 3114 if (obj->base.size > gtt_max) {
a36689cb
CW
3115 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%ld\n",
3116 obj->base.size,
3117 map_and_fenceable ? "mappable" : "total",
0a9ae0d7 3118 gtt_max);
654fc607
CW
3119 return -E2BIG;
3120 }
3121
37e680a1 3122 ret = i915_gem_object_get_pages(obj);
6c085a72
CW
3123 if (ret)
3124 return ret;
3125
fbdda6fb
CW
3126 i915_gem_object_pin_pages(obj);
3127
dc9dd7a2
CW
3128 node = kzalloc(sizeof(*node), GFP_KERNEL);
3129 if (node == NULL) {
3130 i915_gem_object_unpin_pages(obj);
3131 return -ENOMEM;
3132 }
3133
0a9ae0d7
BW
3134search_free:
3135 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
3136 size, alignment,
3137 obj->cache_level, 0, gtt_max);
dc9dd7a2 3138 if (ret) {
75e9e915 3139 ret = i915_gem_evict_something(dev, size, alignment,
42d6ab48 3140 obj->cache_level,
86a1ee26
CW
3141 map_and_fenceable,
3142 nonblocking);
dc9dd7a2
CW
3143 if (ret == 0)
3144 goto search_free;
9731129c 3145
dc9dd7a2
CW
3146 i915_gem_object_unpin_pages(obj);
3147 kfree(node);
3148 return ret;
673a394b 3149 }
dc9dd7a2 3150 if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
fbdda6fb 3151 i915_gem_object_unpin_pages(obj);
dc9dd7a2 3152 drm_mm_put_block(node);
42d6ab48 3153 return -EINVAL;
673a394b
EA
3154 }
3155
74163907 3156 ret = i915_gem_gtt_prepare_object(obj);
7c2e6fdf 3157 if (ret) {
fbdda6fb 3158 i915_gem_object_unpin_pages(obj);
dc9dd7a2 3159 drm_mm_put_block(node);
6c085a72 3160 return ret;
673a394b 3161 }
673a394b 3162
35c20a60 3163 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
05394f39 3164 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
bf1a1092 3165
dc9dd7a2
CW
3166 obj->gtt_space = node;
3167 obj->gtt_offset = node->start;
1c5d22f7 3168
75e9e915 3169 fenceable =
dc9dd7a2
CW
3170 node->size == fence_size &&
3171 (node->start & (fence_alignment - 1)) == 0;
a00b10c3 3172
75e9e915 3173 mappable =
5d4545ae 3174 obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
a00b10c3 3175
05394f39 3176 obj->map_and_fenceable = mappable && fenceable;
75e9e915 3177
db53a302 3178 trace_i915_gem_object_bind(obj, map_and_fenceable);
42d6ab48 3179 i915_gem_verify_gtt(dev);
673a394b
EA
3180 return 0;
3181}
3182
3183void
05394f39 3184i915_gem_clflush_object(struct drm_i915_gem_object *obj)
673a394b 3185{
673a394b
EA
3186 /* If we don't have a page list set up, then we're not pinned
3187 * to GPU, and we can ignore the cache flush because it'll happen
3188 * again at bind time.
3189 */
05394f39 3190 if (obj->pages == NULL)
673a394b
EA
3191 return;
3192
769ce464
ID
3193 /*
3194 * Stolen memory is always coherent with the GPU as it is explicitly
3195 * marked as wc by the system, or the system is cache-coherent.
3196 */
3197 if (obj->stolen)
3198 return;
3199
9c23f7fc
CW
3200 /* If the GPU is snooping the contents of the CPU cache,
3201 * we do not need to manually clear the CPU cache lines. However,
3202 * the caches are only snooped when the render cache is
3203 * flushed/invalidated. As we always have to emit invalidations
3204 * and flushes when moving into and out of the RENDER domain, correct
3205 * snooping behaviour occurs naturally as the result of our domain
3206 * tracking.
3207 */
3208 if (obj->cache_level != I915_CACHE_NONE)
3209 return;
3210
1c5d22f7 3211 trace_i915_gem_object_clflush(obj);
cfa16a0d 3212
9da3da66 3213 drm_clflush_sg(obj->pages);
e47c68e9
EA
3214}
3215
3216/** Flushes the GTT write domain for the object if it's dirty. */
3217static void
05394f39 3218i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 3219{
1c5d22f7
CW
3220 uint32_t old_write_domain;
3221
05394f39 3222 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
e47c68e9
EA
3223 return;
3224
63256ec5 3225 /* No actual flushing is required for the GTT write domain. Writes
e47c68e9
EA
3226 * to it immediately go to main memory as far as we know, so there's
3227 * no chipset flush. It also doesn't land in render cache.
63256ec5
CW
3228 *
3229 * However, we do have to enforce the order so that all writes through
3230 * the GTT land before any writes to the device, such as updates to
3231 * the GATT itself.
e47c68e9 3232 */
63256ec5
CW
3233 wmb();
3234
05394f39
CW
3235 old_write_domain = obj->base.write_domain;
3236 obj->base.write_domain = 0;
1c5d22f7
CW
3237
3238 trace_i915_gem_object_change_domain(obj,
05394f39 3239 obj->base.read_domains,
1c5d22f7 3240 old_write_domain);
e47c68e9
EA
3241}
3242
3243/** Flushes the CPU write domain for the object if it's dirty. */
3244static void
05394f39 3245i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 3246{
1c5d22f7 3247 uint32_t old_write_domain;
e47c68e9 3248
05394f39 3249 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
e47c68e9
EA
3250 return;
3251
3252 i915_gem_clflush_object(obj);
e76e9aeb 3253 i915_gem_chipset_flush(obj->base.dev);
05394f39
CW
3254 old_write_domain = obj->base.write_domain;
3255 obj->base.write_domain = 0;
1c5d22f7
CW
3256
3257 trace_i915_gem_object_change_domain(obj,
05394f39 3258 obj->base.read_domains,
1c5d22f7 3259 old_write_domain);
e47c68e9
EA
3260}
3261
2ef7eeaa
EA
3262/**
3263 * Moves a single object to the GTT read, and possibly write domain.
3264 *
3265 * This function returns when the move is complete, including waiting on
3266 * flushes to occur.
3267 */
79e53945 3268int
2021746e 3269i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2ef7eeaa 3270{
8325a09d 3271 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1c5d22f7 3272 uint32_t old_write_domain, old_read_domains;
e47c68e9 3273 int ret;
2ef7eeaa 3274
02354392 3275 /* Not valid to be called on unbound objects. */
05394f39 3276 if (obj->gtt_space == NULL)
02354392
EA
3277 return -EINVAL;
3278
8d7e3de1
CW
3279 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3280 return 0;
3281
0201f1ec 3282 ret = i915_gem_object_wait_rendering(obj, !write);
88241785
CW
3283 if (ret)
3284 return ret;
3285
7213342d 3286 i915_gem_object_flush_cpu_write_domain(obj);
1c5d22f7 3287
d0a57789
CW
3288 /* Serialise direct access to this object with the barriers for
3289 * coherent writes from the GPU, by effectively invalidating the
3290 * GTT domain upon first access.
3291 */
3292 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3293 mb();
3294
05394f39
CW
3295 old_write_domain = obj->base.write_domain;
3296 old_read_domains = obj->base.read_domains;
1c5d22f7 3297
e47c68e9
EA
3298 /* It should now be out of any other write domains, and we can update
3299 * the domain values for our changes.
3300 */
05394f39
CW
3301 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3302 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
e47c68e9 3303 if (write) {
05394f39
CW
3304 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3305 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3306 obj->dirty = 1;
2ef7eeaa
EA
3307 }
3308
1c5d22f7
CW
3309 trace_i915_gem_object_change_domain(obj,
3310 old_read_domains,
3311 old_write_domain);
3312
8325a09d
CW
3313 /* And bump the LRU for this access */
3314 if (i915_gem_object_is_inactive(obj))
3315 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3316
e47c68e9
EA
3317 return 0;
3318}
3319
e4ffd173
CW
3320int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3321 enum i915_cache_level cache_level)
3322{
7bddb01f
DV
3323 struct drm_device *dev = obj->base.dev;
3324 drm_i915_private_t *dev_priv = dev->dev_private;
e4ffd173
CW
3325 int ret;
3326
3327 if (obj->cache_level == cache_level)
3328 return 0;
3329
3330 if (obj->pin_count) {
3331 DRM_DEBUG("can not change the cache level of pinned objects\n");
3332 return -EBUSY;
3333 }
3334
42d6ab48
CW
3335 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3336 ret = i915_gem_object_unbind(obj);
3337 if (ret)
3338 return ret;
3339 }
3340
e4ffd173
CW
3341 if (obj->gtt_space) {
3342 ret = i915_gem_object_finish_gpu(obj);
3343 if (ret)
3344 return ret;
3345
3346 i915_gem_object_finish_gtt(obj);
3347
3348 /* Before SandyBridge, you could not use tiling or fence
3349 * registers with snooped memory, so relinquish any fences
3350 * currently pointing to our region in the aperture.
3351 */
42d6ab48 3352 if (INTEL_INFO(dev)->gen < 6) {
e4ffd173
CW
3353 ret = i915_gem_object_put_fence(obj);
3354 if (ret)
3355 return ret;
3356 }
3357
74898d7e
DV
3358 if (obj->has_global_gtt_mapping)
3359 i915_gem_gtt_bind_object(obj, cache_level);
7bddb01f
DV
3360 if (obj->has_aliasing_ppgtt_mapping)
3361 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3362 obj, cache_level);
42d6ab48
CW
3363
3364 obj->gtt_space->color = cache_level;
e4ffd173
CW
3365 }
3366
3367 if (cache_level == I915_CACHE_NONE) {
3368 u32 old_read_domains, old_write_domain;
3369
3370 /* If we're coming from LLC cached, then we haven't
3371 * actually been tracking whether the data is in the
3372 * CPU cache or not, since we only allow one bit set
3373 * in obj->write_domain and have been skipping the clflushes.
3374 * Just set it to the CPU cache for now.
3375 */
3376 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3377 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3378
3379 old_read_domains = obj->base.read_domains;
3380 old_write_domain = obj->base.write_domain;
3381
3382 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3383 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3384
3385 trace_i915_gem_object_change_domain(obj,
3386 old_read_domains,
3387 old_write_domain);
3388 }
3389
3390 obj->cache_level = cache_level;
42d6ab48 3391 i915_gem_verify_gtt(dev);
e4ffd173
CW
3392 return 0;
3393}
3394
199adf40
BW
3395int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3396 struct drm_file *file)
e6994aee 3397{
199adf40 3398 struct drm_i915_gem_caching *args = data;
e6994aee
CW
3399 struct drm_i915_gem_object *obj;
3400 int ret;
3401
3402 ret = i915_mutex_lock_interruptible(dev);
3403 if (ret)
3404 return ret;
3405
3406 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3407 if (&obj->base == NULL) {
3408 ret = -ENOENT;
3409 goto unlock;
3410 }
3411
199adf40 3412 args->caching = obj->cache_level != I915_CACHE_NONE;
e6994aee
CW
3413
3414 drm_gem_object_unreference(&obj->base);
3415unlock:
3416 mutex_unlock(&dev->struct_mutex);
3417 return ret;
3418}
3419
199adf40
BW
3420int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3421 struct drm_file *file)
e6994aee 3422{
199adf40 3423 struct drm_i915_gem_caching *args = data;
e6994aee
CW
3424 struct drm_i915_gem_object *obj;
3425 enum i915_cache_level level;
3426 int ret;
3427
199adf40
BW
3428 switch (args->caching) {
3429 case I915_CACHING_NONE:
e6994aee
CW
3430 level = I915_CACHE_NONE;
3431 break;
199adf40 3432 case I915_CACHING_CACHED:
e6994aee
CW
3433 level = I915_CACHE_LLC;
3434 break;
3435 default:
3436 return -EINVAL;
3437 }
3438
3bc2913e
BW
3439 ret = i915_mutex_lock_interruptible(dev);
3440 if (ret)
3441 return ret;
3442
e6994aee
CW
3443 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3444 if (&obj->base == NULL) {
3445 ret = -ENOENT;
3446 goto unlock;
3447 }
3448
3449 ret = i915_gem_object_set_cache_level(obj, level);
3450
3451 drm_gem_object_unreference(&obj->base);
3452unlock:
3453 mutex_unlock(&dev->struct_mutex);
3454 return ret;
3455}
3456
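/*
 * Minimal userspace sketch of driving the ioctl above (assuming the
 * drm_i915_gem_caching uapi of this era; not part of this file):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = bo_handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
 *		perror("set caching failed");
 */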
b9241ea3 3457/*
2da3b9b9
CW
 3458 * Prepare a buffer for the display plane (scanout, cursors, etc.).
3459 * Can be called from an uninterruptible phase (modesetting) and allows
3460 * any flushes to be pipelined (for pageflips).
b9241ea3
ZW
3461 */
3462int
2da3b9b9
CW
3463i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3464 u32 alignment,
919926ae 3465 struct intel_ring_buffer *pipelined)
b9241ea3 3466{
2da3b9b9 3467 u32 old_read_domains, old_write_domain;
b9241ea3
ZW
3468 int ret;
3469
0be73284 3470 if (pipelined != obj->ring) {
2911a35b
BW
3471 ret = i915_gem_object_sync(obj, pipelined);
3472 if (ret)
b9241ea3
ZW
3473 return ret;
3474 }
3475
a7ef0640
EA
3476 /* The display engine is not coherent with the LLC cache on gen6. As
3477 * a result, we make sure that the pinning that is about to occur is
 3478 * done with uncached PTEs. This is the lowest common denominator for all
 3479 * chipsets.
3480 *
3481 * However for gen6+, we could do better by using the GFDT bit instead
3482 * of uncaching, which would allow us to flush all the LLC-cached data
3483 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3484 */
3485 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3486 if (ret)
3487 return ret;
3488
2da3b9b9
CW
3489 /* As the user may map the buffer once pinned in the display plane
3490 * (e.g. libkms for the bootup splash), we have to ensure that we
3491 * always use map_and_fenceable for all scanout buffers.
3492 */
86a1ee26 3493 ret = i915_gem_object_pin(obj, alignment, true, false);
2da3b9b9
CW
3494 if (ret)
3495 return ret;
3496
b118c1e3
CW
3497 i915_gem_object_flush_cpu_write_domain(obj);
3498
2da3b9b9 3499 old_write_domain = obj->base.write_domain;
05394f39 3500 old_read_domains = obj->base.read_domains;
2da3b9b9
CW
3501
3502 /* It should now be out of any other write domains, and we can update
3503 * the domain values for our changes.
3504 */
e5f1d962 3505 obj->base.write_domain = 0;
05394f39 3506 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
b9241ea3
ZW
3507
3508 trace_i915_gem_object_change_domain(obj,
3509 old_read_domains,
2da3b9b9 3510 old_write_domain);
b9241ea3
ZW
3511
3512 return 0;
3513}
3514
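/*
 * Usage note (sketch): the modeset code is the expected caller here, e.g.
 * when pinning a framebuffer for scanout or a pageflip. The object is first
 * forced to I915_CACHE_NONE, then pinned map_and_fenceable so that a later
 * GTT mmap of the scanout buffer works; callers are expected to balance this
 * with i915_gem_object_unpin() once the plane stops referencing the buffer.
 */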
85345517 3515int
a8198eea 3516i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
85345517 3517{
88241785
CW
3518 int ret;
3519
a8198eea 3520 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
85345517
CW
3521 return 0;
3522
0201f1ec 3523 ret = i915_gem_object_wait_rendering(obj, false);
c501ae7f
CW
3524 if (ret)
3525 return ret;
3526
a8198eea
CW
3527 /* Ensure that we invalidate the GPU's caches and TLBs. */
3528 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
c501ae7f 3529 return 0;
85345517
CW
3530}
3531
e47c68e9
EA
3532/**
3533 * Moves a single object to the CPU read, and possibly write domain.
3534 *
3535 * This function returns when the move is complete, including waiting on
3536 * flushes to occur.
3537 */
dabdfe02 3538int
919926ae 3539i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
e47c68e9 3540{
1c5d22f7 3541 uint32_t old_write_domain, old_read_domains;
e47c68e9
EA
3542 int ret;
3543
8d7e3de1
CW
3544 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3545 return 0;
3546
0201f1ec 3547 ret = i915_gem_object_wait_rendering(obj, !write);
88241785
CW
3548 if (ret)
3549 return ret;
3550
e47c68e9 3551 i915_gem_object_flush_gtt_write_domain(obj);
2ef7eeaa 3552
05394f39
CW
3553 old_write_domain = obj->base.write_domain;
3554 old_read_domains = obj->base.read_domains;
1c5d22f7 3555
e47c68e9 3556 /* Flush the CPU cache if it's still invalid. */
05394f39 3557 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2ef7eeaa 3558 i915_gem_clflush_object(obj);
2ef7eeaa 3559
05394f39 3560 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
2ef7eeaa
EA
3561 }
3562
3563 /* It should now be out of any other write domains, and we can update
3564 * the domain values for our changes.
3565 */
05394f39 3566 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
e47c68e9
EA
3567
3568 /* If we're writing through the CPU, then the GPU read domains will
3569 * need to be invalidated at next use.
3570 */
3571 if (write) {
05394f39
CW
3572 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3573 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
e47c68e9 3574 }
2ef7eeaa 3575
1c5d22f7
CW
3576 trace_i915_gem_object_change_domain(obj,
3577 old_read_domains,
3578 old_write_domain);
3579
2ef7eeaa
EA
3580 return 0;
3581}
3582
673a394b
EA
3583/* Throttle our rendering by waiting until the ring has completed our requests
3584 * emitted over 20 msec ago.
3585 *
b962442e
EA
3586 * Note that if we were to use the current jiffies each time around the loop,
3587 * we wouldn't escape the function with any frames outstanding if the time to
3588 * render a frame was over 20ms.
3589 *
673a394b
EA
3590 * This should get us reasonable parallelism between CPU and GPU but also
3591 * relatively low latency when blocking on a particular request to finish.
3592 */
40a5f0de 3593static int
f787a5f5 3594i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
40a5f0de 3595{
f787a5f5
CW
3596 struct drm_i915_private *dev_priv = dev->dev_private;
3597 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e 3598 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
f787a5f5
CW
3599 struct drm_i915_gem_request *request;
3600 struct intel_ring_buffer *ring = NULL;
f69061be 3601 unsigned reset_counter;
f787a5f5
CW
3602 u32 seqno = 0;
3603 int ret;
93533c29 3604
308887aa
DV
3605 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3606 if (ret)
3607 return ret;
3608
3609 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3610 if (ret)
3611 return ret;
e110e8d6 3612
1c25595f 3613 spin_lock(&file_priv->mm.lock);
f787a5f5 3614 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
b962442e
EA
3615 if (time_after_eq(request->emitted_jiffies, recent_enough))
3616 break;
40a5f0de 3617
f787a5f5
CW
3618 ring = request->ring;
3619 seqno = request->seqno;
b962442e 3620 }
f69061be 3621 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1c25595f 3622 spin_unlock(&file_priv->mm.lock);
40a5f0de 3623
f787a5f5
CW
3624 if (seqno == 0)
3625 return 0;
2bc43b5c 3626
f69061be 3627 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
f787a5f5
CW
3628 if (ret == 0)
3629 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
40a5f0de
EA
3630
3631 return ret;
3632}
3633
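/*
 * Worked example of the 20 msec window above: with HZ=250 a jiffy is 4 ms,
 * so recent_enough = jiffies - msecs_to_jiffies(20) = jiffies - 5. Requests
 * emitted within the last 5 jiffies are skipped, and we block on the newest
 * request older than that, so a client can keep roughly 20 ms of rendering
 * queued ahead of the GPU without stalling.
 */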
673a394b 3634int
05394f39
CW
3635i915_gem_object_pin(struct drm_i915_gem_object *obj,
3636 uint32_t alignment,
86a1ee26
CW
3637 bool map_and_fenceable,
3638 bool nonblocking)
673a394b 3639{
673a394b
EA
3640 int ret;
3641
7e81a42e
CW
3642 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3643 return -EBUSY;
ac0c6b5a 3644
05394f39
CW
3645 if (obj->gtt_space != NULL) {
3646 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3647 (map_and_fenceable && !obj->map_and_fenceable)) {
3648 WARN(obj->pin_count,
ae7d49d8 3649 "bo is already pinned with incorrect alignment:"
75e9e915
DV
3650 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3651 " obj->map_and_fenceable=%d\n",
05394f39 3652 obj->gtt_offset, alignment,
75e9e915 3653 map_and_fenceable,
05394f39 3654 obj->map_and_fenceable);
ac0c6b5a
CW
3655 ret = i915_gem_object_unbind(obj);
3656 if (ret)
3657 return ret;
3658 }
3659 }
3660
05394f39 3661 if (obj->gtt_space == NULL) {
8742267a
CW
3662 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3663
a00b10c3 3664 ret = i915_gem_object_bind_to_gtt(obj, alignment,
86a1ee26
CW
3665 map_and_fenceable,
3666 nonblocking);
9731129c 3667 if (ret)
673a394b 3668 return ret;
8742267a
CW
3669
3670 if (!dev_priv->mm.aliasing_ppgtt)
3671 i915_gem_gtt_bind_object(obj, obj->cache_level);
22c344e9 3672 }
76446cac 3673
74898d7e
DV
3674 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3675 i915_gem_gtt_bind_object(obj, obj->cache_level);
3676
1b50247a 3677 obj->pin_count++;
6299f992 3678 obj->pin_mappable |= map_and_fenceable;
673a394b
EA
3679
3680 return 0;
3681}
3682
3683void
05394f39 3684i915_gem_object_unpin(struct drm_i915_gem_object *obj)
673a394b 3685{
05394f39
CW
3686 BUG_ON(obj->pin_count == 0);
3687 BUG_ON(obj->gtt_space == NULL);
673a394b 3688
1b50247a 3689 if (--obj->pin_count == 0)
6299f992 3690 obj->pin_mappable = false;
673a394b
EA
3691}
3692
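/*
 * Pin and unpin are reference counted and must be balanced. A typical
 * in-kernel pattern (sketch only, using the signatures above):
 *
 *	ret = i915_gem_object_pin(obj, alignment, true, false);
 *	if (ret)
 *		return ret;
 *	... program hardware with obj->gtt_offset ...
 *	i915_gem_object_unpin(obj);
 */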
3693int
3694i915_gem_pin_ioctl(struct drm_device *dev, void *data,
05394f39 3695 struct drm_file *file)
673a394b
EA
3696{
3697 struct drm_i915_gem_pin *args = data;
05394f39 3698 struct drm_i915_gem_object *obj;
673a394b
EA
3699 int ret;
3700
1d7cfea1
CW
3701 ret = i915_mutex_lock_interruptible(dev);
3702 if (ret)
3703 return ret;
673a394b 3704
05394f39 3705 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3706 if (&obj->base == NULL) {
1d7cfea1
CW
3707 ret = -ENOENT;
3708 goto unlock;
673a394b 3709 }
673a394b 3710
05394f39 3711 if (obj->madv != I915_MADV_WILLNEED) {
bb6baf76 3712 DRM_ERROR("Attempting to pin a purgeable buffer\n");
1d7cfea1
CW
3713 ret = -EINVAL;
3714 goto out;
3ef94daa
CW
3715 }
3716
05394f39 3717 if (obj->pin_filp != NULL && obj->pin_filp != file) {
79e53945
JB
3718 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3719 args->handle);
1d7cfea1
CW
3720 ret = -EINVAL;
3721 goto out;
79e53945
JB
3722 }
3723
93be8788 3724 if (obj->user_pin_count == 0) {
86a1ee26 3725 ret = i915_gem_object_pin(obj, args->alignment, true, false);
1d7cfea1
CW
3726 if (ret)
3727 goto out;
673a394b
EA
3728 }
3729
93be8788
CW
3730 obj->user_pin_count++;
3731 obj->pin_filp = file;
3732
673a394b
EA
3733 /* XXX - flush the CPU caches for pinned objects
3734 * as the X server doesn't manage domains yet
3735 */
e47c68e9 3736 i915_gem_object_flush_cpu_write_domain(obj);
05394f39 3737 args->offset = obj->gtt_offset;
1d7cfea1 3738out:
05394f39 3739 drm_gem_object_unreference(&obj->base);
1d7cfea1 3740unlock:
673a394b 3741 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3742 return ret;
673a394b
EA
3743}
3744
3745int
3746i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
05394f39 3747 struct drm_file *file)
673a394b
EA
3748{
3749 struct drm_i915_gem_pin *args = data;
05394f39 3750 struct drm_i915_gem_object *obj;
76c1dec1 3751 int ret;
673a394b 3752
1d7cfea1
CW
3753 ret = i915_mutex_lock_interruptible(dev);
3754 if (ret)
3755 return ret;
673a394b 3756
05394f39 3757 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3758 if (&obj->base == NULL) {
1d7cfea1
CW
3759 ret = -ENOENT;
3760 goto unlock;
673a394b 3761 }
76c1dec1 3762
05394f39 3763 if (obj->pin_filp != file) {
79e53945
JB
3764 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3765 args->handle);
1d7cfea1
CW
3766 ret = -EINVAL;
3767 goto out;
79e53945 3768 }
05394f39
CW
3769 obj->user_pin_count--;
3770 if (obj->user_pin_count == 0) {
3771 obj->pin_filp = NULL;
79e53945
JB
3772 i915_gem_object_unpin(obj);
3773 }
673a394b 3774
1d7cfea1 3775out:
05394f39 3776 drm_gem_object_unreference(&obj->base);
1d7cfea1 3777unlock:
673a394b 3778 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3779 return ret;
673a394b
EA
3780}
3781
3782int
3783i915_gem_busy_ioctl(struct drm_device *dev, void *data,
05394f39 3784 struct drm_file *file)
673a394b
EA
3785{
3786 struct drm_i915_gem_busy *args = data;
05394f39 3787 struct drm_i915_gem_object *obj;
30dbf0c0
CW
3788 int ret;
3789
76c1dec1 3790 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 3791 if (ret)
76c1dec1 3792 return ret;
673a394b 3793
05394f39 3794 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3795 if (&obj->base == NULL) {
1d7cfea1
CW
3796 ret = -ENOENT;
3797 goto unlock;
673a394b 3798 }
d1b851fc 3799
0be555b6
CW
3800 /* Count all active objects as busy, even if they are currently not used
3801 * by the gpu. Users of this interface expect objects to eventually
3802 * become non-busy without any further actions, therefore emit any
3803 * necessary flushes here.
c4de0a5d 3804 */
30dfebf3 3805 ret = i915_gem_object_flush_active(obj);
0be555b6 3806
30dfebf3 3807 args->busy = obj->active;
e9808edd
CW
3808 if (obj->ring) {
3809 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3810 args->busy |= intel_ring_flag(obj->ring) << 16;
3811 }
673a394b 3812
05394f39 3813 drm_gem_object_unreference(&obj->base);
1d7cfea1 3814unlock:
673a394b 3815 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3816 return ret;
673a394b
EA
3817}
3818
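/*
 * The returned args->busy packs the active flag in the low bits and the
 * ring that last used the object in bits 16 and up, so a caller can decode
 * it as (sketch, following the encoding above):
 *
 *	still_busy = busy & 0xffff;
 *	ring_mask  = busy >> 16;
 */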
3819int
3820i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3821 struct drm_file *file_priv)
3822{
0206e353 3823 return i915_gem_ring_throttle(dev, file_priv);
673a394b
EA
3824}
3825
3ef94daa
CW
3826int
3827i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3828 struct drm_file *file_priv)
3829{
3830 struct drm_i915_gem_madvise *args = data;
05394f39 3831 struct drm_i915_gem_object *obj;
76c1dec1 3832 int ret;
3ef94daa
CW
3833
3834 switch (args->madv) {
3835 case I915_MADV_DONTNEED:
3836 case I915_MADV_WILLNEED:
3837 break;
3838 default:
3839 return -EINVAL;
3840 }
3841
1d7cfea1
CW
3842 ret = i915_mutex_lock_interruptible(dev);
3843 if (ret)
3844 return ret;
3845
05394f39 3846 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
c8725226 3847 if (&obj->base == NULL) {
1d7cfea1
CW
3848 ret = -ENOENT;
3849 goto unlock;
3ef94daa 3850 }
3ef94daa 3851
05394f39 3852 if (obj->pin_count) {
1d7cfea1
CW
3853 ret = -EINVAL;
3854 goto out;
3ef94daa
CW
3855 }
3856
05394f39
CW
3857 if (obj->madv != __I915_MADV_PURGED)
3858 obj->madv = args->madv;
3ef94daa 3859
6c085a72
CW
3860 /* if the object is no longer attached, discard its backing storage */
3861 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
2d7ef395
CW
3862 i915_gem_object_truncate(obj);
3863
05394f39 3864 args->retained = obj->madv != __I915_MADV_PURGED;
bb6baf76 3865
1d7cfea1 3866out:
05394f39 3867 drm_gem_object_unreference(&obj->base);
1d7cfea1 3868unlock:
3ef94daa 3869 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3870 return ret;
3ef94daa
CW
3871}
3872
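/*
 * Minimal userspace sketch (assuming the drm_i915_gem_madvise uapi of this
 * era): mark an idle cached buffer purgeable, then mark it needed again and
 * check "retained" before trusting its old contents. reupload_contents() is
 * a placeholder for the caller's own re-initialisation path.
 *
 *	struct drm_i915_gem_madvise arg = { .handle = bo_handle };
 *
 *	arg.madv = I915_MADV_DONTNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *
 *	arg.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *	if (!arg.retained)
 *		reupload_contents(bo_handle);
 */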
37e680a1
CW
3873void i915_gem_object_init(struct drm_i915_gem_object *obj,
3874 const struct drm_i915_gem_object_ops *ops)
0327d6ba 3875{
0327d6ba 3876 INIT_LIST_HEAD(&obj->mm_list);
35c20a60 3877 INIT_LIST_HEAD(&obj->global_list);
0327d6ba
CW
3878 INIT_LIST_HEAD(&obj->ring_list);
3879 INIT_LIST_HEAD(&obj->exec_list);
3880
37e680a1
CW
3881 obj->ops = ops;
3882
0327d6ba
CW
3883 obj->fence_reg = I915_FENCE_REG_NONE;
3884 obj->madv = I915_MADV_WILLNEED;
3885 /* Avoid an unnecessary call to unbind on the first bind. */
3886 obj->map_and_fenceable = true;
3887
3888 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3889}
3890
37e680a1
CW
3891static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3892 .get_pages = i915_gem_object_get_pages_gtt,
3893 .put_pages = i915_gem_object_put_pages_gtt,
3894};
3895
05394f39
CW
3896struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3897 size_t size)
ac52bc56 3898{
c397b908 3899 struct drm_i915_gem_object *obj;
5949eac4 3900 struct address_space *mapping;
1a240d4d 3901 gfp_t mask;
ac52bc56 3902
42dcedd4 3903 obj = i915_gem_object_alloc(dev);
c397b908
DV
3904 if (obj == NULL)
3905 return NULL;
673a394b 3906
c397b908 3907 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
42dcedd4 3908 i915_gem_object_free(obj);
c397b908
DV
3909 return NULL;
3910 }
673a394b 3911
bed1ea95
CW
3912 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3913 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3914 /* 965gm cannot relocate objects above 4GiB. */
3915 mask &= ~__GFP_HIGHMEM;
3916 mask |= __GFP_DMA32;
3917 }
3918
496ad9aa 3919 mapping = file_inode(obj->base.filp)->i_mapping;
bed1ea95 3920 mapping_set_gfp_mask(mapping, mask);
5949eac4 3921
37e680a1 3922 i915_gem_object_init(obj, &i915_gem_object_ops);
73aa808f 3923
c397b908
DV
3924 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3925 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
673a394b 3926
3d29b842
ED
3927 if (HAS_LLC(dev)) {
3928 /* On some devices, we can have the GPU use the LLC (the CPU
a1871112
EA
3929 * cache) for about a 10% performance improvement
3930 * compared to uncached. Graphics requests other than
3931 * display scanout are coherent with the CPU in
3932 * accessing this cache. This means in this mode we
3933 * don't need to clflush on the CPU side, and on the
3934 * GPU side we only need to flush internal caches to
3935 * get data visible to the CPU.
3936 *
3937 * However, we maintain the display planes as UC, and so
3938 * need to rebind when first used as such.
3939 */
3940 obj->cache_level = I915_CACHE_LLC;
3941 } else
3942 obj->cache_level = I915_CACHE_NONE;
3943
05394f39 3944 return obj;
c397b908
DV
3945}
3946
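/*
 * Note on the allocation above: 965G/GM class hardware cannot relocate
 * objects above 4 GiB, hence the __GFP_DMA32 shmem mask, and the default
 * cache level follows HAS_LLC(): LLC-coherent where the CPU cache is shared
 * with the GPU, uncached otherwise. New objects start out in the CPU
 * read/write domain.
 */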
3947int i915_gem_init_object(struct drm_gem_object *obj)
3948{
3949 BUG();
de151cf6 3950
673a394b
EA
3951 return 0;
3952}
3953
1488fc08 3954void i915_gem_free_object(struct drm_gem_object *gem_obj)
673a394b 3955{
1488fc08 3956 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
05394f39 3957 struct drm_device *dev = obj->base.dev;
be72615b 3958 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b 3959
26e12f89
CW
3960 trace_i915_gem_object_destroy(obj);
3961
1488fc08
CW
3962 if (obj->phys_obj)
3963 i915_gem_detach_phys_object(dev, obj);
3964
3965 obj->pin_count = 0;
3966 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3967 bool was_interruptible;
3968
3969 was_interruptible = dev_priv->mm.interruptible;
3970 dev_priv->mm.interruptible = false;
3971
3972 WARN_ON(i915_gem_object_unbind(obj));
3973
3974 dev_priv->mm.interruptible = was_interruptible;
3975 }
3976
1d64ae71
BW
 3977 /* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
 3978 * before proceeding. */
3979 if (obj->stolen)
3980 i915_gem_object_unpin_pages(obj);
3981
401c29f6
BW
3982 if (WARN_ON(obj->pages_pin_count))
3983 obj->pages_pin_count = 0;
37e680a1 3984 i915_gem_object_put_pages(obj);
d8cb5086 3985 i915_gem_object_free_mmap_offset(obj);
0104fdbb 3986 i915_gem_object_release_stolen(obj);
de151cf6 3987
9da3da66
CW
3988 BUG_ON(obj->pages);
3989
2f745ad3
CW
3990 if (obj->base.import_attach)
3991 drm_prime_gem_destroy(&obj->base, NULL);
de151cf6 3992
05394f39
CW
3993 drm_gem_object_release(&obj->base);
3994 i915_gem_info_remove_obj(dev_priv, obj->base.size);
c397b908 3995
05394f39 3996 kfree(obj->bit_17);
42dcedd4 3997 i915_gem_object_free(obj);
673a394b
EA
3998}
3999
29105ccc
CW
4000int
4001i915_gem_idle(struct drm_device *dev)
4002{
4003 drm_i915_private_t *dev_priv = dev->dev_private;
4004 int ret;
28dfe52a 4005
29105ccc 4006 mutex_lock(&dev->struct_mutex);
1c5d22f7 4007
87acb0a5 4008 if (dev_priv->mm.suspended) {
29105ccc
CW
4009 mutex_unlock(&dev->struct_mutex);
4010 return 0;
28dfe52a
EA
4011 }
4012
b2da9fe5 4013 ret = i915_gpu_idle(dev);
6dbe2772
KP
4014 if (ret) {
4015 mutex_unlock(&dev->struct_mutex);
673a394b 4016 return ret;
6dbe2772 4017 }
b2da9fe5 4018 i915_gem_retire_requests(dev);
673a394b 4019
29105ccc 4020 /* Under UMS, be paranoid and evict. */
a39d7efc 4021 if (!drm_core_check_feature(dev, DRIVER_MODESET))
6c085a72 4022 i915_gem_evict_everything(dev);
29105ccc 4023
312817a3
CW
4024 i915_gem_reset_fences(dev);
4025
29105ccc
CW
4026 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4027 * We need to replace this with a semaphore, or something.
4028 * And not confound mm.suspended!
4029 */
4030 dev_priv->mm.suspended = 1;
99584db3 4031 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
29105ccc
CW
4032
4033 i915_kernel_lost_context(dev);
6dbe2772 4034 i915_gem_cleanup_ringbuffer(dev);
29105ccc 4035
6dbe2772
KP
4036 mutex_unlock(&dev->struct_mutex);
4037
29105ccc
CW
4038 /* Cancel the retire work handler, which should be idle now. */
4039 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4040
673a394b
EA
4041 return 0;
4042}
4043
b9524a1e
BW
4044void i915_gem_l3_remap(struct drm_device *dev)
4045{
4046 drm_i915_private_t *dev_priv = dev->dev_private;
4047 u32 misccpctl;
4048 int i;
4049
eb32e458 4050 if (!HAS_L3_GPU_CACHE(dev))
b9524a1e
BW
4051 return;
4052
a4da4fa4 4053 if (!dev_priv->l3_parity.remap_info)
b9524a1e
BW
4054 return;
4055
4056 misccpctl = I915_READ(GEN7_MISCCPCTL);
4057 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4058 POSTING_READ(GEN7_MISCCPCTL);
4059
4060 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4061 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
a4da4fa4 4062 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
b9524a1e
BW
4063 DRM_DEBUG("0x%x was already programmed to %x\n",
4064 GEN7_L3LOG_BASE + i, remap);
a4da4fa4 4065 if (remap && !dev_priv->l3_parity.remap_info[i/4])
b9524a1e 4066 DRM_DEBUG_DRIVER("Clearing remapped register\n");
a4da4fa4 4067 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
b9524a1e
BW
4068 }
4069
4070 /* Make sure all the writes land before disabling dop clock gating */
4071 POSTING_READ(GEN7_L3LOG_BASE);
4072
4073 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4074}
4075
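/*
 * The remap loop above replays the saved L3 remapping registers
 * (dev_priv->l3_parity.remap_info) into GEN7_L3LOG_BASE with DOP clock
 * gating temporarily disabled, so that remapping programmed earlier
 * (e.g. via the l3_parity interface after a parity error) is reapplied
 * after a GPU reset or resume.
 */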
f691e2f4
DV
4076void i915_gem_init_swizzling(struct drm_device *dev)
4077{
4078 drm_i915_private_t *dev_priv = dev->dev_private;
4079
11782b02 4080 if (INTEL_INFO(dev)->gen < 5 ||
f691e2f4
DV
4081 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4082 return;
4083
4084 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4085 DISP_TILE_SURFACE_SWIZZLING);
4086
11782b02
DV
4087 if (IS_GEN5(dev))
4088 return;
4089
f691e2f4
DV
4090 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4091 if (IS_GEN6(dev))
6b26c86d 4092 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
8782e26c 4093 else if (IS_GEN7(dev))
6b26c86d 4094 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
8782e26c
BW
4095 else
4096 BUG();
f691e2f4 4097}
e21af88d 4098
67b1b571
CW
4099static bool
4100intel_enable_blt(struct drm_device *dev)
4101{
4102 if (!HAS_BLT(dev))
4103 return false;
4104
4105 /* The blitter was dysfunctional on early prototypes */
4106 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4107 DRM_INFO("BLT not supported on this pre-production hardware;"
4108 " graphics performance will be degraded.\n");
4109 return false;
4110 }
4111
4112 return true;
4113}
4114
4fc7c971 4115static int i915_gem_init_rings(struct drm_device *dev)
8187a2b7 4116{
4fc7c971 4117 struct drm_i915_private *dev_priv = dev->dev_private;
8187a2b7 4118 int ret;
68f95ba9 4119
5c1143bb 4120 ret = intel_init_render_ring_buffer(dev);
68f95ba9 4121 if (ret)
b6913e4b 4122 return ret;
68f95ba9
CW
4123
4124 if (HAS_BSD(dev)) {
5c1143bb 4125 ret = intel_init_bsd_ring_buffer(dev);
68f95ba9
CW
4126 if (ret)
4127 goto cleanup_render_ring;
d1b851fc 4128 }
68f95ba9 4129
67b1b571 4130 if (intel_enable_blt(dev)) {
549f7365
CW
4131 ret = intel_init_blt_ring_buffer(dev);
4132 if (ret)
4133 goto cleanup_bsd_ring;
4134 }
4135
9a8a2213
BW
4136 if (HAS_VEBOX(dev)) {
4137 ret = intel_init_vebox_ring_buffer(dev);
4138 if (ret)
4139 goto cleanup_blt_ring;
4140 }
4141
4142
99433931 4143 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4fc7c971 4144 if (ret)
9a8a2213 4145 goto cleanup_vebox_ring;
4fc7c971
BW
4146
4147 return 0;
4148
9a8a2213
BW
4149cleanup_vebox_ring:
4150 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4fc7c971
BW
4151cleanup_blt_ring:
4152 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4153cleanup_bsd_ring:
4154 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4155cleanup_render_ring:
4156 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4157
4158 return ret;
4159}
4160
4161int
4162i915_gem_init_hw(struct drm_device *dev)
4163{
4164 drm_i915_private_t *dev_priv = dev->dev_private;
4165 int ret;
4166
4167 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4168 return -EIO;
4169
4170 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
4171 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
4172
88a2b2a3
BW
4173 if (HAS_PCH_NOP(dev)) {
4174 u32 temp = I915_READ(GEN7_MSG_CTL);
4175 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4176 I915_WRITE(GEN7_MSG_CTL, temp);
4177 }
4178
4fc7c971
BW
4179 i915_gem_l3_remap(dev);
4180
4181 i915_gem_init_swizzling(dev);
4182
4183 ret = i915_gem_init_rings(dev);
99433931
MK
4184 if (ret)
4185 return ret;
4186
254f965c
BW
4187 /*
4188 * XXX: There was some w/a described somewhere suggesting loading
4189 * contexts before PPGTT.
4190 */
4191 i915_gem_context_init(dev);
b7c36d25
BW
4192 if (dev_priv->mm.aliasing_ppgtt) {
4193 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4194 if (ret) {
4195 i915_gem_cleanup_aliasing_ppgtt(dev);
4196 DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4197 }
4198 }
e21af88d 4199
68f95ba9 4200 return 0;
8187a2b7
ZN
4201}
4202
1070a42b
CW
4203int i915_gem_init(struct drm_device *dev)
4204{
4205 struct drm_i915_private *dev_priv = dev->dev_private;
1070a42b
CW
4206 int ret;
4207
1070a42b 4208 mutex_lock(&dev->struct_mutex);
d62b4892
JB
4209
4210 if (IS_VALLEYVIEW(dev)) {
4211 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4212 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4213 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4214 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4215 }
4216
d7e5008f 4217 i915_gem_init_global_gtt(dev);
d62b4892 4218
1070a42b
CW
4219 ret = i915_gem_init_hw(dev);
4220 mutex_unlock(&dev->struct_mutex);
4221 if (ret) {
4222 i915_gem_cleanup_aliasing_ppgtt(dev);
4223 return ret;
4224 }
4225
53ca26ca
DV
4226 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4227 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4228 dev_priv->dri1.allow_batchbuffer = 1;
1070a42b
CW
4229 return 0;
4230}
4231
8187a2b7
ZN
4232void
4233i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4234{
4235 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 4236 struct intel_ring_buffer *ring;
1ec14ad3 4237 int i;
8187a2b7 4238
b4519513
CW
4239 for_each_ring(ring, dev_priv, i)
4240 intel_cleanup_ring_buffer(ring);
8187a2b7
ZN
4241}
4242
673a394b
EA
4243int
4244i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4245 struct drm_file *file_priv)
4246{
4247 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 4248 int ret;
673a394b 4249
79e53945
JB
4250 if (drm_core_check_feature(dev, DRIVER_MODESET))
4251 return 0;
4252
1f83fee0 4253 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
673a394b 4254 DRM_ERROR("Reenabling wedged hardware, good luck\n");
1f83fee0 4255 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
673a394b
EA
4256 }
4257
673a394b 4258 mutex_lock(&dev->struct_mutex);
9bb2d6f9
EA
4259 dev_priv->mm.suspended = 0;
4260
f691e2f4 4261 ret = i915_gem_init_hw(dev);
d816f6ac
WF
4262 if (ret != 0) {
4263 mutex_unlock(&dev->struct_mutex);
9bb2d6f9 4264 return ret;
d816f6ac 4265 }
9bb2d6f9 4266
69dc4987 4267 BUG_ON(!list_empty(&dev_priv->mm.active_list));
673a394b 4268 mutex_unlock(&dev->struct_mutex);
dbb19d30 4269
5f35308b
CW
4270 ret = drm_irq_install(dev);
4271 if (ret)
4272 goto cleanup_ringbuffer;
dbb19d30 4273
673a394b 4274 return 0;
5f35308b
CW
4275
4276cleanup_ringbuffer:
4277 mutex_lock(&dev->struct_mutex);
4278 i915_gem_cleanup_ringbuffer(dev);
4279 dev_priv->mm.suspended = 1;
4280 mutex_unlock(&dev->struct_mutex);
4281
4282 return ret;
673a394b
EA
4283}
4284
4285int
4286i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4287 struct drm_file *file_priv)
4288{
79e53945
JB
4289 if (drm_core_check_feature(dev, DRIVER_MODESET))
4290 return 0;
4291
dbb19d30 4292 drm_irq_uninstall(dev);
e6890f6f 4293 return i915_gem_idle(dev);
673a394b
EA
4294}
4295
4296void
4297i915_gem_lastclose(struct drm_device *dev)
4298{
4299 int ret;
673a394b 4300
e806b495
EA
4301 if (drm_core_check_feature(dev, DRIVER_MODESET))
4302 return;
4303
6dbe2772
KP
4304 ret = i915_gem_idle(dev);
4305 if (ret)
4306 DRM_ERROR("failed to idle hardware: %d\n", ret);
673a394b
EA
4307}
4308
64193406
CW
4309static void
4310init_ring_lists(struct intel_ring_buffer *ring)
4311{
4312 INIT_LIST_HEAD(&ring->active_list);
4313 INIT_LIST_HEAD(&ring->request_list);
64193406
CW
4314}
4315
673a394b
EA
4316void
4317i915_gem_load(struct drm_device *dev)
4318{
4319 drm_i915_private_t *dev_priv = dev->dev_private;
42dcedd4
CW
4320 int i;
4321
4322 dev_priv->slab =
4323 kmem_cache_create("i915_gem_object",
4324 sizeof(struct drm_i915_gem_object), 0,
4325 SLAB_HWCACHE_ALIGN,
4326 NULL);
673a394b 4327
69dc4987 4328 INIT_LIST_HEAD(&dev_priv->mm.active_list);
673a394b 4329 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
6c085a72
CW
4330 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4331 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
a09ba7fa 4332 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
1ec14ad3
CW
4333 for (i = 0; i < I915_NUM_RINGS; i++)
4334 init_ring_lists(&dev_priv->ring[i]);
4b9de737 4335 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
007cc8ac 4336 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
673a394b
EA
4337 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4338 i915_gem_retire_work_handler);
1f83fee0 4339 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
31169714 4340
94400120
DA
4341 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4342 if (IS_GEN3(dev)) {
50743298
DV
4343 I915_WRITE(MI_ARB_STATE,
4344 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
94400120
DA
4345 }
4346
72bfa19c
CW
4347 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4348
de151cf6 4349 /* Old X drivers will take 0-2 for front, back, depth buffers */
b397c836
EA
4350 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4351 dev_priv->fence_reg_start = 3;
de151cf6 4352
42b5aeab
VS
4353 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4354 dev_priv->num_fence_regs = 32;
4355 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
de151cf6
JB
4356 dev_priv->num_fence_regs = 16;
4357 else
4358 dev_priv->num_fence_regs = 8;
4359
b5aa8a0f 4360 /* Initialize fence registers to zero */
ada726c7 4361 i915_gem_reset_fences(dev);
10ed13e4 4362
673a394b 4363 i915_gem_detect_bit_6_swizzle(dev);
6b95a207 4364 init_waitqueue_head(&dev_priv->pending_flip_queue);
17250b71 4365
ce453d81
CW
4366 dev_priv->mm.interruptible = true;
4367
17250b71
CW
4368 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4369 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4370 register_shrinker(&dev_priv->mm.inactive_shrinker);
673a394b 4371}
71acb5eb
DA
4372
4373/*
4374 * Create a physically contiguous memory object for this object
4375 * e.g. for cursor + overlay regs
4376 */
995b6762
CW
4377static int i915_gem_init_phys_object(struct drm_device *dev,
4378 int id, int size, int align)
71acb5eb
DA
4379{
4380 drm_i915_private_t *dev_priv = dev->dev_private;
4381 struct drm_i915_gem_phys_object *phys_obj;
4382 int ret;
4383
4384 if (dev_priv->mm.phys_objs[id - 1] || !size)
4385 return 0;
4386
9a298b2a 4387 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
71acb5eb
DA
4388 if (!phys_obj)
4389 return -ENOMEM;
4390
4391 phys_obj->id = id;
4392
6eeefaf3 4393 phys_obj->handle = drm_pci_alloc(dev, size, align);
71acb5eb
DA
4394 if (!phys_obj->handle) {
4395 ret = -ENOMEM;
4396 goto kfree_obj;
4397 }
4398#ifdef CONFIG_X86
4399 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4400#endif
4401
4402 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4403
4404 return 0;
4405kfree_obj:
9a298b2a 4406 kfree(phys_obj);
71acb5eb
DA
4407 return ret;
4408}
4409
995b6762 4410static void i915_gem_free_phys_object(struct drm_device *dev, int id)
71acb5eb
DA
4411{
4412 drm_i915_private_t *dev_priv = dev->dev_private;
4413 struct drm_i915_gem_phys_object *phys_obj;
4414
4415 if (!dev_priv->mm.phys_objs[id - 1])
4416 return;
4417
4418 phys_obj = dev_priv->mm.phys_objs[id - 1];
4419 if (phys_obj->cur_obj) {
4420 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4421 }
4422
4423#ifdef CONFIG_X86
4424 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4425#endif
4426 drm_pci_free(dev, phys_obj->handle);
4427 kfree(phys_obj);
4428 dev_priv->mm.phys_objs[id - 1] = NULL;
4429}
4430
4431void i915_gem_free_all_phys_object(struct drm_device *dev)
4432{
4433 int i;
4434
260883c8 4435 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
71acb5eb
DA
4436 i915_gem_free_phys_object(dev, i);
4437}
4438
4439void i915_gem_detach_phys_object(struct drm_device *dev,
05394f39 4440 struct drm_i915_gem_object *obj)
71acb5eb 4441{
496ad9aa 4442 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
e5281ccd 4443 char *vaddr;
71acb5eb 4444 int i;
71acb5eb
DA
4445 int page_count;
4446
05394f39 4447 if (!obj->phys_obj)
71acb5eb 4448 return;
05394f39 4449 vaddr = obj->phys_obj->handle->vaddr;
71acb5eb 4450
05394f39 4451 page_count = obj->base.size / PAGE_SIZE;
71acb5eb 4452 for (i = 0; i < page_count; i++) {
5949eac4 4453 struct page *page = shmem_read_mapping_page(mapping, i);
e5281ccd
CW
4454 if (!IS_ERR(page)) {
4455 char *dst = kmap_atomic(page);
4456 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4457 kunmap_atomic(dst);
4458
4459 drm_clflush_pages(&page, 1);
4460
4461 set_page_dirty(page);
4462 mark_page_accessed(page);
4463 page_cache_release(page);
4464 }
71acb5eb 4465 }
e76e9aeb 4466 i915_gem_chipset_flush(dev);
d78b47b9 4467
05394f39
CW
4468 obj->phys_obj->cur_obj = NULL;
4469 obj->phys_obj = NULL;
71acb5eb
DA
4470}
4471
4472int
4473i915_gem_attach_phys_object(struct drm_device *dev,
05394f39 4474 struct drm_i915_gem_object *obj,
6eeefaf3
CW
4475 int id,
4476 int align)
71acb5eb 4477{
496ad9aa 4478 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
71acb5eb 4479 drm_i915_private_t *dev_priv = dev->dev_private;
71acb5eb
DA
4480 int ret = 0;
4481 int page_count;
4482 int i;
4483
4484 if (id > I915_MAX_PHYS_OBJECT)
4485 return -EINVAL;
4486
05394f39
CW
4487 if (obj->phys_obj) {
4488 if (obj->phys_obj->id == id)
71acb5eb
DA
4489 return 0;
4490 i915_gem_detach_phys_object(dev, obj);
4491 }
4492
71acb5eb
DA
4493 /* create a new object */
4494 if (!dev_priv->mm.phys_objs[id - 1]) {
4495 ret = i915_gem_init_phys_object(dev, id,
05394f39 4496 obj->base.size, align);
71acb5eb 4497 if (ret) {
05394f39
CW
4498 DRM_ERROR("failed to init phys object %d size: %zu\n",
4499 id, obj->base.size);
e5281ccd 4500 return ret;
71acb5eb
DA
4501 }
4502 }
4503
4504 /* bind to the object */
05394f39
CW
4505 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4506 obj->phys_obj->cur_obj = obj;
71acb5eb 4507
05394f39 4508 page_count = obj->base.size / PAGE_SIZE;
71acb5eb
DA
4509
4510 for (i = 0; i < page_count; i++) {
e5281ccd
CW
4511 struct page *page;
4512 char *dst, *src;
4513
5949eac4 4514 page = shmem_read_mapping_page(mapping, i);
e5281ccd
CW
4515 if (IS_ERR(page))
4516 return PTR_ERR(page);
71acb5eb 4517
ff75b9bc 4518 src = kmap_atomic(page);
05394f39 4519 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
71acb5eb 4520 memcpy(dst, src, PAGE_SIZE);
3e4d3af5 4521 kunmap_atomic(src);
71acb5eb 4522
e5281ccd
CW
4523 mark_page_accessed(page);
4524 page_cache_release(page);
4525 }
d78b47b9 4526
71acb5eb 4527 return 0;
71acb5eb
DA
4528}
4529
4530static int
05394f39
CW
4531i915_gem_phys_pwrite(struct drm_device *dev,
4532 struct drm_i915_gem_object *obj,
71acb5eb
DA
4533 struct drm_i915_gem_pwrite *args,
4534 struct drm_file *file_priv)
4535{
05394f39 4536 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
2bb4629a 4537 char __user *user_data = to_user_ptr(args->data_ptr);
71acb5eb 4538
b47b30cc
CW
4539 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4540 unsigned long unwritten;
4541
4542 /* The physical object once assigned is fixed for the lifetime
4543 * of the obj, so we can safely drop the lock and continue
4544 * to access vaddr.
4545 */
4546 mutex_unlock(&dev->struct_mutex);
4547 unwritten = copy_from_user(vaddr, user_data, args->size);
4548 mutex_lock(&dev->struct_mutex);
4549 if (unwritten)
4550 return -EFAULT;
4551 }
71acb5eb 4552
e76e9aeb 4553 i915_gem_chipset_flush(dev);
71acb5eb
DA
4554 return 0;
4555}
b962442e 4556
f787a5f5 4557void i915_gem_release(struct drm_device *dev, struct drm_file *file)
b962442e 4558{
f787a5f5 4559 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e
EA
4560
4561 /* Clean up our request list when the client is going away, so that
4562 * later retire_requests won't dereference our soon-to-be-gone
4563 * file_priv.
4564 */
1c25595f 4565 spin_lock(&file_priv->mm.lock);
f787a5f5
CW
4566 while (!list_empty(&file_priv->mm.request_list)) {
4567 struct drm_i915_gem_request *request;
4568
4569 request = list_first_entry(&file_priv->mm.request_list,
4570 struct drm_i915_gem_request,
4571 client_list);
4572 list_del(&request->client_list);
4573 request->file_priv = NULL;
4574 }
1c25595f 4575 spin_unlock(&file_priv->mm.lock);
b962442e 4576}
31169714 4577
5774506f
CW
4578static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4579{
4580 if (!mutex_is_locked(mutex))
4581 return false;
4582
4583#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4584 return mutex->owner == task;
4585#else
4586 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4587 return false;
4588#endif
4589}
4590
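/*
 * mutex_is_locked_by() lets the shrinker below run even when the current
 * task already holds struct_mutex (direct reclaim from within the driver):
 * in that case the trylock is skipped, "unlock" stays false, and the mutex
 * must not be dropped on the way out.
 */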
31169714 4591static int
1495f230 4592i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
31169714 4593{
17250b71
CW
4594 struct drm_i915_private *dev_priv =
4595 container_of(shrinker,
4596 struct drm_i915_private,
4597 mm.inactive_shrinker);
4598 struct drm_device *dev = dev_priv->dev;
6c085a72 4599 struct drm_i915_gem_object *obj;
1495f230 4600 int nr_to_scan = sc->nr_to_scan;
5774506f 4601 bool unlock = true;
17250b71
CW
4602 int cnt;
4603
5774506f
CW
4604 if (!mutex_trylock(&dev->struct_mutex)) {
4605 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4606 return 0;
4607
677feac2
DV
4608 if (dev_priv->mm.shrinker_no_lock_stealing)
4609 return 0;
4610
5774506f
CW
4611 unlock = false;
4612 }
31169714 4613
6c085a72
CW
4614 if (nr_to_scan) {
4615 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
93927ca5
DV
4616 if (nr_to_scan > 0)
4617 nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
4618 false);
6c085a72
CW
4619 if (nr_to_scan > 0)
4620 i915_gem_shrink_all(dev_priv);
31169714
CW
4621 }
4622
17250b71 4623 cnt = 0;
35c20a60 4624 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
a5570178
CW
4625 if (obj->pages_pin_count == 0)
4626 cnt += obj->base.size >> PAGE_SHIFT;
35c20a60 4627 list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
a5570178 4628 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
6c085a72 4629 cnt += obj->base.size >> PAGE_SHIFT;
17250b71 4630
5774506f
CW
4631 if (unlock)
4632 mutex_unlock(&dev->struct_mutex);
6c085a72 4633 return cnt;
31169714 4634}
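/*
 * With the shrinker API of this kernel generation, a call with
 * sc->nr_to_scan == 0 is a pure count query: the function above then only
 * walks the unbound and inactive lists and reports how many pages could be
 * released, while actual purging happens on the nr_to_scan > 0 path.
 */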