git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/gpu/drm/i915/i915_gem_execbuffer.c
drm/i915: Skip clflushes for all non-page backed objects
54cf91dc
CW
1/*
2 * Copyright © 2008,2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 *
27 */
28
ad778f89
CW
29#include <linux/dma_remapping.h>
30#include <linux/reservation.h>
fec0445c 31#include <linux/sync_file.h>
ad778f89
CW
32#include <linux/uaccess.h>
33
760285e7
DH
34#include <drm/drmP.h>
35#include <drm/i915_drm.h>
ad778f89 36
54cf91dc
CW
37#include "i915_drv.h"
38#include "i915_trace.h"
39#include "intel_drv.h"
5d723d7a 40#include "intel_frontbuffer.h"
54cf91dc 41
d50415cc
CW
42#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */
43
9e2793f6
DG
44#define __EXEC_OBJECT_HAS_PIN (1<<31)
45#define __EXEC_OBJECT_HAS_FENCE (1<<30)
46#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
47#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
48#define __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */
d23db88c
CW
49
50#define BATCH_OFFSET_BIAS (256*1024)
a415d355 51
5b043f4e
CW
52struct i915_execbuffer_params {
53 struct drm_device *dev;
54 struct drm_file *file;
59bfa124
CW
55 struct i915_vma *batch;
56 u32 dispatch_flags;
57 u32 args_batch_start_offset;
5b043f4e 58 struct intel_engine_cs *engine;
5b043f4e
CW
59 struct i915_gem_context *ctx;
60 struct drm_i915_gem_request *request;
61};
62
27173f1f 63struct eb_vmas {
d50415cc 64 struct drm_i915_private *i915;
27173f1f 65 struct list_head vmas;
67731b87 66 int and;
eef90ccb 67 union {
27173f1f 68 struct i915_vma *lut[0];
eef90ccb
CW
69 struct hlist_head buckets[0];
70 };
67731b87
CW
71};
72
27173f1f 73static struct eb_vmas *
d50415cc
CW
74eb_create(struct drm_i915_private *i915,
75 struct drm_i915_gem_execbuffer2 *args)
67731b87 76{
27173f1f 77 struct eb_vmas *eb = NULL;
eef90ccb
CW
78
79 if (args->flags & I915_EXEC_HANDLE_LUT) {
b205ca57 80 unsigned size = args->buffer_count;
27173f1f
BW
81 size *= sizeof(struct i915_vma *);
82 size += sizeof(struct eb_vmas);
eef90ccb
CW
83 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
84 }
85
86 if (eb == NULL) {
b205ca57
DV
87 unsigned size = args->buffer_count;
88 unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
27b7c63a 89 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
eef90ccb
CW
90 while (count > 2*size)
91 count >>= 1;
92 eb = kzalloc(count*sizeof(struct hlist_head) +
27173f1f 93 sizeof(struct eb_vmas),
eef90ccb
CW
94 GFP_TEMPORARY);
95 if (eb == NULL)
96 return eb;
97
98 eb->and = count - 1;
99 } else
100 eb->and = -args->buffer_count;
101
d50415cc 102 eb->i915 = i915;
27173f1f 103 INIT_LIST_HEAD(&eb->vmas);
67731b87
CW
104 return eb;
105}
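/*
 * Illustrative sizing example (editor's sketch, not in the upstream source):
 * assuming a 4 KiB PAGE_SIZE and an 8-byte struct hlist_head, the hash-table
 * path above starts from count = 4096 / 8 / 2 = 256 buckets and halves it
 * until count <= 2 * buffer_count. For an execbuf with 10 buffers that gives
 * 256 -> 128 -> 64 -> 32 -> 16 buckets and eb->and = 15, so handles are later
 * hashed with (handle & eb->and) in eb_lookup_vmas(). With I915_EXEC_HANDLE_LUT
 * (and a successful kmalloc) the table is skipped and eb->and holds the
 * negative buffer count, which eb_get_vma() uses to index eb->lut[] directly.
 */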
106
107static void
27173f1f 108eb_reset(struct eb_vmas *eb)
67731b87 109{
eef90ccb
CW
110 if (eb->and >= 0)
111 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
67731b87
CW
112}
113
59bfa124
CW
114static struct i915_vma *
115eb_get_batch(struct eb_vmas *eb)
116{
117 struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
118
119 /*
120 * SNA is doing fancy tricks with compressing batch buffers, which leads
121 * to negative relocation deltas. Usually that works out ok since the
122 * relocate address is still positive, except when the batch is placed
123 * very low in the GTT. Ensure this doesn't happen.
124 *
125 * Note that actual hangs have only been observed on gen7, but for
126 * paranoia do it everywhere.
127 */
128 if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
129 vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
130
131 return vma;
132}
133
3b96eff4 134static int
27173f1f
BW
135eb_lookup_vmas(struct eb_vmas *eb,
136 struct drm_i915_gem_exec_object2 *exec,
137 const struct drm_i915_gem_execbuffer2 *args,
138 struct i915_address_space *vm,
139 struct drm_file *file)
3b96eff4 140{
27173f1f
BW
141 struct drm_i915_gem_object *obj;
142 struct list_head objects;
9ae9ab52 143 int i, ret;
3b96eff4 144
27173f1f 145 INIT_LIST_HEAD(&objects);
3b96eff4 146 spin_lock(&file->table_lock);
27173f1f
BW
147 /* Grab a reference to the object and release the lock so we can lookup
148 * or create the VMA without using GFP_ATOMIC */
eef90ccb 149 for (i = 0; i < args->buffer_count; i++) {
3b96eff4
CW
150 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
151 if (obj == NULL) {
152 spin_unlock(&file->table_lock);
153 DRM_DEBUG("Invalid object handle %d at index %d\n",
154 exec[i].handle, i);
27173f1f 155 ret = -ENOENT;
9ae9ab52 156 goto err;
3b96eff4
CW
157 }
158
27173f1f 159 if (!list_empty(&obj->obj_exec_link)) {
3b96eff4
CW
160 spin_unlock(&file->table_lock);
161 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
162 obj, exec[i].handle, i);
27173f1f 163 ret = -EINVAL;
9ae9ab52 164 goto err;
3b96eff4
CW
165 }
166
25dc556a 167 i915_gem_object_get(obj);
27173f1f
BW
168 list_add_tail(&obj->obj_exec_link, &objects);
169 }
170 spin_unlock(&file->table_lock);
3b96eff4 171
27173f1f 172 i = 0;
9ae9ab52 173 while (!list_empty(&objects)) {
27173f1f 174 struct i915_vma *vma;
6f65e29a 175
9ae9ab52
CW
176 obj = list_first_entry(&objects,
177 struct drm_i915_gem_object,
178 obj_exec_link);
179
e656a6cb
DV
180 /*
181 * NOTE: We can leak any vmas created here when something fails
182 * later on. But that's no issue since vma_unbind can deal with
183 * vmas which are not actually bound. And since only
184 * lookup_or_create exists as an interface to get at the vma
185 * from the (obj, vm) we don't run the risk of creating
186 * duplicated vmas for the same vm.
187 */
718659a6 188 vma = i915_vma_instance(obj, vm, NULL);
058d88c4 189 if (unlikely(IS_ERR(vma))) {
27173f1f
BW
190 DRM_DEBUG("Failed to lookup VMA\n");
191 ret = PTR_ERR(vma);
9ae9ab52 192 goto err;
27173f1f
BW
193 }
194
9ae9ab52 195 /* Transfer ownership from the objects list to the vmas list. */
27173f1f 196 list_add_tail(&vma->exec_list, &eb->vmas);
9ae9ab52 197 list_del_init(&obj->obj_exec_link);
27173f1f
BW
198
199 vma->exec_entry = &exec[i];
eef90ccb 200 if (eb->and < 0) {
27173f1f 201 eb->lut[i] = vma;
eef90ccb
CW
202 } else {
203 uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
27173f1f
BW
204 vma->exec_handle = handle;
205 hlist_add_head(&vma->exec_node,
eef90ccb
CW
206 &eb->buckets[handle & eb->and]);
207 }
27173f1f 208 ++i;
3b96eff4 209 }
3b96eff4 210
9ae9ab52 211 return 0;
27173f1f 212
27173f1f 213
9ae9ab52 214err:
27173f1f
BW
215 while (!list_empty(&objects)) {
216 obj = list_first_entry(&objects,
217 struct drm_i915_gem_object,
218 obj_exec_link);
219 list_del_init(&obj->obj_exec_link);
f8c417cd 220 i915_gem_object_put(obj);
27173f1f 221 }
9ae9ab52
CW
222 /*
223 * Objects already transferred to the vmas list will be unreferenced by

224 * eb_destroy.
225 */
226
27173f1f 227 return ret;
3b96eff4
CW
228}
229
27173f1f 230static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
67731b87 231{
eef90ccb
CW
232 if (eb->and < 0) {
233 if (handle >= -eb->and)
234 return NULL;
235 return eb->lut[handle];
236 } else {
237 struct hlist_head *head;
aa45950b 238 struct i915_vma *vma;
67731b87 239
eef90ccb 240 head = &eb->buckets[handle & eb->and];
aa45950b 241 hlist_for_each_entry(vma, head, exec_node) {
27173f1f
BW
242 if (vma->exec_handle == handle)
243 return vma;
eef90ccb
CW
244 }
245 return NULL;
246 }
67731b87
CW
247}
248
a415d355
CW
249static void
250i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
251{
252 struct drm_i915_gem_exec_object2 *entry;
a415d355
CW
253
254 if (!drm_mm_node_allocated(&vma->node))
255 return;
256
257 entry = vma->exec_entry;
258
259 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
49ef5294 260 i915_vma_unpin_fence(vma);
a415d355
CW
261
262 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
20dfbde4 263 __i915_vma_unpin(vma);
a415d355 264
de4e783a 265 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
a415d355
CW
266}
267
268static void eb_destroy(struct eb_vmas *eb)
269{
27173f1f
BW
270 while (!list_empty(&eb->vmas)) {
271 struct i915_vma *vma;
bcffc3fa 272
27173f1f
BW
273 vma = list_first_entry(&eb->vmas,
274 struct i915_vma,
bcffc3fa 275 exec_list);
27173f1f 276 list_del_init(&vma->exec_list);
a415d355 277 i915_gem_execbuffer_unreserve_vma(vma);
172ae5b4 278 vma->exec_entry = NULL;
624192cf 279 i915_vma_put(vma);
bcffc3fa 280 }
67731b87
CW
281 kfree(eb);
282}
283
dabdfe02
CW
284static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
285{
9e53d9be
CW
286 if (!i915_gem_object_has_struct_page(obj))
287 return false;
288
d50415cc
CW
289 if (DBG_USE_CPU_RELOC)
290 return DBG_USE_CPU_RELOC > 0;
291
0031fb96 292 return (HAS_LLC(to_i915(obj->base.dev)) ||
2cc86b82 293 obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
dabdfe02
CW
294 obj->cache_level != I915_CACHE_NONE);
295}
296
934acce3
MW
297/* Used to convert any address to canonical form.
298 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
299 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
300 * addresses to be in a canonical form:
301 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
302 * canonical form [63:48] == [47]."
303 */
304#define GEN8_HIGH_ADDRESS_BIT 47
305static inline uint64_t gen8_canonical_addr(uint64_t address)
306{
307 return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
308}
309
310static inline uint64_t gen8_noncanonical_addr(uint64_t address)
311{
312 return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
313}
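/*
 * Worked example (editor's sketch, not in the upstream source): with
 * GEN8_HIGH_ADDRESS_BIT == 47, gen8_canonical_addr() sign-extends bit 47
 * into bits 63:48, so 0x0000800000000000 becomes 0xffff800000000000 while
 * 0x00007ffffffff000 is returned unchanged. gen8_noncanonical_addr() masks
 * with (1ULL << 48) - 1 and undoes that extension, mapping
 * 0xffff800000000000 back to 0x0000800000000000, the form used internally
 * for drm_mm placement.
 */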
314
315static inline uint64_t
d50415cc 316relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
934acce3
MW
317 uint64_t target_offset)
318{
319 return gen8_canonical_addr((int)reloc->delta + target_offset);
320}
321
31a39207 322struct reloc_cache {
d50415cc
CW
323 struct drm_i915_private *i915;
324 struct drm_mm_node node;
325 unsigned long vaddr;
31a39207 326 unsigned int page;
d50415cc 327 bool use_64bit_reloc;
31a39207
CW
328};
329
d50415cc
CW
330static void reloc_cache_init(struct reloc_cache *cache,
331 struct drm_i915_private *i915)
5032d871 332{
31a39207 333 cache->page = -1;
d50415cc
CW
334 cache->vaddr = 0;
335 cache->i915 = i915;
dfc5148f
JL
336 /* Must be a variable in the struct to allow GCC to unroll. */
337 cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
e8cb909a 338 cache->node.allocated = false;
d50415cc 339}
5032d871 340
d50415cc
CW
341static inline void *unmask_page(unsigned long p)
342{
343 return (void *)(uintptr_t)(p & PAGE_MASK);
344}
345
346static inline unsigned int unmask_flags(unsigned long p)
347{
348 return p & ~PAGE_MASK;
31a39207
CW
349}
350
d50415cc
CW
351#define KMAP 0x4 /* after CLFLUSH_FLAGS */
352
31a39207
CW
353static void reloc_cache_fini(struct reloc_cache *cache)
354{
d50415cc 355 void *vaddr;
5032d871 356
31a39207
CW
357 if (!cache->vaddr)
358 return;
3c94ceee 359
d50415cc
CW
360 vaddr = unmask_page(cache->vaddr);
361 if (cache->vaddr & KMAP) {
362 if (cache->vaddr & CLFLUSH_AFTER)
363 mb();
3c94ceee 364
d50415cc
CW
365 kunmap_atomic(vaddr);
366 i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
367 } else {
e8cb909a 368 wmb();
d50415cc 369 io_mapping_unmap_atomic((void __iomem *)vaddr);
e8cb909a
CW
370 if (cache->node.allocated) {
371 struct i915_ggtt *ggtt = &cache->i915->ggtt;
372
373 ggtt->base.clear_range(&ggtt->base,
374 cache->node.start,
4fb84d99 375 cache->node.size);
e8cb909a
CW
376 drm_mm_remove_node(&cache->node);
377 } else {
378 i915_vma_unpin((struct i915_vma *)cache->node.mm);
3c94ceee 379 }
31a39207
CW
380 }
381}
382
383static void *reloc_kmap(struct drm_i915_gem_object *obj,
384 struct reloc_cache *cache,
385 int page)
386{
d50415cc
CW
387 void *vaddr;
388
389 if (cache->vaddr) {
390 kunmap_atomic(unmask_page(cache->vaddr));
391 } else {
392 unsigned int flushes;
393 int ret;
31a39207 394
d50415cc
CW
395 ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
396 if (ret)
397 return ERR_PTR(ret);
398
399 BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
400 BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
3c94ceee 401
d50415cc
CW
402 cache->vaddr = flushes | KMAP;
403 cache->node.mm = (void *)obj;
404 if (flushes)
405 mb();
3c94ceee
BW
406 }
407
d50415cc
CW
408 vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
409 cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
31a39207 410 cache->page = page;
5032d871 411
d50415cc 412 return vaddr;
5032d871
RB
413}
414
d50415cc
CW
415static void *reloc_iomap(struct drm_i915_gem_object *obj,
416 struct reloc_cache *cache,
417 int page)
5032d871 418{
e8cb909a
CW
419 struct i915_ggtt *ggtt = &cache->i915->ggtt;
420 unsigned long offset;
d50415cc 421 void *vaddr;
5032d871 422
d50415cc 423 if (cache->vaddr) {
615e5000 424 io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
d50415cc
CW
425 } else {
426 struct i915_vma *vma;
427 int ret;
5032d871 428
d50415cc
CW
429 if (use_cpu_reloc(obj))
430 return NULL;
3c94ceee 431
d50415cc
CW
432 ret = i915_gem_object_set_to_gtt_domain(obj, true);
433 if (ret)
434 return ERR_PTR(ret);
3c94ceee 435
d50415cc
CW
436 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
437 PIN_MAPPABLE | PIN_NONBLOCK);
e8cb909a
CW
438 if (IS_ERR(vma)) {
439 memset(&cache->node, 0, sizeof(cache->node));
4e64e553 440 ret = drm_mm_insert_node_in_range
e8cb909a 441 (&ggtt->base.mm, &cache->node,
f51455d4 442 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
e8cb909a 443 0, ggtt->mappable_end,
4e64e553 444 DRM_MM_INSERT_LOW);
c92fa4fe
CW
445 if (ret) /* no inactive aperture space, use cpu reloc */
446 return NULL;
e8cb909a 447 } else {
49ef5294 448 ret = i915_vma_put_fence(vma);
e8cb909a
CW
449 if (ret) {
450 i915_vma_unpin(vma);
451 return ERR_PTR(ret);
452 }
5032d871 453
e8cb909a
CW
454 cache->node.start = vma->node.start;
455 cache->node.mm = (void *)vma;
3c94ceee 456 }
e8cb909a 457 }
3c94ceee 458
e8cb909a
CW
459 offset = cache->node.start;
460 if (cache->node.allocated) {
fc099090 461 wmb();
e8cb909a
CW
462 ggtt->base.insert_page(&ggtt->base,
463 i915_gem_object_get_dma_address(obj, page),
464 offset, I915_CACHE_NONE, 0);
465 } else {
466 offset += page << PAGE_SHIFT;
3c94ceee
BW
467 }
468
615e5000 469 vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
d50415cc
CW
470 cache->page = page;
471 cache->vaddr = (unsigned long)vaddr;
5032d871 472
d50415cc 473 return vaddr;
5032d871
RB
474}
475
d50415cc
CW
476static void *reloc_vaddr(struct drm_i915_gem_object *obj,
477 struct reloc_cache *cache,
478 int page)
edf4427b 479{
d50415cc 480 void *vaddr;
5032d871 481
d50415cc
CW
482 if (cache->page == page) {
483 vaddr = unmask_page(cache->vaddr);
484 } else {
485 vaddr = NULL;
486 if ((cache->vaddr & KMAP) == 0)
487 vaddr = reloc_iomap(obj, cache, page);
488 if (!vaddr)
489 vaddr = reloc_kmap(obj, cache, page);
3c94ceee
BW
490 }
491
d50415cc 492 return vaddr;
edf4427b
CW
493}
494
d50415cc 495static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
edf4427b 496{
d50415cc
CW
497 if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
498 if (flushes & CLFLUSH_BEFORE) {
499 clflushopt(addr);
500 mb();
501 }
edf4427b 502
d50415cc 503 *addr = value;
edf4427b 504
d50415cc
CW
505 /* Writes to the same cacheline are serialised by the CPU
506 * (including clflush). On the write path, we only require
507 * that it hits memory in an orderly fashion and place
508 * mb barriers at the start and end of the relocation phase
509 * to ensure ordering of clflush wrt the system.
510 */
511 if (flushes & CLFLUSH_AFTER)
512 clflushopt(addr);
513 } else
514 *addr = value;
edf4427b 515}
edf4427b 516
edf4427b 517static int
d50415cc
CW
518relocate_entry(struct drm_i915_gem_object *obj,
519 const struct drm_i915_gem_relocation_entry *reloc,
520 struct reloc_cache *cache,
521 u64 target_offset)
edf4427b 522{
d50415cc
CW
523 u64 offset = reloc->offset;
524 bool wide = cache->use_64bit_reloc;
525 void *vaddr;
edf4427b 526
d50415cc
CW
527 target_offset = relocation_target(reloc, target_offset);
528repeat:
529 vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
530 if (IS_ERR(vaddr))
531 return PTR_ERR(vaddr);
532
533 clflush_write32(vaddr + offset_in_page(offset),
534 lower_32_bits(target_offset),
535 cache->vaddr);
536
537 if (wide) {
538 offset += sizeof(u32);
539 target_offset >>= 32;
540 wide = false;
541 goto repeat;
edf4427b 542 }
edf4427b 543
edf4427b
CW
544 return 0;
545}
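/*
 * Worked example (editor's sketch, not in the upstream source): on a
 * platform with use_64bit_reloc set, a relocation at reloc->offset == 0xffc
 * is performed as two 32-bit writes. The first pass maps page 0
 * (0xffc >> PAGE_SHIFT) and stores the low dword at offset_in_page(0xffc);
 * "offset" is then advanced by sizeof(u32) to 0x1000, so the repeat pass
 * maps page 1 and stores the high dword at offset 0, with reloc_vaddr()
 * switching the cached mapping between the two pages.
 */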
edf4427b 546
54cf91dc
CW
547static int
548i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
27173f1f 549 struct eb_vmas *eb,
31a39207
CW
550 struct drm_i915_gem_relocation_entry *reloc,
551 struct reloc_cache *cache)
54cf91dc 552{
5db94019 553 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
54cf91dc 554 struct drm_gem_object *target_obj;
149c8407 555 struct drm_i915_gem_object *target_i915_obj;
27173f1f 556 struct i915_vma *target_vma;
d9ceb957 557 uint64_t target_offset;
8b78f0e5 558 int ret;
54cf91dc 559
67731b87 560 /* we already hold a reference to all valid objects */
27173f1f
BW
561 target_vma = eb_get_vma(eb, reloc->target_handle);
562 if (unlikely(target_vma == NULL))
54cf91dc 563 return -ENOENT;
27173f1f
BW
564 target_i915_obj = target_vma->obj;
565 target_obj = &target_vma->obj->base;
54cf91dc 566
934acce3 567 target_offset = gen8_canonical_addr(target_vma->node.start);
54cf91dc 568
e844b990
EA
569 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
570 * pipe_control writes because the gpu doesn't properly redirect them
571 * through the ppgtt for non_secure batchbuffers. */
5db94019 572 if (unlikely(IS_GEN6(dev_priv) &&
0875546c 573 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
fe14d5f4 574 ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
0875546c 575 PIN_GLOBAL);
fe14d5f4
TU
576 if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
577 return ret;
578 }
e844b990 579
54cf91dc 580 /* Validate that the target is in a valid r/w GPU domain */
b8f7ab17 581 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
ff240199 582 DRM_DEBUG("reloc with multiple write domains: "
54cf91dc
CW
583 "obj %p target %d offset %d "
584 "read %08x write %08x",
585 obj, reloc->target_handle,
586 (int) reloc->offset,
587 reloc->read_domains,
588 reloc->write_domain);
8b78f0e5 589 return -EINVAL;
54cf91dc 590 }
4ca4a250
DV
591 if (unlikely((reloc->write_domain | reloc->read_domains)
592 & ~I915_GEM_GPU_DOMAINS)) {
ff240199 593 DRM_DEBUG("reloc with read/write non-GPU domains: "
54cf91dc
CW
594 "obj %p target %d offset %d "
595 "read %08x write %08x",
596 obj, reloc->target_handle,
597 (int) reloc->offset,
598 reloc->read_domains,
599 reloc->write_domain);
8b78f0e5 600 return -EINVAL;
54cf91dc 601 }
54cf91dc
CW
602
603 target_obj->pending_read_domains |= reloc->read_domains;
604 target_obj->pending_write_domain |= reloc->write_domain;
605
606 /* If the relocation already has the right value in it, no
607 * more work needs to be done.
608 */
609 if (target_offset == reloc->presumed_offset)
67731b87 610 return 0;
54cf91dc
CW
611
612 /* Check that the relocation address is valid... */
3c94ceee 613 if (unlikely(reloc->offset >
d50415cc 614 obj->base.size - (cache->use_64bit_reloc ? 8 : 4))) {
ff240199 615 DRM_DEBUG("Relocation beyond object bounds: "
54cf91dc
CW
616 "obj %p target %d offset %d size %d.\n",
617 obj, reloc->target_handle,
618 (int) reloc->offset,
619 (int) obj->base.size);
8b78f0e5 620 return -EINVAL;
54cf91dc 621 }
b8f7ab17 622 if (unlikely(reloc->offset & 3)) {
ff240199 623 DRM_DEBUG("Relocation not 4-byte aligned: "
54cf91dc
CW
624 "obj %p target %d offset %d.\n",
625 obj, reloc->target_handle,
626 (int) reloc->offset);
8b78f0e5 627 return -EINVAL;
54cf91dc
CW
628 }
629
d50415cc 630 ret = relocate_entry(obj, reloc, cache, target_offset);
d4d36014
DV
631 if (ret)
632 return ret;
633
54cf91dc
CW
634 /* and update the user's relocation entry */
635 reloc->presumed_offset = target_offset;
67731b87 636 return 0;
54cf91dc
CW
637}
638
639static int
27173f1f
BW
640i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
641 struct eb_vmas *eb)
54cf91dc 642{
1d83f442
CW
643#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
644 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
54cf91dc 645 struct drm_i915_gem_relocation_entry __user *user_relocs;
27173f1f 646 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
31a39207
CW
647 struct reloc_cache cache;
648 int remain, ret = 0;
54cf91dc 649
3ed605bc 650 user_relocs = u64_to_user_ptr(entry->relocs_ptr);
d50415cc 651 reloc_cache_init(&cache, eb->i915);
54cf91dc 652
1d83f442
CW
653 remain = entry->relocation_count;
654 while (remain) {
655 struct drm_i915_gem_relocation_entry *r = stack_reloc;
ebc0808f
CW
656 unsigned long unwritten;
657 unsigned int count;
658
659 count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
1d83f442
CW
660 remain -= count;
661
ebc0808f
CW
662 /* This is the fast path and we cannot handle a pagefault
663 * whilst holding the struct mutex lest the user pass in the
664 * relocations contained within an mmapped bo, because in such a case
665 * the page fault handler would call i915_gem_fault() and
666 * we would try to acquire the struct mutex again. Obviously
667 * this is bad and so lockdep complains vehemently.
668 */
669 pagefault_disable();
670 unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
671 pagefault_enable();
672 if (unlikely(unwritten)) {
31a39207
CW
673 ret = -EFAULT;
674 goto out;
675 }
54cf91dc 676
1d83f442
CW
677 do {
678 u64 offset = r->presumed_offset;
54cf91dc 679
31a39207 680 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
1d83f442 681 if (ret)
31a39207 682 goto out;
1d83f442 683
ebc0808f
CW
684 if (r->presumed_offset != offset) {
685 pagefault_disable();
686 unwritten = __put_user(r->presumed_offset,
687 &user_relocs->presumed_offset);
688 pagefault_enable();
689 if (unlikely(unwritten)) {
690 /* Note that reporting an error now
691 * leaves everything in an inconsistent
692 * state as we have *already* changed
693 * the relocation value inside the
694 * object. As we have not changed the
695 * reloc.presumed_offset and will not
696 * change the execobject.offset, on a
697 * future call we may not rewrite the
698 * value inside the object, leaving it
699 * dangling and causing a GPU hang.
700 */
701 ret = -EFAULT;
702 goto out;
703 }
1d83f442
CW
704 }
705
706 user_relocs++;
707 r++;
708 } while (--count);
54cf91dc
CW
709 }
710
31a39207
CW
711out:
712 reloc_cache_fini(&cache);
713 return ret;
1d83f442 714#undef N_RELOC
54cf91dc
CW
715}
716
717static int
27173f1f
BW
718i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
719 struct eb_vmas *eb,
720 struct drm_i915_gem_relocation_entry *relocs)
54cf91dc 721{
27173f1f 722 const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
31a39207
CW
723 struct reloc_cache cache;
724 int i, ret = 0;
54cf91dc 725
d50415cc 726 reloc_cache_init(&cache, eb->i915);
54cf91dc 727 for (i = 0; i < entry->relocation_count; i++) {
31a39207 728 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
54cf91dc 729 if (ret)
31a39207 730 break;
54cf91dc 731 }
31a39207 732 reloc_cache_fini(&cache);
54cf91dc 733
31a39207 734 return ret;
54cf91dc
CW
735}
736
737static int
17601cbc 738i915_gem_execbuffer_relocate(struct eb_vmas *eb)
54cf91dc 739{
27173f1f 740 struct i915_vma *vma;
d4aeee77
CW
741 int ret = 0;
742
27173f1f
BW
743 list_for_each_entry(vma, &eb->vmas, exec_list) {
744 ret = i915_gem_execbuffer_relocate_vma(vma, eb);
54cf91dc 745 if (ret)
d4aeee77 746 break;
54cf91dc
CW
747 }
748
d4aeee77 749 return ret;
54cf91dc
CW
750}
751
edf4427b
CW
752static bool only_mappable_for_reloc(unsigned int flags)
753{
754 return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
755 __EXEC_OBJECT_NEEDS_MAP;
756}
757
1690e1eb 758static int
27173f1f 759i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
0bc40be8 760 struct intel_engine_cs *engine,
27173f1f 761 bool *need_reloc)
1690e1eb 762{
6f65e29a 763 struct drm_i915_gem_object *obj = vma->obj;
27173f1f 764 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
d23db88c 765 uint64_t flags;
1690e1eb
CW
766 int ret;
767
0875546c 768 flags = PIN_USER;
0229da32
DV
769 if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
770 flags |= PIN_GLOBAL;
771
edf4427b 772 if (!drm_mm_node_allocated(&vma->node)) {
101b506a
MT
773 /* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
774 * limit address to the first 4GBs for unflagged objects.
775 */
776 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
777 flags |= PIN_ZONE_4G;
edf4427b
CW
778 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
779 flags |= PIN_GLOBAL | PIN_MAPPABLE;
edf4427b
CW
780 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
781 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
506a8e87
CW
782 if (entry->flags & EXEC_OBJECT_PINNED)
783 flags |= entry->offset | PIN_OFFSET_FIXED;
101b506a
MT
784 if ((flags & PIN_MAPPABLE) == 0)
785 flags |= PIN_HIGH;
edf4427b 786 }
1ec9e26d 787
59bfa124
CW
788 ret = i915_vma_pin(vma,
789 entry->pad_to_size,
790 entry->alignment,
791 flags);
792 if ((ret == -ENOSPC || ret == -E2BIG) &&
edf4427b 793 only_mappable_for_reloc(entry->flags))
59bfa124
CW
794 ret = i915_vma_pin(vma,
795 entry->pad_to_size,
796 entry->alignment,
797 flags & ~PIN_MAPPABLE);
1690e1eb
CW
798 if (ret)
799 return ret;
800
7788a765
CW
801 entry->flags |= __EXEC_OBJECT_HAS_PIN;
802
82b6b6d7 803 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
49ef5294 804 ret = i915_vma_get_fence(vma);
82b6b6d7
CW
805 if (ret)
806 return ret;
9a5a53b3 807
49ef5294 808 if (i915_vma_pin_fence(vma))
82b6b6d7 809 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
1690e1eb
CW
810 }
811
27173f1f
BW
812 if (entry->offset != vma->node.start) {
813 entry->offset = vma->node.start;
ed5982e6
DV
814 *need_reloc = true;
815 }
816
817 if (entry->flags & EXEC_OBJECT_WRITE) {
818 obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
819 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
820 }
821
1690e1eb 822 return 0;
7788a765 823}
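/*
 * Illustrative flag composition (editor's sketch, not in the upstream
 * source): for a not-yet-bound vma whose exec entry carries
 * __EXEC_OBJECT_NEEDS_MAP and __EXEC_OBJECT_NEEDS_BIAS but not
 * EXEC_OBJECT_SUPPORTS_48B_ADDRESS, the request passed to i915_vma_pin()
 * works out to PIN_USER | PIN_ZONE_4G | PIN_GLOBAL | PIN_MAPPABLE |
 * BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS, i.e. a mappable binding placed at
 * least 256 KiB (BATCH_OFFSET_BIAS) into the address space and below 4 GiB;
 * PIN_HIGH is added only when PIN_MAPPABLE is not requested.
 */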
1690e1eb 824
d23db88c 825static bool
e6a84468 826need_reloc_mappable(struct i915_vma *vma)
d23db88c
CW
827{
828 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
d23db88c 829
e6a84468
CW
830 if (entry->relocation_count == 0)
831 return false;
832
3272db53 833 if (!i915_vma_is_ggtt(vma))
e6a84468
CW
834 return false;
835
836 /* See also use_cpu_reloc() */
0031fb96 837 if (HAS_LLC(to_i915(vma->obj->base.dev)))
e6a84468
CW
838 return false;
839
840 if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
841 return false;
842
843 return true;
844}
845
846static bool
847eb_vma_misplaced(struct i915_vma *vma)
848{
849 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
d23db88c 850
3272db53
CW
851 WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
852 !i915_vma_is_ggtt(vma));
d23db88c 853
f51455d4 854 if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
d23db88c
CW
855 return true;
856
91b2db6f
CW
857 if (vma->node.size < entry->pad_to_size)
858 return true;
859
506a8e87
CW
860 if (entry->flags & EXEC_OBJECT_PINNED &&
861 vma->node.start != entry->offset)
862 return true;
863
d23db88c
CW
864 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
865 vma->node.start < BATCH_OFFSET_BIAS)
866 return true;
867
edf4427b 868 /* avoid costly ping-pong once a batch bo ended up non-mappable */
05a20d09
CW
869 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
870 !i915_vma_is_map_and_fenceable(vma))
edf4427b
CW
871 return !only_mappable_for_reloc(entry->flags);
872
101b506a
MT
873 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
874 (vma->node.start + vma->node.size - 1) >> 32)
875 return true;
876
d23db88c
CW
877 return false;
878}
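/*
 * Illustrative example (editor's sketch, not in the upstream source): an
 * object without EXEC_OBJECT_SUPPORTS_48B_ADDRESS that ended up bound at
 * 0xfffff000 with a 0x2000 node size spans up to 0x100000fff, so
 * (start + size - 1) >> 32 is non-zero and the vma is reported as
 * misplaced; the reserve path then unbinds it and repins with PIN_ZONE_4G
 * so it lands below 4 GiB.
 */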
879
54cf91dc 880static int
0bc40be8 881i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
27173f1f 882 struct list_head *vmas,
e2efd130 883 struct i915_gem_context *ctx,
ed5982e6 884 bool *need_relocs)
54cf91dc 885{
432e58ed 886 struct drm_i915_gem_object *obj;
27173f1f 887 struct i915_vma *vma;
68c8c17f 888 struct i915_address_space *vm;
27173f1f 889 struct list_head ordered_vmas;
506a8e87 890 struct list_head pinned_vmas;
c033666a 891 bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
7788a765 892 int retry;
6fe4f140 893
68c8c17f
BW
894 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
895
27173f1f 896 INIT_LIST_HEAD(&ordered_vmas);
506a8e87 897 INIT_LIST_HEAD(&pinned_vmas);
27173f1f 898 while (!list_empty(vmas)) {
6fe4f140
CW
899 struct drm_i915_gem_exec_object2 *entry;
900 bool need_fence, need_mappable;
901
27173f1f
BW
902 vma = list_first_entry(vmas, struct i915_vma, exec_list);
903 obj = vma->obj;
904 entry = vma->exec_entry;
6fe4f140 905
b1b38278
DW
906 if (ctx->flags & CONTEXT_NO_ZEROMAP)
907 entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
908
82b6b6d7
CW
909 if (!has_fenced_gpu_access)
910 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
6fe4f140 911 need_fence =
6fe4f140 912 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3e510a8e 913 i915_gem_object_is_tiled(obj);
27173f1f 914 need_mappable = need_fence || need_reloc_mappable(vma);
6fe4f140 915
506a8e87
CW
916 if (entry->flags & EXEC_OBJECT_PINNED)
917 list_move_tail(&vma->exec_list, &pinned_vmas);
918 else if (need_mappable) {
e6a84468 919 entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
27173f1f 920 list_move(&vma->exec_list, &ordered_vmas);
e6a84468 921 } else
27173f1f 922 list_move_tail(&vma->exec_list, &ordered_vmas);
595dad76 923
ed5982e6 924 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
595dad76 925 obj->base.pending_write_domain = 0;
6fe4f140 926 }
27173f1f 927 list_splice(&ordered_vmas, vmas);
506a8e87 928 list_splice(&pinned_vmas, vmas);
54cf91dc
CW
929
930 /* Attempt to pin all of the buffers into the GTT.
931 * This is done in 3 phases:
932 *
933 * 1a. Unbind all objects that do not match the GTT constraints for
934 * the execbuffer (fenceable, mappable, alignment etc).
935 * 1b. Increment pin count for already bound objects.
936 * 2. Bind new objects.
937 * 3. Decrement pin count.
938 *
7788a765 939 * This avoids unnecessary unbinding of later objects in order to make
54cf91dc
CW
940 * room for the earlier objects *unless* we need to defragment.
941 */
942 retry = 0;
943 do {
7788a765 944 int ret = 0;
54cf91dc
CW
945
946 /* Unbind any ill-fitting objects or pin. */
27173f1f 947 list_for_each_entry(vma, vmas, exec_list) {
27173f1f 948 if (!drm_mm_node_allocated(&vma->node))
54cf91dc
CW
949 continue;
950
e6a84468 951 if (eb_vma_misplaced(vma))
27173f1f 952 ret = i915_vma_unbind(vma);
54cf91dc 953 else
0bc40be8
TU
954 ret = i915_gem_execbuffer_reserve_vma(vma,
955 engine,
956 need_relocs);
432e58ed 957 if (ret)
54cf91dc 958 goto err;
54cf91dc
CW
959 }
960
961 /* Bind fresh objects */
27173f1f
BW
962 list_for_each_entry(vma, vmas, exec_list) {
963 if (drm_mm_node_allocated(&vma->node))
1690e1eb 964 continue;
54cf91dc 965
0bc40be8
TU
966 ret = i915_gem_execbuffer_reserve_vma(vma, engine,
967 need_relocs);
7788a765
CW
968 if (ret)
969 goto err;
54cf91dc
CW
970 }
971
a415d355 972err:
6c085a72 973 if (ret != -ENOSPC || retry++)
54cf91dc
CW
974 return ret;
975
a415d355
CW
976 /* Decrement pin count for bound objects */
977 list_for_each_entry(vma, vmas, exec_list)
978 i915_gem_execbuffer_unreserve_vma(vma);
979
68c8c17f 980 ret = i915_gem_evict_vm(vm, true);
54cf91dc
CW
981 if (ret)
982 return ret;
54cf91dc
CW
983 } while (1);
984}
985
986static int
987i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
ed5982e6 988 struct drm_i915_gem_execbuffer2 *args,
54cf91dc 989 struct drm_file *file,
0bc40be8 990 struct intel_engine_cs *engine,
27173f1f 991 struct eb_vmas *eb,
b1b38278 992 struct drm_i915_gem_exec_object2 *exec,
e2efd130 993 struct i915_gem_context *ctx)
54cf91dc
CW
994{
995 struct drm_i915_gem_relocation_entry *reloc;
27173f1f
BW
996 struct i915_address_space *vm;
997 struct i915_vma *vma;
ed5982e6 998 bool need_relocs;
dd6864a4 999 int *reloc_offset;
54cf91dc 1000 int i, total, ret;
b205ca57 1001 unsigned count = args->buffer_count;
54cf91dc 1002
27173f1f
BW
1003 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
1004
67731b87 1005 /* We may process another execbuffer during the unlock... */
27173f1f
BW
1006 while (!list_empty(&eb->vmas)) {
1007 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
1008 list_del_init(&vma->exec_list);
a415d355 1009 i915_gem_execbuffer_unreserve_vma(vma);
624192cf 1010 i915_vma_put(vma);
67731b87
CW
1011 }
1012
54cf91dc
CW
1013 mutex_unlock(&dev->struct_mutex);
1014
1015 total = 0;
1016 for (i = 0; i < count; i++)
432e58ed 1017 total += exec[i].relocation_count;
54cf91dc 1018
dd6864a4 1019 reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
54cf91dc 1020 reloc = drm_malloc_ab(total, sizeof(*reloc));
dd6864a4
CW
1021 if (reloc == NULL || reloc_offset == NULL) {
1022 drm_free_large(reloc);
1023 drm_free_large(reloc_offset);
54cf91dc
CW
1024 mutex_lock(&dev->struct_mutex);
1025 return -ENOMEM;
1026 }
1027
1028 total = 0;
1029 for (i = 0; i < count; i++) {
1030 struct drm_i915_gem_relocation_entry __user *user_relocs;
262b6d36
CW
1031 u64 invalid_offset = (u64)-1;
1032 int j;
54cf91dc 1033
3ed605bc 1034 user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
54cf91dc
CW
1035
1036 if (copy_from_user(reloc+total, user_relocs,
432e58ed 1037 exec[i].relocation_count * sizeof(*reloc))) {
54cf91dc
CW
1038 ret = -EFAULT;
1039 mutex_lock(&dev->struct_mutex);
1040 goto err;
1041 }
1042
262b6d36
CW
1043 /* As we do not update the known relocation offsets after
1044 * relocating (due to the complexities in lock handling),
1045 * we need to mark them as invalid now so that we force the
1046 * relocation processing next time. Just in case the target
1047 * object is evicted and then rebound into its old
1048 * presumed_offset before the next execbuffer - if that
1049 * happened we would make the mistake of assuming that the
1050 * relocations were valid.
1051 */
1052 for (j = 0; j < exec[i].relocation_count; j++) {
9aab8bff
CW
1053 if (__copy_to_user(&user_relocs[j].presumed_offset,
1054 &invalid_offset,
1055 sizeof(invalid_offset))) {
262b6d36
CW
1056 ret = -EFAULT;
1057 mutex_lock(&dev->struct_mutex);
1058 goto err;
1059 }
1060 }
1061
dd6864a4 1062 reloc_offset[i] = total;
432e58ed 1063 total += exec[i].relocation_count;
54cf91dc
CW
1064 }
1065
1066 ret = i915_mutex_lock_interruptible(dev);
1067 if (ret) {
1068 mutex_lock(&dev->struct_mutex);
1069 goto err;
1070 }
1071
67731b87 1072 /* reacquire the objects */
67731b87 1073 eb_reset(eb);
27173f1f 1074 ret = eb_lookup_vmas(eb, exec, args, vm, file);
3b96eff4
CW
1075 if (ret)
1076 goto err;
67731b87 1077
ed5982e6 1078 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
0bc40be8
TU
1079 ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
1080 &need_relocs);
54cf91dc
CW
1081 if (ret)
1082 goto err;
1083
27173f1f
BW
1084 list_for_each_entry(vma, &eb->vmas, exec_list) {
1085 int offset = vma->exec_entry - exec;
1086 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
1087 reloc + reloc_offset[offset]);
54cf91dc
CW
1088 if (ret)
1089 goto err;
54cf91dc
CW
1090 }
1091
1092 /* Leave the user relocations as they are; this is the painfully slow path,
1093 * and we want to avoid the complication of dropping the lock whilst
1094 * having buffers reserved in the aperture and so causing spurious
1095 * ENOSPC for random operations.
1096 */
1097
1098err:
1099 drm_free_large(reloc);
dd6864a4 1100 drm_free_large(reloc_offset);
54cf91dc
CW
1101 return ret;
1102}
1103
54cf91dc 1104static int
535fbe82 1105i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
27173f1f 1106 struct list_head *vmas)
54cf91dc 1107{
27173f1f 1108 struct i915_vma *vma;
432e58ed 1109 int ret;
54cf91dc 1110
27173f1f
BW
1111 list_for_each_entry(vma, vmas, exec_list) {
1112 struct drm_i915_gem_object *obj = vma->obj;
03ade511 1113
77ae9957
CW
1114 if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
1115 continue;
1116
d07f0e59
CW
1117 ret = i915_gem_request_await_object
1118 (req, obj, obj->base.pending_write_domain);
1119 if (ret)
1120 return ret;
851ba2d6 1121
6ac42f41 1122 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
dcd79934 1123 i915_gem_clflush_object(obj, false);
c59a333f
CW
1124 }
1125
dcd79934
CW
1126 /* Unconditionally flush any chipset caches (for streaming writes). */
1127 i915_gem_chipset_flush(req->engine->i915);
6ac42f41 1128
c7fe7d25 1129 /* Unconditionally invalidate GPU caches and TLBs. */
7c9cf4e3 1130 return req->engine->emit_flush(req, EMIT_INVALIDATE);
54cf91dc
CW
1131}
1132
432e58ed
CW
1133static bool
1134i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
54cf91dc 1135{
ed5982e6
DV
1136 if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
1137 return false;
1138
2f5945bc
CW
1139 /* Kernel clipping was a DRI1 misfeature */
1140 if (exec->num_cliprects || exec->cliprects_ptr)
1141 return false;
1142
1143 if (exec->DR4 == 0xffffffff) {
1144 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1145 exec->DR4 = 0;
1146 }
1147 if (exec->DR1 || exec->DR4)
1148 return false;
1149
1150 if ((exec->batch_start_offset | exec->batch_len) & 0x7)
1151 return false;
1152
1153 return true;
54cf91dc
CW
1154}
1155
1156static int
ad19f10b
CW
1157validate_exec_list(struct drm_device *dev,
1158 struct drm_i915_gem_exec_object2 *exec,
54cf91dc
CW
1159 int count)
1160{
b205ca57
DV
1161 unsigned relocs_total = 0;
1162 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
ad19f10b
CW
1163 unsigned invalid_flags;
1164 int i;
1165
9e2793f6
DG
1166 /* INTERNAL flags must not overlap with external ones */
1167 BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);
1168
ad19f10b
CW
1169 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
1170 if (USES_FULL_PPGTT(dev))
1171 invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
54cf91dc
CW
1172
1173 for (i = 0; i < count; i++) {
3ed605bc 1174 char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
54cf91dc
CW
1175 int length; /* limited by fault_in_pages_readable() */
1176
ad19f10b 1177 if (exec[i].flags & invalid_flags)
ed5982e6
DV
1178 return -EINVAL;
1179
934acce3
MW
1180 /* Offset can be used as input (EXEC_OBJECT_PINNED), reject
1181 * any non-page-aligned or non-canonical addresses.
1182 */
1183 if (exec[i].flags & EXEC_OBJECT_PINNED) {
1184 if (exec[i].offset !=
1185 gen8_canonical_addr(exec[i].offset & PAGE_MASK))
1186 return -EINVAL;
934acce3
MW
1187 }
1188
038c95a3
MW
1189 /* From drm_mm perspective address space is continuous,
1190 * so from this point we're always using non-canonical
1191 * form internally.
1192 */
1193 exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
1194
55a9785d
CW
1195 if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
1196 return -EINVAL;
1197
91b2db6f
CW
1198 /* pad_to_size was once a reserved field, so sanitize it */
1199 if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
1200 if (offset_in_page(exec[i].pad_to_size))
1201 return -EINVAL;
1202 } else {
1203 exec[i].pad_to_size = 0;
1204 }
1205
3118a4f6
KC
1206 /* First check for malicious input causing overflow in
1207 * the worst case where we need to allocate the entire
1208 * relocation tree as a single array.
1209 */
1210 if (exec[i].relocation_count > relocs_max - relocs_total)
54cf91dc 1211 return -EINVAL;
3118a4f6 1212 relocs_total += exec[i].relocation_count;
54cf91dc
CW
1213
1214 length = exec[i].relocation_count *
1215 sizeof(struct drm_i915_gem_relocation_entry);
30587535
KC
1216 /*
1217 * We must check that the entire relocation array is safe
1218 * to read, but since we may need to update the presumed
1219 * offsets during execution, check for full write access.
1220 */
54cf91dc
CW
1221 if (!access_ok(VERIFY_WRITE, ptr, length))
1222 return -EFAULT;
1223
d330a953 1224 if (likely(!i915.prefault_disable)) {
4bce9f6e 1225 if (fault_in_pages_readable(ptr, length))
0b74b508
XZ
1226 return -EFAULT;
1227 }
54cf91dc
CW
1228 }
1229
1230 return 0;
1231}
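/*
 * Illustrative bound (editor's sketch, not in the upstream source):
 * struct drm_i915_gem_relocation_entry is 32 bytes, so relocs_max is
 * UINT_MAX / 32 = 134217727 entries. Rejecting buffer lists whose running
 * relocs_total would exceed that keeps the slow path's worst-case single
 * allocation of the whole relocation tree, total * sizeof(*reloc), from
 * wrapping a 32-bit byte count.
 */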
1232
e2efd130 1233static struct i915_gem_context *
d299cce7 1234i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
0bc40be8 1235 struct intel_engine_cs *engine, const u32 ctx_id)
d299cce7 1236{
f7978a0c 1237 struct i915_gem_context *ctx;
d299cce7 1238
ca585b5d 1239 ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
72ad5c45 1240 if (IS_ERR(ctx))
41bde553 1241 return ctx;
d299cce7 1242
6095868a 1243 if (i915_gem_context_is_banned(ctx)) {
d299cce7 1244 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
41bde553 1245 return ERR_PTR(-EIO);
d299cce7
MK
1246 }
1247
41bde553 1248 return ctx;
d299cce7
MK
1249}
1250
7aa6ca61
CW
1251static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
1252{
1253 return !(obj->cache_level == I915_CACHE_NONE ||
1254 obj->cache_level == I915_CACHE_WT);
1255}
1256
5cf3d280
CW
1257void i915_vma_move_to_active(struct i915_vma *vma,
1258 struct drm_i915_gem_request *req,
1259 unsigned int flags)
1260{
1261 struct drm_i915_gem_object *obj = vma->obj;
1262 const unsigned int idx = req->engine->id;
1263
81147b07 1264 lockdep_assert_held(&req->i915->drm.struct_mutex);
5cf3d280
CW
1265 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1266
b0decaf7
CW
1267 /* Add a reference if we're newly entering the active list.
1268 * The order in which we add operations to the retirement queue is
1269 * vital here: mark_active adds to the start of the callback list,
1270 * such that subsequent callbacks are called first. Therefore we
1271 * add the active reference first and queue for it to be dropped
1272 * *last*.
1273 */
d07f0e59
CW
1274 if (!i915_vma_is_active(vma))
1275 obj->active_count++;
1276 i915_vma_set_active(vma, idx);
1277 i915_gem_active_set(&vma->last_read[idx], req);
1278 list_move_tail(&vma->vm_link, &vma->vm->active_list);
5cf3d280
CW
1279
1280 if (flags & EXEC_OBJECT_WRITE) {
5b8c8aec
CW
1281 if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
1282 i915_gem_active_set(&obj->frontbuffer_write, req);
5cf3d280
CW
1283
1284 /* update for the implicit flush after a batch */
1285 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
7aa6ca61
CW
1286 if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
1287 obj->cache_dirty = true;
5cf3d280
CW
1288 }
1289
49ef5294
CW
1290 if (flags & EXEC_OBJECT_NEEDS_FENCE)
1291 i915_gem_active_set(&vma->last_fence, req);
5cf3d280
CW
1292}
1293
ad778f89
CW
1294static void eb_export_fence(struct drm_i915_gem_object *obj,
1295 struct drm_i915_gem_request *req,
1296 unsigned int flags)
1297{
d07f0e59 1298 struct reservation_object *resv = obj->resv;
ad778f89
CW
1299
1300 /* Ignore errors from failing to allocate the new fence, we can't
1301 * handle an error right now. Worst case should be missed
1302 * synchronisation leading to rendering corruption.
1303 */
e2989f14 1304 reservation_object_lock(resv, NULL);
ad778f89
CW
1305 if (flags & EXEC_OBJECT_WRITE)
1306 reservation_object_add_excl_fence(resv, &req->fence);
1307 else if (reservation_object_reserve_shared(resv) == 0)
1308 reservation_object_add_shared_fence(resv, &req->fence);
e2989f14 1309 reservation_object_unlock(resv);
ad778f89
CW
1310}
1311
5b043f4e 1312static void
27173f1f 1313i915_gem_execbuffer_move_to_active(struct list_head *vmas,
8a8edb59 1314 struct drm_i915_gem_request *req)
432e58ed 1315{
27173f1f 1316 struct i915_vma *vma;
432e58ed 1317
27173f1f
BW
1318 list_for_each_entry(vma, vmas, exec_list) {
1319 struct drm_i915_gem_object *obj = vma->obj;
db53a302 1320
432e58ed 1321 obj->base.write_domain = obj->base.pending_write_domain;
5cf3d280
CW
1322 if (obj->base.write_domain)
1323 vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
1324 else
ed5982e6
DV
1325 obj->base.pending_read_domains |= obj->base.read_domains;
1326 obj->base.read_domains = obj->base.pending_read_domains;
432e58ed 1327
5cf3d280 1328 i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
ad778f89 1329 eb_export_fence(obj, req, vma->exec_entry->flags);
432e58ed
CW
1330 }
1331}
1332
ae662d31 1333static int
b5321f30 1334i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
ae662d31 1335{
73dec95e
TU
1336 u32 *cs;
1337 int i;
ae662d31 1338
b5321f30 1339 if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
9d662da8
DV
1340 DRM_DEBUG("sol reset is gen7/rcs only\n");
1341 return -EINVAL;
1342 }
ae662d31 1343
73dec95e
TU
1344 cs = intel_ring_begin(req, 4 * 3);
1345 if (IS_ERR(cs))
1346 return PTR_ERR(cs);
ae662d31
EA
1347
1348 for (i = 0; i < 4; i++) {
73dec95e
TU
1349 *cs++ = MI_LOAD_REGISTER_IMM(1);
1350 *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
1351 *cs++ = 0;
ae662d31
EA
1352 }
1353
73dec95e 1354 intel_ring_advance(req, cs);
ae662d31
EA
1355
1356 return 0;
1357}
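/*
 * Editor's note (not in the upstream source): the emission above is
 * 4 * 3 == 12 dwords, one MI_LOAD_REGISTER_IMM(1) triplet (opcode,
 * register offset, value 0) per GEN7_SO_WRITE_OFFSET(0..3), which zeroes
 * the four streamout write-offset registers before the batch runs when
 * I915_EXEC_GEN7_SOL_RESET is requested.
 */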
1358
058d88c4 1359static struct i915_vma *
0bc40be8 1360i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
71745376 1361 struct drm_i915_gem_exec_object2 *shadow_exec_entry,
71745376 1362 struct drm_i915_gem_object *batch_obj,
59bfa124 1363 struct eb_vmas *eb,
71745376
BV
1364 u32 batch_start_offset,
1365 u32 batch_len,
17cabf57 1366 bool is_master)
71745376 1367{
71745376 1368 struct drm_i915_gem_object *shadow_batch_obj;
17cabf57 1369 struct i915_vma *vma;
71745376
BV
1370 int ret;
1371
0bc40be8 1372 shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
17cabf57 1373 PAGE_ALIGN(batch_len));
71745376 1374 if (IS_ERR(shadow_batch_obj))
59bfa124 1375 return ERR_CAST(shadow_batch_obj);
71745376 1376
33a051a5
CW
1377 ret = intel_engine_cmd_parser(engine,
1378 batch_obj,
1379 shadow_batch_obj,
1380 batch_start_offset,
1381 batch_len,
1382 is_master);
058d88c4
CW
1383 if (ret) {
1384 if (ret == -EACCES) /* unhandled chained batch */
1385 vma = NULL;
1386 else
1387 vma = ERR_PTR(ret);
1388 goto out;
1389 }
71745376 1390
058d88c4
CW
1391 vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
1392 if (IS_ERR(vma))
1393 goto out;
de4e783a 1394
17cabf57 1395 memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
71745376 1396
17cabf57 1397 vma->exec_entry = shadow_exec_entry;
de4e783a 1398 vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
25dc556a 1399 i915_gem_object_get(shadow_batch_obj);
17cabf57 1400 list_add_tail(&vma->exec_list, &eb->vmas);
71745376 1401
058d88c4 1402out:
de4e783a 1403 i915_gem_object_unpin_pages(shadow_batch_obj);
058d88c4 1404 return vma;
71745376 1405}
5c6c6003 1406
5b043f4e
CW
1407static int
1408execbuf_submit(struct i915_execbuffer_params *params,
1409 struct drm_i915_gem_execbuffer2 *args,
1410 struct list_head *vmas)
78382593 1411{
b5321f30 1412 struct drm_i915_private *dev_priv = params->request->i915;
5f19e2bf 1413 u64 exec_start, exec_len;
78382593 1414 int instp_mode;
73dec95e 1415 u32 instp_mask, *cs;
2f5945bc 1416 int ret;
78382593 1417
535fbe82 1418 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
78382593 1419 if (ret)
2f5945bc 1420 return ret;
78382593 1421
ba01cc93 1422 ret = i915_switch_context(params->request);
78382593 1423 if (ret)
2f5945bc 1424 return ret;
78382593
OM
1425
1426 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1427 instp_mask = I915_EXEC_CONSTANTS_MASK;
1428 switch (instp_mode) {
1429 case I915_EXEC_CONSTANTS_REL_GENERAL:
1430 case I915_EXEC_CONSTANTS_ABSOLUTE:
1431 case I915_EXEC_CONSTANTS_REL_SURFACE:
b5321f30 1432 if (instp_mode != 0 && params->engine->id != RCS) {
78382593 1433 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
2f5945bc 1434 return -EINVAL;
78382593
OM
1435 }
1436
1437 if (instp_mode != dev_priv->relative_constants_mode) {
b5321f30 1438 if (INTEL_INFO(dev_priv)->gen < 4) {
78382593 1439 DRM_DEBUG("no rel constants on pre-gen4\n");
2f5945bc 1440 return -EINVAL;
78382593
OM
1441 }
1442
b5321f30 1443 if (INTEL_INFO(dev_priv)->gen > 5 &&
78382593
OM
1444 instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1445 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
2f5945bc 1446 return -EINVAL;
78382593
OM
1447 }
1448
1449 /* The HW changed the meaning on this bit on gen6 */
b5321f30 1450 if (INTEL_INFO(dev_priv)->gen >= 6)
78382593
OM
1451 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1452 }
1453 break;
1454 default:
1455 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
2f5945bc 1456 return -EINVAL;
78382593
OM
1457 }
1458
b5321f30 1459 if (params->engine->id == RCS &&
2f5945bc 1460 instp_mode != dev_priv->relative_constants_mode) {
73dec95e
TU
1461 cs = intel_ring_begin(params->request, 4);
1462 if (IS_ERR(cs))
1463 return PTR_ERR(cs);
1464
1465 *cs++ = MI_NOOP;
1466 *cs++ = MI_LOAD_REGISTER_IMM(1);
1467 *cs++ = i915_mmio_reg_offset(INSTPM);
1468 *cs++ = instp_mask << 16 | instp_mode;
1469 intel_ring_advance(params->request, cs);
78382593
OM
1470
1471 dev_priv->relative_constants_mode = instp_mode;
1472 }
1473
1474 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
b5321f30 1475 ret = i915_reset_gen7_sol_offsets(params->request);
78382593 1476 if (ret)
2f5945bc 1477 return ret;
78382593
OM
1478 }
1479
5f19e2bf 1480 exec_len = args->batch_len;
59bfa124 1481 exec_start = params->batch->node.start +
5f19e2bf
JH
1482 params->args_batch_start_offset;
1483
9d611c03 1484 if (exec_len == 0)
0b537272 1485 exec_len = params->batch->size - params->args_batch_start_offset;
9d611c03 1486
803688ba
CW
1487 ret = params->engine->emit_bb_start(params->request,
1488 exec_start, exec_len,
1489 params->dispatch_flags);
2f5945bc
CW
1490 if (ret)
1491 return ret;
78382593 1492
8a8edb59 1493 i915_gem_execbuffer_move_to_active(vmas, params->request);
78382593 1494
2f5945bc 1495 return 0;
78382593
OM
1496}
1497
a8ebba75
ZY
1498/**
1499 * Find one BSD ring to dispatch the corresponding BSD command.
c80ff16e 1500 * The engine index is returned.
a8ebba75 1501 */
de1add36 1502static unsigned int
c80ff16e
CW
1503gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
1504 struct drm_file *file)
a8ebba75 1505{
a8ebba75
ZY
1506 struct drm_i915_file_private *file_priv = file->driver_priv;
1507
de1add36 1508 /* Check whether the file_priv has already selected one ring. */
6f633402
JL
1509 if ((int)file_priv->bsd_engine < 0)
1510 file_priv->bsd_engine = atomic_fetch_xor(1,
1511 &dev_priv->mm.bsd_engine_dispatch_index);
d23db88c 1512
c80ff16e 1513 return file_priv->bsd_engine;
d23db88c
CW
1514}
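/*
 * Illustrative example (editor's sketch, not in the upstream source):
 * atomic_fetch_xor(1, ...) returns the previous value of the shared
 * dispatch index and flips it, so the first client submitting with
 * I915_EXEC_BSD_DEFAULT reads 0 (mapped to VCS via _VCS(bsd_idx) in
 * eb_select_engine()), the next client reads 1 and gets VCS2, and so on.
 * The result is cached in file_priv->bsd_engine, so a given client keeps
 * using the same BSD engine for subsequent submissions.
 */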
1515
de1add36
TU
1516#define I915_USER_RINGS (4)
1517
117897f4 1518static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
de1add36
TU
1519 [I915_EXEC_DEFAULT] = RCS,
1520 [I915_EXEC_RENDER] = RCS,
1521 [I915_EXEC_BLT] = BCS,
1522 [I915_EXEC_BSD] = VCS,
1523 [I915_EXEC_VEBOX] = VECS
1524};
1525
f8ca0c07
DG
1526static struct intel_engine_cs *
1527eb_select_engine(struct drm_i915_private *dev_priv,
1528 struct drm_file *file,
1529 struct drm_i915_gem_execbuffer2 *args)
de1add36
TU
1530{
1531 unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
f8ca0c07 1532 struct intel_engine_cs *engine;
de1add36
TU
1533
1534 if (user_ring_id > I915_USER_RINGS) {
1535 DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
f8ca0c07 1536 return NULL;
de1add36
TU
1537 }
1538
1539 if ((user_ring_id != I915_EXEC_BSD) &&
1540 ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
1541 DRM_DEBUG("execbuf with non bsd ring but with invalid "
1542 "bsd dispatch flags: %d\n", (int)(args->flags));
f8ca0c07 1543 return NULL;
de1add36
TU
1544 }
1545
1546 if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
1547 unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
1548
1549 if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
c80ff16e 1550 bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
de1add36
TU
1551 } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
1552 bsd_idx <= I915_EXEC_BSD_RING2) {
d9da6aa0 1553 bsd_idx >>= I915_EXEC_BSD_SHIFT;
de1add36
TU
1554 bsd_idx--;
1555 } else {
1556 DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
1557 bsd_idx);
f8ca0c07 1558 return NULL;
de1add36
TU
1559 }
1560
3b3f1650 1561 engine = dev_priv->engine[_VCS(bsd_idx)];
de1add36 1562 } else {
3b3f1650 1563 engine = dev_priv->engine[user_ring_map[user_ring_id]];
de1add36
TU
1564 }
1565
3b3f1650 1566 if (!engine) {
de1add36 1567 DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
f8ca0c07 1568 return NULL;
de1add36
TU
1569 }
1570
f8ca0c07 1571 return engine;
de1add36
TU
1572}
1573
54cf91dc
CW
1574static int
1575i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1576 struct drm_file *file,
1577 struct drm_i915_gem_execbuffer2 *args,
41bde553 1578 struct drm_i915_gem_exec_object2 *exec)
54cf91dc 1579{
72e96d64
JL
1580 struct drm_i915_private *dev_priv = to_i915(dev);
1581 struct i915_ggtt *ggtt = &dev_priv->ggtt;
27173f1f 1582 struct eb_vmas *eb;
78a42377 1583 struct drm_i915_gem_exec_object2 shadow_exec_entry;
e2f80391 1584 struct intel_engine_cs *engine;
e2efd130 1585 struct i915_gem_context *ctx;
41bde553 1586 struct i915_address_space *vm;
5f19e2bf
JH
1587 struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1588 struct i915_execbuffer_params *params = &params_master;
d299cce7 1589 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
8e004efc 1590 u32 dispatch_flags;
fec0445c
CW
1591 struct dma_fence *in_fence = NULL;
1592 struct sync_file *out_fence = NULL;
1593 int out_fence_fd = -1;
78382593 1594 int ret;
ed5982e6 1595 bool need_relocs;
54cf91dc 1596
ed5982e6 1597 if (!i915_gem_check_execbuffer(args))
432e58ed 1598 return -EINVAL;
432e58ed 1599
ad19f10b 1600 ret = validate_exec_list(dev, exec, args->buffer_count);
54cf91dc
CW
1601 if (ret)
1602 return ret;
1603
8e004efc 1604 dispatch_flags = 0;
d7d4eedd 1605 if (args->flags & I915_EXEC_SECURE) {
b3ac9f25 1606 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
d7d4eedd
CW
1607 return -EPERM;
1608
8e004efc 1609 dispatch_flags |= I915_DISPATCH_SECURE;
d7d4eedd 1610 }
b45305fc 1611 if (args->flags & I915_EXEC_IS_PINNED)
8e004efc 1612 dispatch_flags |= I915_DISPATCH_PINNED;
d7d4eedd 1613
f8ca0c07
DG
1614 engine = eb_select_engine(dev_priv, file, args);
1615 if (!engine)
1616 return -EINVAL;
54cf91dc
CW
1617
1618 if (args->buffer_count < 1) {
ff240199 1619 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
54cf91dc
CW
1620 return -EINVAL;
1621 }
54cf91dc 1622
a9ed33ca 1623 if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
4805fe82 1624 if (!HAS_RESOURCE_STREAMER(dev_priv)) {
a9ed33ca
AJ
1625 DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
1626 return -EINVAL;
1627 }
e2f80391 1628 if (engine->id != RCS) {
a9ed33ca 1629 DRM_DEBUG("RS is not available on %s\n",
e2f80391 1630 engine->name);
a9ed33ca
AJ
1631 return -EINVAL;
1632 }
1633
1634 dispatch_flags |= I915_DISPATCH_RS;
1635 }
1636
fec0445c
CW
1637 if (args->flags & I915_EXEC_FENCE_IN) {
1638 in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
4a04e371
DCS
1639 if (!in_fence)
1640 return -EINVAL;
fec0445c
CW
1641 }
1642
1643 if (args->flags & I915_EXEC_FENCE_OUT) {
1644 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
1645 if (out_fence_fd < 0) {
1646 ret = out_fence_fd;
4a04e371 1647 goto err_in_fence;
fec0445c
CW
1648 }
1649 }
1650
67d97da3
CW
1651 /* Take a local wakeref for preparing to dispatch the execbuf as
1652 * we expect to access the hardware fairly frequently in the
1653 * process. Upon first dispatch, we acquire another prolonged
1654 * wakeref that we hold until the GPU has been idle for at least
1655 * 100ms.
1656 */
f65c9168
PZ
1657 intel_runtime_pm_get(dev_priv);
1658
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_get(ctx);

	if (ctx->ppgtt)
		vm = &ctx->ppgtt->base;
	else
		vm = &ggtt->base;

	memset(&params_master, 0x00, sizeof(params_master));

	eb = eb_create(dev_priv, args);
	if (eb == NULL) {
		i915_gem_context_put(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	params->batch = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
								engine,
								eb, exec, ctx);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (params->batch->obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	if (args->batch_start_offset > params->batch->size ||
	    args->batch_len > params->batch->size - args->batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		ret = -EINVAL;
		goto err;
	}

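	/*
	 * Note on the bounds check above: it is deliberately written as two
	 * comparisons with a subtraction rather than the more obvious
	 * "batch_start_offset + batch_len > size", so the two user-controlled
	 * u32 values are never added together. A sketch of the wraparound the
	 * chosen form avoids (numbers are illustrative only):
	 *
	 *	u32 start = 4096, len = 0xfffff000;
	 *	start + len		wraps to 0, so "start + len > size"
	 *				would wrongly accept the batch;
	 *	len > size - start	correctly rejects it.
	 */
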
	params->args_batch_start_offset = args->batch_start_offset;
	if (engine->needs_cmd_parser && args->batch_len) {
		struct i915_vma *vma;

		vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
						params->batch->obj,
						eb,
						args->batch_start_offset,
						args->batch_len,
						drm_is_current_master(file));
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			dispatch_flags |= I915_DISPATCH_SECURE;
			params->args_batch_start_offset = 0;
			params->batch = vma;
		}
	}

	params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (dispatch_flags & I915_DISPATCH_SECURE) {
		struct drm_i915_gem_object *obj = params->batch->obj;
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		params->batch = vma;
	}

	/* Allocate a request for this batch buffer nice and early. */
	params->request = i915_gem_request_alloc(engine, ctx);
	if (IS_ERR(params->request)) {
		ret = PTR_ERR(params->request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		ret = i915_gem_request_await_dma_fence(params->request,
						       in_fence);
		if (ret < 0)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&params->request->fence);
		if (!out_fence) {
			ret = -ENOMEM;
			goto err_request;
		}
	}

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	params->request->batch = params->batch;

	ret = i915_gem_request_add_to_client(params->request, file);
	if (ret)
		goto err_request;

	/*
	 * Save assorted stuff away to pass through to *_submission().
	 * NB: This data should be 'persistent' and not local as it will be
	 * kept around beyond the duration of the IOCTL once the GPU
	 * scheduler arrives.
	 */
	params->dev = dev;
	params->file = file;
	params->engine = engine;
	params->dispatch_flags = dispatch_flags;
	params->ctx = ctx;

	trace_i915_gem_request_queue(params->request, dispatch_flags);

	ret = execbuf_submit(params, args, &eb->vmas);
err_request:
	__i915_add_request(params->request, ret == 0);
	if (out_fence) {
		if (ret == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. To be less ugly and less fragile, this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (dispatch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(params->batch);
err:
	/* the request owns the ref now */
	i915_gem_context_put(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will be freed when the
	 * device is really idle. */
	intel_runtime_pm_put(dev_priv);
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

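	/*
	 * The UINT_MAX / sizeof(*exec2_list) bound above keeps the
	 * "buffer_count * sizeof(*exec2_list)" products used for the
	 * allocation and copy below from wrapping in 32-bit size arithmetic.
	 * A rough worked example, assuming
	 * sizeof(struct drm_i915_gem_exec_object2) == 56 bytes:
	 *
	 *	UINT_MAX / 56 = 76695844	largest accepted buffer_count
	 *	76695845 * 56 = 4294967320	> UINT_MAX, i.e. would wrap
	 */
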
	exec2_list = drm_malloc_gfp(args->buffer_count,
				    sizeof(*exec2_list),
				    GFP_TEMPORARY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}