drivers/gpu/drm/i915/i915_gem_execbuffer.c
/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */

#define  __EXEC_OBJECT_HAS_PIN		(1<<31)
#define  __EXEC_OBJECT_HAS_FENCE	(1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP	(1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS	(1<<28)
#define  __EXEC_OBJECT_INTERNAL_FLAGS	(0xf<<28) /* all of the above */

#define BATCH_OFFSET_BIAS (256*1024)

struct i915_execbuffer_params {
	struct drm_device		*dev;
	struct drm_file			*file;
	struct i915_vma			*batch;
	u32				dispatch_flags;
	u32				args_batch_start_offset;
	struct intel_engine_cs		*engine;
	struct i915_gem_context		*ctx;
	struct drm_i915_gem_request	*request;
};

struct eb_vmas {
	struct drm_i915_private *i915;
	struct list_head vmas;
	/* and < 0: -buffer_count, handles index lut[] directly
	 * (I915_EXEC_HANDLE_LUT); and >= 0: hash mask for buckets[].
	 */
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

static struct eb_vmas *
eb_create(struct drm_i915_private *i915,
	  struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	eb->i915 = i915;
	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static struct i915_vma *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma;
}

static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_vma_instance(obj, vm, NULL);
		if (unlikely(IS_ERR(vma))) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;


err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		i915_gem_object_put(obj);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_vma_unpin_fence(vma);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		__i915_vma_unpin(vma);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		vma->exec_entry = NULL;
		i915_vma_put(vma);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_USE_CPU_RELOC)
		return DBG_USE_CPU_RELOC > 0;

	return (HAS_LLC(to_i915(obj->base.dev)) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		obj->cache_level != I915_CACHE_NONE);
}

/* Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}

static inline uint64_t
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  uint64_t target_offset)
{
	return gen8_canonical_addr((int)reloc->delta + target_offset);
}

struct reloc_cache {
	struct drm_i915_private *i915;
	struct drm_mm_node node;
	unsigned long vaddr;
	unsigned int page;
	bool use_64bit_reloc;
};

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	cache->i915 = i915;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->node.allocated = false;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static void reloc_cache_fini(struct reloc_cache *cache)
{
	void *vaddr;

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		wmb();
		io_mapping_unmap_atomic((void __iomem *)vaddr);
		if (cache->node.allocated) {
			struct i915_ggtt *ggtt = &cache->i915->ggtt;

			ggtt->base.clear_range(&ggtt->base,
					       cache->node.start,
					       cache->node.size);
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			int page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int ret;

		ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (ret)
			return ERR_PTR(ret);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 int page)
{
	struct i915_ggtt *ggtt = &cache->i915->ggtt;
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int ret;

		if (use_cpu_reloc(obj))
			return NULL;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ERR_PTR(ret);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE | PIN_NONBLOCK);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			ret = drm_mm_insert_node_in_range_generic
				(&ggtt->base.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_SEARCH_DEFAULT,
				 DRM_MM_CREATE_DEFAULT);
			if (ret) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			ret = i915_vma_put_fence(vma);
			if (ret) {
				i915_vma_unpin(vma);
				return ERR_PTR(ret);
			}

			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (cache->node.allocated) {
		wmb();
		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, page),
				       offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 int page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/* Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static int
relocate_entry(struct drm_i915_gem_object *obj,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct reloc_cache *cache,
	       u64 target_offset)
{
	u64 offset = reloc->offset;
	bool wide = cache->use_64bit_reloc;
	void *vaddr;

	target_offset = relocation_target(reloc, target_offset);
repeat:
	vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			cache->vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc,
				   struct reloc_cache *cache)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = gen8_canonical_addr(target_vma->node.start);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev_priv) &&
		     reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    PIN_GLOBAL);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     obj->base.size - (cache->use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	ret = relocate_entry(obj, reloc, cache, target_offset);
	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;
	return 0;
}

static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct reloc_cache cache;
	int remain, ret = 0;

	user_relocs = u64_to_user_ptr(entry->relocs_ptr);
	reloc_cache_init(&cache, eb->i915);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		unsigned long unwritten;
		unsigned int count;

		count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
		remain -= count;

		/* This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within an mmapped bo. In such a case
		 * the page fault handler would call i915_gem_fault() and we
		 * would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
		pagefault_enable();
		if (unlikely(unwritten)) {
			ret = -EFAULT;
			goto out;
		}

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
			if (ret)
				goto out;

			if (r->presumed_offset != offset) {
				pagefault_disable();
				unwritten = __put_user(r->presumed_offset,
						       &user_relocs->presumed_offset);
				pagefault_enable();
				if (unlikely(unwritten)) {
					/* Note that reporting an error now
					 * leaves everything in an inconsistent
					 * state as we have *already* changed
					 * the relocation value inside the
					 * object. As we have not changed the
					 * reloc.presumed_offset or will not
					 * change the execobject.offset, on the
					 * call we may not rewrite the value
					 * inside the object, leaving it
					 * dangling and causing a GPU hang.
					 */
					ret = -EFAULT;
					goto out;
				}
			}

			user_relocs++;
			r++;
		} while (--count);
	}

out:
	reloc_cache_fini(&cache);
	return ret;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct reloc_cache cache;
	int i, ret = 0;

	reloc_cache_init(&cache, eb->i915);
	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
		if (ret)
			break;
	}
	reloc_cache_fini(&cache);

	return ret;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}

	return ret;
}

static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}

static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *engine,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = PIN_USER;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	if (!drm_mm_node_allocated(&vma->node)) {
		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
		 * limit address to the first 4GBs for unflagged objects.
		 */
		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
			flags |= PIN_ZONE_4G;
		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
			flags |= PIN_GLOBAL | PIN_MAPPABLE;
		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
		if (entry->flags & EXEC_OBJECT_PINNED)
			flags |= entry->offset | PIN_OFFSET_FIXED;
		if ((flags & PIN_MAPPABLE) == 0)
			flags |= PIN_HIGH;
	}

	ret = i915_vma_pin(vma,
			   entry->pad_to_size,
			   entry->alignment,
			   flags);
	if ((ret == -ENOSPC || ret == -E2BIG) &&
	    only_mappable_for_reloc(entry->flags))
		ret = i915_vma_pin(vma,
				   entry->pad_to_size,
				   entry->alignment,
				   flags & ~PIN_MAPPABLE);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_vma_get_fence(vma);
		if (ret)
			return ret;

		if (i915_vma_pin_fence(vma))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!i915_vma_is_ggtt(vma))
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(to_i915(vma->obj->base.dev)))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
		!i915_vma_is_ggtt(vma));

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	/* avoid costly ping-pong once a batch bo ended up non-mappable */
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return !only_mappable_for_reloc(entry->flags);

	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}

static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
			    struct list_head *vmas,
			    struct i915_gem_context *ctx,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	struct list_head pinned_vmas;
	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
	int retry;

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	INIT_LIST_HEAD(&pinned_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (ctx->flags & CONTEXT_NO_ZEROMAP)
			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			i915_gem_object_is_tiled(obj);
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (entry->flags & EXEC_OBJECT_PINNED)
			list_move_tail(&vma->exec_list, &pinned_vmas);
		else if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);
	list_splice(&pinned_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma,
								      engine,
								      need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, engine,
							      need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *engine,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct i915_gem_context *ctx)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		i915_vma_put(vma);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
			continue;

		ret = i915_gem_request_await_object
			(req, obj, obj->base.pending_write_domain);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			i915_gem_clflush_object(obj, false);
	}

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(req->engine->i915);

	/* Unconditionally invalidate GPU caches and TLBs. */
	return req->engine->emit_flush(req, EMIT_INVALIDATE);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	/* INTERNAL flags must not overlap with external ones */
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
		 * any non-page-aligned or non-canonical addresses.
		 */
		if (exec[i].flags & EXEC_OBJECT_PINNED) {
			if (exec[i].offset !=
			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
				return -EINVAL;

			/* From drm_mm perspective address space is continuous,
			 * so from this point we're always using non-canonical
			 * form internally.
			 */
			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
		}

		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
			return -EINVAL;

		/* pad_to_size was once a reserved field, so sanitize it */
		if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
			if (offset_in_page(exec[i].pad_to_size))
				return -EINVAL;
		} else {
			exec[i].pad_to_size = 0;
		}

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_pages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static struct i915_gem_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *engine, const u32 ctx_id)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	if (i915_gem_context_is_banned(ctx)) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}

static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = req->engine->id;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/* Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_vma_is_active(vma))
		obj->active_count++;
	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);

	if (flags & EXEC_OBJECT_WRITE) {
		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, req);

		/* update for the implicit flush after a batch */
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
		if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, req);
}

static void eb_export_fence(struct drm_i915_gem_object *obj,
			    struct drm_i915_gem_request *req,
			    unsigned int flags)
{
	struct reservation_object *resv = obj->resv;

	/* Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	ww_mutex_lock(&resv->lock, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &req->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &req->fence);
	ww_mutex_unlock(&resv->lock);
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct drm_i915_gem_request *req)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain)
			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
		else
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
		eb_export_fence(obj, req, vma->exec_entry->flags);
		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static int
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int ret, i;

	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	ret = intel_ring_begin(req, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

static struct i915_vma *
i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
			  struct drm_i915_gem_object *batch_obj,
			  struct eb_vmas *eb,
			  u32 batch_start_offset,
			  u32 batch_len,
			  bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int ret;

	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
						   PAGE_ALIGN(batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	ret = intel_engine_cmd_parser(engine,
				      batch_obj,
				      shadow_batch_obj,
				      batch_start_offset,
				      batch_len,
				      is_master);
	if (ret) {
		if (ret == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(ret);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));

	vma->exec_entry = shadow_exec_entry;
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
	i915_gem_object_get(shadow_batch_obj);
	list_add_tail(&vma->exec_list, &eb->vmas);

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

static int
execbuf_submit(struct i915_execbuffer_params *params,
	       struct drm_i915_gem_execbuffer2 *args,
	       struct list_head *vmas)
{
	struct drm_i915_private *dev_priv = params->request->i915;
	u64 exec_start, exec_len;
	int instp_mode;
	u32 instp_mask;
	int ret;

	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	ret = i915_switch_context(params->request);
	if (ret)
		return ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && params->engine->id != RCS) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev_priv)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				return -EINVAL;
			}

			if (INTEL_INFO(dev_priv)->gen > 5 &&
			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev_priv)->gen >= 6)
				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (params->engine->id == RCS &&
	    instp_mode != dev_priv->relative_constants_mode) {
		struct intel_ring *ring = params->request->ring;

		ret = intel_ring_begin(params->request, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, INSTPM);
		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = instp_mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(params->request);
		if (ret)
			return ret;
	}

	exec_len   = args->batch_len;
	exec_start = params->batch->node.start +
		     params->args_batch_start_offset;

	if (exec_len == 0)
		exec_len = params->batch->size - params->args_batch_start_offset;

	ret = params->engine->emit_bb_start(params->request,
					    exec_start, exec_len,
					    params->dispatch_flags);
	if (ret)
		return ret;

	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

	i915_gem_execbuffer_move_to_active(vmas, params->request);

	return 0;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			 &dev_priv->mm.bsd_engine_dispatch_index);

	return file_priv->bsd_engine;
}

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!engine) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct eb_vmas *eb;
	struct drm_i915_gem_exec_object2 shadow_exec_entry;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
	struct i915_execbuffer_params *params = &params_master;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 dispatch_flags;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int ret;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(dev, exec, args->buffer_count);
	if (ret)
		return ret;

	dispatch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		dispatch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		dispatch_flags |= I915_DISPATCH_PINNED;

	engine = eb_select_engine(dev_priv, file, args);
	if (!engine)
		return -EINVAL;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(dev_priv)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (engine->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				  engine->name);
			return -EINVAL;
		}

		dispatch_flags |= I915_DISPATCH_RS;
	}

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence) {
			ret = -EINVAL;
			goto pre_mutex_err;
		}
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			out_fence_fd = -1;
			goto pre_mutex_err;
		}
	}

67d97da3
CW
1662 /* Take a local wakeref for preparing to dispatch the execbuf as
1663 * we expect to access the hardware fairly frequently in the
1664 * process. Upon first dispatch, we acquire another prolonged
1665 * wakeref that we hold until the GPU has been idle for at least
1666 * 100ms.
1667 */
f65c9168
PZ
1668 intel_runtime_pm_get(dev_priv);
1669
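/*
 * Editor's note: the wakeref taken above is paired with the
 * intel_runtime_pm_put() in the pre_mutex_err path at the end of this
 * function, so every exit route drops it exactly once. The pattern being
 * relied on, as a minimal sketch (do_submit_work is a placeholder):
 *
 *	intel_runtime_pm_get(dev_priv);
 *	ret = do_submit_work(dev_priv);
 *	intel_runtime_pm_put(dev_priv);
 */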
54cf91dc
CW
1670 ret = i915_mutex_lock_interruptible(dev);
1671 if (ret)
1672 goto pre_mutex_err;
1673
e2f80391 1674 ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
72ad5c45 1675 if (IS_ERR(ctx)) {
d299cce7 1676 mutex_unlock(&dev->struct_mutex);
41bde553 1677 ret = PTR_ERR(ctx);
d299cce7 1678 goto pre_mutex_err;
935f38d6 1679 }
41bde553 1680
9a6feaf0 1681 i915_gem_context_get(ctx);
41bde553 1682
ae6c4806
DV
1683 if (ctx->ppgtt)
1684 vm = &ctx->ppgtt->base;
1685 else
72e96d64 1686 vm = &ggtt->base;
d299cce7 1687
5f19e2bf
JH
1688 memset(&params_master, 0x00, sizeof(params_master));
1689
d50415cc 1690 eb = eb_create(dev_priv, args);
67731b87 1691 if (eb == NULL) {
9a6feaf0 1692 i915_gem_context_put(ctx);
67731b87
CW
1693 mutex_unlock(&dev->struct_mutex);
1694 ret = -ENOMEM;
1695 goto pre_mutex_err;
1696 }
1697
54cf91dc 1698 /* Look up object handles */
27173f1f 1699 ret = eb_lookup_vmas(eb, exec, args, vm, file);
3b96eff4
CW
1700 if (ret)
1701 goto err;
54cf91dc 1702
6fe4f140 1703 /* take note of the batch buffer before we might reorder the lists */
59bfa124 1704 params->batch = eb_get_batch(eb);
6fe4f140 1705
54cf91dc 1706 /* Move the objects en-masse into the GTT, evicting if necessary. */
ed5982e6 1707 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
e2f80391
TU
1708 ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
1709 &need_relocs);
54cf91dc
CW
1710 if (ret)
1711 goto err;
1712
1713 /* The objects are in their final locations, apply the relocations. */
ed5982e6 1714 if (need_relocs)
17601cbc 1715 ret = i915_gem_execbuffer_relocate(eb);
54cf91dc
CW
1716 if (ret) {
1717 if (ret == -EFAULT) {
e2f80391
TU
1718 ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
1719 engine,
b1b38278 1720 eb, exec, ctx);
54cf91dc
CW
1721 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1722 }
1723 if (ret)
1724 goto err;
1725 }
1726
1727 /* Set the pending read domains for the batch buffer to COMMAND */
59bfa124 1728 if (params->batch->obj->base.pending_write_domain) {
ff240199 1729 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
54cf91dc
CW
1730 ret = -EINVAL;
1731 goto err;
1732 }
0b537272
CW
1733 if (args->batch_start_offset > params->batch->size ||
1734 args->batch_len > params->batch->size - args->batch_start_offset) {
1735 DRM_DEBUG("Attempting to use out-of-bounds batch\n");
1736 ret = -EINVAL;
1737 goto err;
1738 }
54cf91dc 1739
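/*
 * Editor's note: the bounds check above is deliberately written as two
 * comparisons against params->batch->size rather than the more obvious
 * "start + len > size", so that a huge batch_len cannot overflow the
 * addition and slip past the test. An equivalent sketch:
 *
 *	static inline bool batch_in_bounds(u64 start, u64 len, u64 size)
 *	{
 *		return start <= size && len <= size - start;
 *	}
 */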
5f19e2bf 1740 params->args_batch_start_offset = args->batch_start_offset;
41736a8e 1741 if (engine->needs_cmd_parser && args->batch_len) {
59bfa124
CW
1742 struct i915_vma *vma;
1743
1744 vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
1745 params->batch->obj,
1746 eb,
1747 args->batch_start_offset,
1748 args->batch_len,
1749 drm_is_current_master(file));
1750 if (IS_ERR(vma)) {
1751 ret = PTR_ERR(vma);
78a42377
BV
1752 goto err;
1753 }
17cabf57 1754
59bfa124 1755 if (vma) {
c7c7372e
RP
1756 /*
1757 * Batch parsed and accepted:
1758 *
1759 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1760 * bit from MI_BATCH_BUFFER_START commands issued in
1761 * the dispatch_execbuffer implementations. We
1762 * specifically don't want that set on batches the
1763 * command parser has accepted.
1764 */
1765 dispatch_flags |= I915_DISPATCH_SECURE;
5f19e2bf 1766 params->args_batch_start_offset = 0;
59bfa124 1767 params->batch = vma;
c7c7372e 1768 }
351e3db2
BV
1769 }
1770
59bfa124 1771 params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
78a42377 1772
d7d4eedd
CW
1773 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1774 * batch" bit. Hence we need to pin secure batches into the global gtt.
28cf5415 1775 * hsw should have this fixed, but bdw mucks it up again. */
8e004efc 1776 if (dispatch_flags & I915_DISPATCH_SECURE) {
59bfa124 1777 struct drm_i915_gem_object *obj = params->batch->obj;
058d88c4 1778 struct i915_vma *vma;
59bfa124 1779
da51a1e7
DV
1780 /*
1781 * So on first glance it looks freaky that we pin the batch here
1782 * outside of the reservation loop. But:
1783 * - The batch is already pinned into the relevant ppgtt, so we
1784 * already have the backing storage fully allocated.
1785 * - No other BO uses the global gtt (well contexts, but meh),
fd0753cf 1786 * so we don't really have issues with multiple objects not
da51a1e7
DV
1787 * fitting due to fragmentation.
1788 * So this is actually safe.
1789 */
058d88c4
CW
1790 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
1791 if (IS_ERR(vma)) {
1792 ret = PTR_ERR(vma);
da51a1e7 1793 goto err;
058d88c4 1794 }
d7d4eedd 1795
058d88c4 1796 params->batch = vma;
59bfa124 1797 }
d7d4eedd 1798
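/*
 * Editor's note: the extra GGTT binding taken above for secure dispatch is
 * dropped again via i915_vma_unpin() in the err_batch_unpin path below, on
 * both the success and failure routes; only the ppgtt binding is kept
 * alive by the request's active tracking. The pairing, sketched under that
 * assumption (submit_batch is a placeholder):
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
 *	if (!IS_ERR(vma)) {
 *		ret = submit_batch(vma);
 *		i915_vma_unpin(vma);
 *	}
 */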
0c8dac88 1799 /* Allocate a request for this batch buffer nice and early. */
8e637178
CW
1800 params->request = i915_gem_request_alloc(engine, ctx);
1801 if (IS_ERR(params->request)) {
1802 ret = PTR_ERR(params->request);
0c8dac88 1803 goto err_batch_unpin;
26827088 1804 }
0c8dac88 1805
fec0445c
CW
1806 if (in_fence) {
1807 ret = i915_gem_request_await_dma_fence(params->request,
1808 in_fence);
1809 if (ret < 0)
1810 goto err_request;
1811 }
1812
1813 if (out_fence_fd != -1) {
1814 out_fence = sync_file_create(&params->request->fence);
1815 if (!out_fence) {
1816 ret = -ENOMEM;
1817 goto err_request;
1818 }
1819 }
1820
17f298cf
CW
1821 /* Whilst this request exists, batch_obj will be on the
1822 * active_list, and so will hold the active reference. Only when this
 1823 * request is retired will the batch_obj be moved onto the
1824 * inactive_list and lose its active reference. Hence we do not need
1825 * to explicitly hold another reference here.
1826 */
058d88c4 1827 params->request->batch = params->batch;
17f298cf 1828
8e637178 1829 ret = i915_gem_request_add_to_client(params->request, file);
fcfa423c 1830 if (ret)
aa9b7810 1831 goto err_request;
fcfa423c 1832
5f19e2bf
JH
1833 /*
1834 * Save assorted stuff away to pass through to *_submission().
 1835 * NB: This data should be 'persistent' and not local as it will be
1836 * kept around beyond the duration of the IOCTL once the GPU
1837 * scheduler arrives.
1838 */
1839 params->dev = dev;
1840 params->file = file;
4a570db5 1841 params->engine = engine;
5f19e2bf 1842 params->dispatch_flags = dispatch_flags;
5f19e2bf
JH
1843 params->ctx = ctx;
1844
5b043f4e 1845 ret = execbuf_submit(params, args, &eb->vmas);
aa9b7810 1846err_request:
17f298cf 1847 __i915_add_request(params->request, ret == 0);
fec0445c
CW
1848 if (out_fence) {
1849 if (ret == 0) {
1850 fd_install(out_fence_fd, out_fence->file);
 1851 			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
1852 args->rsvd2 |= (u64)out_fence_fd << 32;
1853 out_fence_fd = -1;
1854 } else {
1855 fput(out_fence->file);
1856 }
1857 }
54cf91dc 1858
0c8dac88 1859err_batch_unpin:
da51a1e7
DV
1860 /*
1861 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
 1862 * batch vma for correctness. To be less ugly and less fragile, this
1863 * needs to be adjusted to also track the ggtt batch vma properly as
1864 * active.
1865 */
8e004efc 1866 if (dispatch_flags & I915_DISPATCH_SECURE)
59bfa124 1867 i915_vma_unpin(params->batch);
54cf91dc 1868err:
41bde553 1869 /* the request owns the ref now */
9a6feaf0 1870 i915_gem_context_put(ctx);
67731b87 1871 eb_destroy(eb);
54cf91dc
CW
1872
1873 mutex_unlock(&dev->struct_mutex);
1874
1875pre_mutex_err:
f65c9168
PZ
1876 /* intel_gpu_busy should also get a ref, so it will free when the device
1877 * is really idle. */
1878 intel_runtime_pm_put(dev_priv);
fec0445c
CW
1879 if (out_fence_fd != -1)
1880 put_unused_fd(out_fence_fd);
1881 dma_fence_put(in_fence);
54cf91dc
CW
1882 return ret;
1883}
1884
1885/*
1886 * Legacy execbuffer just creates an exec2 list from the original exec object
1887 * list array and passes it to the real function.
1888 */
1889int
1890i915_gem_execbuffer(struct drm_device *dev, void *data,
1891 struct drm_file *file)
1892{
1893 struct drm_i915_gem_execbuffer *args = data;
1894 struct drm_i915_gem_execbuffer2 exec2;
1895 struct drm_i915_gem_exec_object *exec_list = NULL;
1896 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1897 int ret, i;
1898
54cf91dc 1899 if (args->buffer_count < 1) {
ff240199 1900 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
54cf91dc
CW
1901 return -EINVAL;
1902 }
1903
1904 /* Copy in the exec list from userland */
1905 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1906 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1907 if (exec_list == NULL || exec2_list == NULL) {
ff240199 1908 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
54cf91dc
CW
1909 args->buffer_count);
1910 drm_free_large(exec_list);
1911 drm_free_large(exec2_list);
1912 return -ENOMEM;
1913 }
1914 ret = copy_from_user(exec_list,
3ed605bc 1915 u64_to_user_ptr(args->buffers_ptr),
54cf91dc
CW
1916 sizeof(*exec_list) * args->buffer_count);
1917 if (ret != 0) {
ff240199 1918 DRM_DEBUG("copy %d exec entries failed %d\n",
54cf91dc
CW
1919 args->buffer_count, ret);
1920 drm_free_large(exec_list);
1921 drm_free_large(exec2_list);
1922 return -EFAULT;
1923 }
1924
1925 for (i = 0; i < args->buffer_count; i++) {
1926 exec2_list[i].handle = exec_list[i].handle;
1927 exec2_list[i].relocation_count = exec_list[i].relocation_count;
1928 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1929 exec2_list[i].alignment = exec_list[i].alignment;
1930 exec2_list[i].offset = exec_list[i].offset;
f0836b72 1931 if (INTEL_GEN(to_i915(dev)) < 4)
54cf91dc
CW
1932 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1933 else
1934 exec2_list[i].flags = 0;
1935 }
1936
1937 exec2.buffers_ptr = args->buffers_ptr;
1938 exec2.buffer_count = args->buffer_count;
1939 exec2.batch_start_offset = args->batch_start_offset;
1940 exec2.batch_len = args->batch_len;
1941 exec2.DR1 = args->DR1;
1942 exec2.DR4 = args->DR4;
1943 exec2.num_cliprects = args->num_cliprects;
1944 exec2.cliprects_ptr = args->cliprects_ptr;
1945 exec2.flags = I915_EXEC_RENDER;
6e0a69db 1946 i915_execbuffer2_set_context_id(exec2, 0);
54cf91dc 1947
41bde553 1948 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
54cf91dc 1949 if (!ret) {
9aab8bff 1950 struct drm_i915_gem_exec_object __user *user_exec_list =
3ed605bc 1951 u64_to_user_ptr(args->buffers_ptr);
9aab8bff 1952
54cf91dc 1953 /* Copy the new buffer offsets back to the user's exec list. */
9aab8bff 1954 for (i = 0; i < args->buffer_count; i++) {
934acce3
MW
1955 exec2_list[i].offset =
1956 gen8_canonical_addr(exec2_list[i].offset);
9aab8bff
CW
1957 ret = __copy_to_user(&user_exec_list[i].offset,
1958 &exec2_list[i].offset,
1959 sizeof(user_exec_list[i].offset));
1960 if (ret) {
1961 ret = -EFAULT;
1962 DRM_DEBUG("failed to copy %d exec entries "
1963 "back to user (%d)\n",
1964 args->buffer_count, ret);
1965 break;
1966 }
54cf91dc
CW
1967 }
1968 }
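/*
 * Editor's note: offsets are copied back to userspace in "canonical" form,
 * i.e. with bit 47 sign-extended into the upper bits as gen8+ 48-bit PPGTT
 * addressing expects. Assuming gen8_canonical_addr() is the usual
 * sign_extend64(addr, 47) idiom, the transformation is equivalent to:
 *
 *	static inline u64 canonical_addr(u64 addr)
 *	{
 *		return (u64)(((s64)addr << 16) >> 16);
 *	}
 */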
1969
1970 drm_free_large(exec_list);
1971 drm_free_large(exec2_list);
1972 return ret;
1973}
1974
1975int
1976i915_gem_execbuffer2(struct drm_device *dev, void *data,
1977 struct drm_file *file)
1978{
1979 struct drm_i915_gem_execbuffer2 *args = data;
1980 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1981 int ret;
1982
ed8cd3b2
XW
1983 if (args->buffer_count < 1 ||
1984 args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
ff240199 1985 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
54cf91dc
CW
1986 return -EINVAL;
1987 }
1988
f2a85e19
CW
1989 exec2_list = drm_malloc_gfp(args->buffer_count,
1990 sizeof(*exec2_list),
1991 GFP_TEMPORARY);
54cf91dc 1992 if (exec2_list == NULL) {
ff240199 1993 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
54cf91dc
CW
1994 args->buffer_count);
1995 return -ENOMEM;
1996 }
1997 ret = copy_from_user(exec2_list,
3ed605bc 1998 u64_to_user_ptr(args->buffers_ptr),
54cf91dc
CW
1999 sizeof(*exec2_list) * args->buffer_count);
2000 if (ret != 0) {
ff240199 2001 DRM_DEBUG("copy %d exec entries failed %d\n",
54cf91dc
CW
2002 args->buffer_count, ret);
2003 drm_free_large(exec2_list);
2004 return -EFAULT;
2005 }
2006
41bde553 2007 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
54cf91dc
CW
2008 if (!ret) {
2009 /* Copy the new buffer offsets back to the user's exec list. */
d593d992 2010 struct drm_i915_gem_exec_object2 __user *user_exec_list =
3ed605bc 2011 u64_to_user_ptr(args->buffers_ptr);
9aab8bff
CW
2012 int i;
2013
2014 for (i = 0; i < args->buffer_count; i++) {
934acce3
MW
2015 exec2_list[i].offset =
2016 gen8_canonical_addr(exec2_list[i].offset);
9aab8bff
CW
2017 ret = __copy_to_user(&user_exec_list[i].offset,
2018 &exec2_list[i].offset,
2019 sizeof(user_exec_list[i].offset));
2020 if (ret) {
2021 ret = -EFAULT;
2022 DRM_DEBUG("failed to copy %d exec entries "
2023 "back to user\n",
2024 args->buffer_count);
2025 break;
2026 }
54cf91dc
CW
2027 }
2028 }
2029
2030 drm_free_large(exec2_list);
2031 return ret;
2032}