/* drivers/gpu/drm/i915/i915_gem_execbuffer.c */
/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */

#define  __EXEC_OBJECT_HAS_PIN		(1<<31)
#define  __EXEC_OBJECT_HAS_FENCE	(1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP	(1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS	(1<<28)
#define  __EXEC_OBJECT_INTERNAL_FLAGS	(0xf<<28) /* all of the above */

#define BATCH_OFFSET_BIAS (256*1024)

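/*
 * Gathered state for a single execbuf call, carried from setup through
 * to submission so the individual phases do not need long parameter
 * lists.
 */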
struct i915_execbuffer_params {
	struct drm_device		*dev;
	struct drm_file			*file;
	struct i915_vma			*batch;
	u32				dispatch_flags;
	u32				args_batch_start_offset;
	struct intel_engine_cs		*engine;
	struct i915_gem_context		*ctx;
	struct drm_i915_gem_request	*request;
};

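/*
 * Handle -> vma lookup for this execbuf. When userspace passes
 * I915_EXEC_HANDLE_LUT the handles are dense indices and the flat
 * lut[] array suffices; otherwise the vmas are hashed into buckets by
 * handle. eb->and holds the hash mask, or the negated buffer count
 * when the flat array is in use.
 */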
struct eb_vmas {
	struct drm_i915_private *i915;
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

static struct eb_vmas *
eb_create(struct drm_i915_private *i915,
	  struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	eb->i915 = i915;
	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static struct i915_vma *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma;
}

static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
		if (unlikely(IS_ERR(vma))) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;

err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		i915_gem_object_put(obj);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

27173f1f 230static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
67731b87 231{
eef90ccb
CW
232 if (eb->and < 0) {
233 if (handle >= -eb->and)
234 return NULL;
235 return eb->lut[handle];
236 } else {
237 struct hlist_head *head;
aa45950b 238 struct i915_vma *vma;
67731b87 239
eef90ccb 240 head = &eb->buckets[handle & eb->and];
aa45950b 241 hlist_for_each_entry(vma, head, exec_node) {
27173f1f
BW
242 if (vma->exec_handle == handle)
243 return vma;
eef90ccb
CW
244 }
245 return NULL;
246 }
67731b87
CW
247}
248
static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_vma_unpin_fence(vma);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		__i915_vma_unpin(vma);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		i915_vma_put(vma);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_USE_CPU_RELOC)
		return DBG_USE_CPU_RELOC > 0;

	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		obj->cache_level != I915_CACHE_NONE);
}

/* Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}

static inline uint64_t
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  uint64_t target_offset)
{
	return gen8_canonical_addr((int)reloc->delta + target_offset);
}

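/*
 * The reloc_cache keeps the kmap or GTT iomap of the page most recently
 * written by the relocation code, so that a run of relocations into the
 * same page pays for only one map/unmap cycle. The low bits of vaddr
 * double up as KMAP/CLFLUSH flags, see unmask_page() and unmask_flags().
 */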
struct reloc_cache {
	struct drm_i915_private *i915;
	struct drm_mm_node node;
	unsigned long vaddr;
	unsigned int page;
	bool use_64bit_reloc;
};

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	cache->i915 = i915;
	cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
	cache->node.allocated = false;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static void reloc_cache_fini(struct reloc_cache *cache)
{
	void *vaddr;

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		wmb();
		io_mapping_unmap_atomic((void __iomem *)vaddr);
		if (cache->node.allocated) {
			struct i915_ggtt *ggtt = &cache->i915->ggtt;

			ggtt->base.clear_range(&ggtt->base,
					       cache->node.start,
					       cache->node.size,
					       true);
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			int page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int ret;

		ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (ret)
			return ERR_PTR(ret);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 int page)
{
	struct i915_ggtt *ggtt = &cache->i915->ggtt;
	unsigned long offset;
	void *vaddr;

	if (cache->node.allocated) {
		wmb();
		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, page),
				       cache->node.start, I915_CACHE_NONE, 0);
		cache->page = page;
		return unmask_page(cache->vaddr);
	}

	if (cache->vaddr) {
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int ret;

		if (use_cpu_reloc(obj))
			return NULL;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ERR_PTR(ret);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE | PIN_NONBLOCK);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			ret = drm_mm_insert_node_in_range_generic
				(&ggtt->base.mm, &cache->node,
				 4096, 0, 0,
				 0, ggtt->mappable_end,
				 DRM_MM_SEARCH_DEFAULT,
				 DRM_MM_CREATE_DEFAULT);
			if (ret) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			ret = i915_vma_put_fence(vma);
			if (ret) {
				i915_vma_unpin(vma);
				return ERR_PTR(ret);
			}

			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (cache->node.allocated) {
		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, page),
				       offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 int page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/* Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

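/*
 * Write a relocation value into the batch. The lower 32 bits are
 * written first; on gen8+ (use_64bit_reloc) a second pass writes the
 * upper 32 bits into the following dword, which may live on the next
 * page of the object.
 */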
static int
relocate_entry(struct drm_i915_gem_object *obj,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct reloc_cache *cache,
	       u64 target_offset)
{
	u64 offset = reloc->offset;
	bool wide = cache->use_64bit_reloc;
	void *vaddr;

	target_offset = relocation_target(reloc, target_offset);
repeat:
	vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			cache->vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

	return 0;
}

static bool object_is_idle(struct drm_i915_gem_object *obj)
{
	unsigned long active = i915_gem_object_get_active(obj);
	int idx;

	for_each_active(active, idx) {
		if (!i915_gem_active_is_idle(&obj->last_read[idx],
					     &obj->base.dev->struct_mutex))
			return false;
	}

	return true;
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc,
				   struct reloc_cache *cache)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = gen8_canonical_addr(target_vma->node.start);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
		     reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    PIN_GLOBAL);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     obj->base.size - (cache->use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (pagefault_disabled() && !object_is_idle(obj))
		return -EFAULT;

	ret = relocate_entry(obj, reloc, cache, target_offset);
	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;
	return 0;
}

static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct reloc_cache cache;
	int remain, ret = 0;

	user_relocs = u64_to_user_ptr(entry->relocs_ptr);
	reloc_cache_init(&cache, eb->i915);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) {
			ret = -EFAULT;
			goto out;
		}

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
			if (ret)
				goto out;

			if (r->presumed_offset != offset &&
			    __put_user(r->presumed_offset,
				       &user_relocs->presumed_offset)) {
				ret = -EFAULT;
				goto out;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

out:
	reloc_cache_fini(&cache);
	return ret;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct reloc_cache cache;
	int i, ret = 0;

	reloc_cache_init(&cache, eb->i915);
	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
		if (ret)
			break;
	}
	reloc_cache_fini(&cache);

	return ret;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo, for in such a case the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}

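/*
 * Pin a single vma into the GTT for execbuf, translating the exec entry
 * flags (fence, mappable, bias, softpin offset) into pin flags, and
 * noting via *need_reloc whether a relocation pass is required because
 * the object moved.
 */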
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *engine,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = PIN_USER;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	if (!drm_mm_node_allocated(&vma->node)) {
		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
		 * limit address to the first 4GBs for unflagged objects.
		 */
		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
			flags |= PIN_ZONE_4G;
		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
			flags |= PIN_GLOBAL | PIN_MAPPABLE;
		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
		if (entry->flags & EXEC_OBJECT_PINNED)
			flags |= entry->offset | PIN_OFFSET_FIXED;
		if ((flags & PIN_MAPPABLE) == 0)
			flags |= PIN_HIGH;
	}

	ret = i915_vma_pin(vma,
			   entry->pad_to_size,
			   entry->alignment,
			   flags);
	if ((ret == -ENOSPC || ret == -E2BIG) &&
	    only_mappable_for_reloc(entry->flags))
		ret = i915_vma_pin(vma,
				   entry->pad_to_size,
				   entry->alignment,
				   flags & ~PIN_MAPPABLE);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_vma_get_fence(vma);
		if (ret)
			return ret;

		if (i915_vma_pin_fence(vma))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!i915_vma_is_ggtt(vma))
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(vma->obj->base.dev))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
		!i915_vma_is_ggtt(vma));

	if (entry->alignment &&
	    vma->node.start & (entry->alignment - 1))
		return true;

	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	/* avoid costly ping-pong once a batch bo ended up non-mappable */
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return !only_mappable_for_reloc(entry->flags);

	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}

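/*
 * Reserve GTT space for the whole object list: objects that must be
 * mappable are placed at the head, softpinned objects at the tail, and
 * the list is then pinned, evicting the address space and retrying once
 * on -ENOSPC.
 */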
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
			    struct list_head *vmas,
			    struct i915_gem_context *ctx,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	struct list_head pinned_vmas;
	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
	int retry;

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	INIT_LIST_HEAD(&pinned_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (ctx->flags & CONTEXT_NO_ZEROMAP)
			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			i915_gem_object_is_tiled(obj);
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (entry->flags & EXEC_OBJECT_PINNED)
			list_move_tail(&vma->exec_list, &pinned_vmas);
		else if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);
	list_splice(&pinned_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma,
								      engine,
								      need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, engine,
							      need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

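/*
 * Slow path for relocations: drop struct_mutex, copy all relocation
 * entries from userspace (invalidating the presumed offsets so that a
 * partial update cannot be mistaken for a complete one), then reacquire
 * the lock, re-lookup the objects and relocate from the kernel copy.
 */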
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *engine,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct i915_gem_context *ctx)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		i915_vma_put(vma);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
{
	unsigned int mask;

	mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
	mask <<= I915_BO_ACTIVE_SHIFT;

	return mask;
}

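/*
 * Serialise the request against whatever the objects are still busy on
 * (other engines, dma-buf reservations), flush stale CPU writes and
 * chipset caches, and finally invalidate the GPU caches so the batch
 * sees coherent data.
 */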
static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
				struct list_head *vmas)
{
	const unsigned int other_rings = eb_other_engines(req);
	struct i915_vma *vma;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct reservation_object *resv;

		if (obj->flags & other_rings) {
			ret = i915_gem_request_await_object
				(req, obj, obj->base.pending_write_domain);
			if (ret)
				return ret;
		}

		resv = i915_gem_object_get_dmabuf_resv(obj);
		if (resv) {
			ret = i915_sw_fence_await_reservation
				(&req->submit, resv, &i915_fence_ops,
				 obj->base.pending_write_domain, 10*HZ,
				 GFP_KERNEL | __GFP_NOWARN);
			if (ret < 0)
				return ret;
		}

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			i915_gem_clflush_object(obj, false);
	}

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(req->engine->i915);

	/* Unconditionally invalidate GPU caches and TLBs. */
	return req->engine->emit_flush(req, EMIT_INVALIDATE);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	/* INTERNAL flags must not overlap with external ones */
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
		 * any non-page-aligned or non-canonical addresses.
		 */
		if (exec[i].flags & EXEC_OBJECT_PINNED) {
			if (exec[i].offset !=
			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
				return -EINVAL;

			/* From drm_mm perspective address space is continuous,
			 * so from this point we're always using non-canonical
			 * form internally.
			 */
			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
		}

		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
			return -EINVAL;

		/* pad_to_size was once a reserved field, so sanitize it */
		if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
			if (offset_in_page(exec[i].pad_to_size))
				return -EINVAL;
		} else {
			exec[i].pad_to_size = 0;
		}

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static struct i915_gem_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *engine, const u32 ctx_id)
{
	struct i915_gem_context *ctx;
	struct i915_ctx_hang_stats *hs;

	ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}

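/*
 * Mark a vma (and its object) as in use by the given request: take an
 * active reference when first entering the active list and record the
 * request in the read/write/fence retirement trackers.
 */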
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = req->engine->id;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	obj->dirty = 1; /* be paranoid */

	/* Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_gem_object_is_active(obj))
		i915_gem_object_get(obj);
	i915_gem_object_set_active(obj, idx);
	i915_gem_active_set(&obj->last_read[idx], req);

	if (flags & EXEC_OBJECT_WRITE) {
		i915_gem_active_set(&obj->last_write, req);

		intel_fb_obj_invalidate(obj, ORIGIN_CS);

		/* update for the implicit flush after a batch */
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, req);

	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);
}

static void eb_export_fence(struct drm_i915_gem_object *obj,
			    struct drm_i915_gem_request *req,
			    unsigned int flags)
{
	struct reservation_object *resv;

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (!resv)
		return;

	/* Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	ww_mutex_lock(&resv->lock, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &req->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &req->fence);
	ww_mutex_unlock(&resv->lock);
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct drm_i915_gem_request *req)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain)
			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
		else
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
		eb_export_fence(obj, req, vma->exec_entry->flags);
		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static int
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int ret, i;

	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	ret = intel_ring_begin(req, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

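/*
 * Run the batch through the command parser: a copy is made into a
 * shadow buffer from the engine's batch pool and, if accepted, the
 * shadow is executed instead of the userspace batch. An unhandled
 * chained batch (-EACCES) returns NULL so the caller falls back to
 * executing the original.
 */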
static struct i915_vma *
i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
			  struct drm_i915_gem_object *batch_obj,
			  struct eb_vmas *eb,
			  u32 batch_start_offset,
			  u32 batch_len,
			  bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int ret;

	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
						   PAGE_ALIGN(batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	ret = intel_engine_cmd_parser(engine,
				      batch_obj,
				      shadow_batch_obj,
				      batch_start_offset,
				      batch_len,
				      is_master);
	if (ret) {
		if (ret == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(ret);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));

	vma->exec_entry = shadow_exec_entry;
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
	i915_gem_object_get(shadow_batch_obj);
	list_add_tail(&vma->exec_list, &eb->vmas);

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

static int
execbuf_submit(struct i915_execbuffer_params *params,
	       struct drm_i915_gem_execbuffer2 *args,
	       struct list_head *vmas)
{
	struct drm_i915_private *dev_priv = params->request->i915;
	u64 exec_start, exec_len;
	int instp_mode;
	u32 instp_mask;
	int ret;

	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	ret = i915_switch_context(params->request);
	if (ret)
		return ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && params->engine->id != RCS) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev_priv)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				return -EINVAL;
			}

			if (INTEL_INFO(dev_priv)->gen > 5 &&
			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev_priv)->gen >= 6)
				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (params->engine->id == RCS &&
	    instp_mode != dev_priv->relative_constants_mode) {
		struct intel_ring *ring = params->request->ring;

		ret = intel_ring_begin(params->request, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, INSTPM);
		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = instp_mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(params->request);
		if (ret)
			return ret;
	}

	exec_len   = args->batch_len;
	exec_start = params->batch->node.start +
		     params->args_batch_start_offset;

	if (exec_len == 0)
		exec_len = params->batch->size - params->args_batch_start_offset;

	ret = params->engine->emit_bb_start(params->request,
					    exec_start, exec_len,
					    params->dispatch_flags);
	if (ret)
		return ret;

	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

	i915_gem_execbuffer_move_to_active(vmas, params->request);

	return 0;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			 &dev_priv->mm.bsd_engine_dispatch_index);

	return file_priv->bsd_engine;
}

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!engine) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}

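/*
 * The main execbuf path: validate the arguments, select the engine,
 * look up and reserve the objects, apply relocations, and run the
 * batch through the command parser before submission.
 */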
54cf91dc
CW
1615static int
1616i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1617 struct drm_file *file,
1618 struct drm_i915_gem_execbuffer2 *args,
41bde553 1619 struct drm_i915_gem_exec_object2 *exec)
54cf91dc 1620{
72e96d64
JL
1621 struct drm_i915_private *dev_priv = to_i915(dev);
1622 struct i915_ggtt *ggtt = &dev_priv->ggtt;
27173f1f 1623 struct eb_vmas *eb;
78a42377 1624 struct drm_i915_gem_exec_object2 shadow_exec_entry;
e2f80391 1625 struct intel_engine_cs *engine;
e2efd130 1626 struct i915_gem_context *ctx;
41bde553 1627 struct i915_address_space *vm;
5f19e2bf
JH
1628 struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1629 struct i915_execbuffer_params *params = &params_master;
d299cce7 1630 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
8e004efc 1631 u32 dispatch_flags;
78382593 1632 int ret;
ed5982e6 1633 bool need_relocs;
54cf91dc 1634
ed5982e6 1635 if (!i915_gem_check_execbuffer(args))
432e58ed 1636 return -EINVAL;
432e58ed 1637
ad19f10b 1638 ret = validate_exec_list(dev, exec, args->buffer_count);
54cf91dc
CW
1639 if (ret)
1640 return ret;
1641
8e004efc 1642 dispatch_flags = 0;
d7d4eedd 1643 if (args->flags & I915_EXEC_SECURE) {
b3ac9f25 1644 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
d7d4eedd
CW
1645 return -EPERM;
1646
8e004efc 1647 dispatch_flags |= I915_DISPATCH_SECURE;
d7d4eedd 1648 }
b45305fc 1649 if (args->flags & I915_EXEC_IS_PINNED)
8e004efc 1650 dispatch_flags |= I915_DISPATCH_PINNED;
d7d4eedd 1651
f8ca0c07
DG
1652 engine = eb_select_engine(dev_priv, file, args);
1653 if (!engine)
1654 return -EINVAL;
54cf91dc
CW
1655
1656 if (args->buffer_count < 1) {
ff240199 1657 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
54cf91dc
CW
1658 return -EINVAL;
1659 }
54cf91dc 1660
a9ed33ca
AJ
1661 if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
1662 if (!HAS_RESOURCE_STREAMER(dev)) {
1663 DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
1664 return -EINVAL;
1665 }
e2f80391 1666 if (engine->id != RCS) {
a9ed33ca 1667 DRM_DEBUG("RS is not available on %s\n",
e2f80391 1668 engine->name);
a9ed33ca
AJ
1669 return -EINVAL;
1670 }
1671
1672 dispatch_flags |= I915_DISPATCH_RS;
1673 }
1674
67d97da3
CW
1675 /* Take a local wakeref for preparing to dispatch the execbuf as
1676 * we expect to access the hardware fairly frequently in the
1677 * process. Upon first dispatch, we acquire another prolonged
1678 * wakeref that we hold until the GPU has been idle for at least
1679 * 100ms.
1680 */
f65c9168
PZ
1681 intel_runtime_pm_get(dev_priv);
1682
54cf91dc
CW
1683 ret = i915_mutex_lock_interruptible(dev);
1684 if (ret)
1685 goto pre_mutex_err;
1686
	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_get(ctx);

	/* Bind into the context's ppGTT if it has one, else the global GTT. */
	if (ctx->ppgtt)
		vm = &ctx->ppgtt->base;
	else
		vm = &ggtt->base;

	memset(&params_master, 0x00, sizeof(params_master));

	eb = eb_create(dev_priv, args);
	if (eb == NULL) {
		i915_gem_context_put(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	params->batch = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
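	/*
	 * -EFAULT from the fast path above means a relocation entry could
	 * not be read with pagefaults disabled; the slow path drops
	 * struct_mutex, copies the relocations in from userspace with
	 * faulting allowed, then relocks and retries.
	 */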
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
								engine,
								eb, exec, ctx);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (params->batch->obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
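	/*
	 * Bounds-check the batch. With batch_start_offset already known to
	 * be no larger than the batch, the second comparison cannot wrap,
	 * unlike the more obvious batch_start_offset + batch_len > size,
	 * where the unsigned addition could overflow.
	 */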
	if (args->batch_start_offset > params->batch->size ||
	    args->batch_len > params->batch->size - args->batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		ret = -EINVAL;
		goto err;
	}

	params->args_batch_start_offset = args->batch_start_offset;
	if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
		struct i915_vma *vma;

		vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
						params->batch->obj,
						eb,
						args->batch_start_offset,
						args->batch_len,
						drm_is_current_master(file));
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

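		/*
		 * A NULL vma means the parser declined to take ownership
		 * (e.g. because it hit a chained batch it cannot follow);
		 * in that case the original batch is executed, but without
		 * the elevated privileges granted below.
		 */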
		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			dispatch_flags |= I915_DISPATCH_SECURE;
			params->args_batch_start_offset = 0;
			params->batch = vma;
		}
	}

	params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (dispatch_flags & I915_DISPATCH_SECURE) {
		struct drm_i915_gem_object *obj = params->batch->obj;
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		params->batch = vma;
	}

	/* Allocate a request for this batch buffer nice and early. */
	params->request = i915_gem_request_alloc(engine, ctx);
	if (IS_ERR(params->request)) {
		ret = PTR_ERR(params->request);
		goto err_batch_unpin;
	}

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	params->request->batch = params->batch;

	ret = i915_gem_request_add_to_client(params->request, file);
	if (ret)
		goto err_request;

	/*
	 * Save assorted stuff away to pass through to *_submission().
	 * NB: This data should be 'persistent' and not local as it will be
	 * kept around beyond the duration of the IOCTL once the GPU
	 * scheduler arrives.
	 */
	params->dev = dev;
	params->file = file;
	params->engine = engine;
	params->dispatch_flags = dispatch_flags;
	params->ctx = ctx;

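	/*
	 * From here on the request is committed: even if submission fails,
	 * __i915_add_request() below still runs so that whatever has
	 * already been emitted to the ring for this request is closed off
	 * and retired normally.
	 */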
	ret = execbuf_submit(params, args, &eb->vmas);
err_request:
	__i915_add_request(params->request, ret == 0);

err_batch_unpin:
	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. To be less ugly and less fragile, this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (dispatch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(params->batch);
err:
	/* the request owns the ref now */
	i915_gem_context_put(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

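	/*
	 * The legacy ABI has no per-object flags, so the conversion can
	 * only make a blanket choice: pre-gen4 hardware depends on fence
	 * registers for tiled access, hence every object is conservatively
	 * marked as needing a fence there.
	 */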
	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list,
		 * in canonical (sign-extended 48-bit) address form. */
		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->rsvd2 != 0) {
		DRM_DEBUG("dirty rsvd2 field\n");
		return -EINVAL;
	}

	exec2_list = drm_malloc_gfp(args->buffer_count,
				    sizeof(*exec2_list),
				    GFP_TEMPORARY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}
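
/*
 * For reference, a minimal sketch of how userspace reaches
 * i915_gem_execbuffer2() through the ioctl interface. Illustrative
 * only: `fd` and `handle` are assumed to come from an earlier
 * drmOpen()/GEM-create sequence, and the 4096-byte batch length is an
 * arbitrary example value; none of these appear in this file.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = handle,	// the last entry is the batch buffer
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf;
 *
 *	memset(&execbuf, 0, sizeof(execbuf));	// rsvd1 == 0: default context
 *	execbuf.buffers_ptr = (uintptr_t)&obj;
 *	execbuf.buffer_count = 1;
 *	execbuf.batch_start_offset = 0;
 *	execbuf.batch_len = 4096;		// assumed batch size
 *	execbuf.flags = I915_EXEC_RENDER;	// dispatch on the render ring
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */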