drivers/gpu/drm/i915/i915_gem_execbuffer.c (blame)
drm/i915: set "ret" correctly on error paths
54cf91dc
CW
1/*
2 * Copyright © 2008,2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 *
27 */
28
ad778f89
CW
29#include <linux/dma_remapping.h>
30#include <linux/reservation.h>
fec0445c 31#include <linux/sync_file.h>
ad778f89
CW
32#include <linux/uaccess.h>
33
760285e7
DH
34#include <drm/drmP.h>
35#include <drm/i915_drm.h>
ad778f89 36
54cf91dc 37#include "i915_drv.h"
57822dc6 38#include "i915_gem_clflush.h"
54cf91dc
CW
39#include "i915_trace.h"
40#include "intel_drv.h"
5d723d7a 41#include "intel_frontbuffer.h"
54cf91dc 42
d50415cc
CW
43#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */
44
9e2793f6
DG
45#define __EXEC_OBJECT_HAS_PIN (1<<31)
46#define __EXEC_OBJECT_HAS_FENCE (1<<30)
47#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
48#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
49#define __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */
d23db88c
CW
50
51#define BATCH_OFFSET_BIAS (256*1024)
a415d355 52
5b043f4e
CW
53struct i915_execbuffer_params {
54 struct drm_device *dev;
55 struct drm_file *file;
59bfa124
CW
56 struct i915_vma *batch;
57 u32 dispatch_flags;
58 u32 args_batch_start_offset;
5b043f4e 59 struct intel_engine_cs *engine;
5b043f4e
CW
60 struct i915_gem_context *ctx;
61 struct drm_i915_gem_request *request;
62};
63
27173f1f 64struct eb_vmas {
d50415cc 65 struct drm_i915_private *i915;
27173f1f 66 struct list_head vmas;
67731b87 67 int and;
eef90ccb 68 union {
27173f1f 69 struct i915_vma *lut[0];
eef90ccb
CW
70 struct hlist_head buckets[0];
71 };
67731b87
CW
72};
73
27173f1f 74static struct eb_vmas *
d50415cc
CW
75eb_create(struct drm_i915_private *i915,
76 struct drm_i915_gem_execbuffer2 *args)
67731b87 77{
27173f1f 78 struct eb_vmas *eb = NULL;
eef90ccb
CW
79
80 if (args->flags & I915_EXEC_HANDLE_LUT) {
b205ca57 81 unsigned size = args->buffer_count;
27173f1f
BW
82 size *= sizeof(struct i915_vma *);
83 size += sizeof(struct eb_vmas);
eef90ccb
CW
84 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
85 }
86
87 if (eb == NULL) {
b205ca57
DV
88 unsigned size = args->buffer_count;
89 unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
27b7c63a 90 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
eef90ccb
CW
91 while (count > 2*size)
92 count >>= 1;
93 eb = kzalloc(count*sizeof(struct hlist_head) +
27173f1f 94 sizeof(struct eb_vmas),
eef90ccb
CW
95 GFP_TEMPORARY);
96 if (eb == NULL)
97 return eb;
98
99 eb->and = count - 1;
100 } else
101 eb->and = -args->buffer_count;
102
d50415cc 103 eb->i915 = i915;
27173f1f 104 INIT_LIST_HEAD(&eb->vmas);
67731b87
CW
105 return eb;
106}
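/*
 * Lookup strategy note: with I915_EXEC_HANDLE_LUT the handles are already
 * dense indices, so eb->and is set to -buffer_count and eb_get_vma() can
 * index eb->lut[] directly; otherwise eb->and is a power-of-two-minus-one
 * mask and handles are hashed into eb->buckets[].
 */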
107
108static void
27173f1f 109eb_reset(struct eb_vmas *eb)
67731b87 110{
eef90ccb
CW
111 if (eb->and >= 0)
112 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
67731b87
CW
113}
114
59bfa124
CW
115static struct i915_vma *
116eb_get_batch(struct eb_vmas *eb)
117{
118 struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
119
120 /*
121 * SNA is doing fancy tricks with compressing batch buffers, which leads
122 * to negative relocation deltas. Usually that works out ok since the
123 * relocate address is still positive, except when the batch is placed
124 * very low in the GTT. Ensure this doesn't happen.
125 *
126 * Note that actual hangs have only been observed on gen7, but for
127 * paranoia do it everywhere.
128 */
129 if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
130 vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
131
132 return vma;
133}
134
3b96eff4 135static int
27173f1f
BW
136eb_lookup_vmas(struct eb_vmas *eb,
137 struct drm_i915_gem_exec_object2 *exec,
138 const struct drm_i915_gem_execbuffer2 *args,
139 struct i915_address_space *vm,
140 struct drm_file *file)
3b96eff4 141{
27173f1f
BW
142 struct drm_i915_gem_object *obj;
143 struct list_head objects;
9ae9ab52 144 int i, ret;
3b96eff4 145
27173f1f 146 INIT_LIST_HEAD(&objects);
3b96eff4 147 spin_lock(&file->table_lock);
27173f1f
BW
 148 /* Grab a reference to the object and release the lock so we can look up
 149 * or create the VMA without using GFP_ATOMIC */
eef90ccb 150 for (i = 0; i < args->buffer_count; i++) {
3b96eff4
CW
151 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
152 if (obj == NULL) {
153 spin_unlock(&file->table_lock);
154 DRM_DEBUG("Invalid object handle %d at index %d\n",
155 exec[i].handle, i);
27173f1f 156 ret = -ENOENT;
9ae9ab52 157 goto err;
3b96eff4
CW
158 }
159
27173f1f 160 if (!list_empty(&obj->obj_exec_link)) {
3b96eff4
CW
161 spin_unlock(&file->table_lock);
162 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
163 obj, exec[i].handle, i);
27173f1f 164 ret = -EINVAL;
9ae9ab52 165 goto err;
3b96eff4
CW
166 }
167
25dc556a 168 i915_gem_object_get(obj);
27173f1f
BW
169 list_add_tail(&obj->obj_exec_link, &objects);
170 }
171 spin_unlock(&file->table_lock);
3b96eff4 172
27173f1f 173 i = 0;
9ae9ab52 174 while (!list_empty(&objects)) {
27173f1f 175 struct i915_vma *vma;
6f65e29a 176
9ae9ab52
CW
177 obj = list_first_entry(&objects,
178 struct drm_i915_gem_object,
179 obj_exec_link);
180
e656a6cb
DV
181 /*
182 * NOTE: We can leak any vmas created here when something fails
183 * later on. But that's no issue since vma_unbind can deal with
184 * vmas which are not actually bound. And since only
185 * lookup_or_create exists as an interface to get at the vma
186 * from the (obj, vm) we don't run the risk of creating
187 * duplicated vmas for the same vm.
188 */
718659a6 189 vma = i915_vma_instance(obj, vm, NULL);
058d88c4 190 if (unlikely(IS_ERR(vma))) {
27173f1f
BW
191 DRM_DEBUG("Failed to lookup VMA\n");
192 ret = PTR_ERR(vma);
9ae9ab52 193 goto err;
27173f1f
BW
194 }
195
9ae9ab52 196 /* Transfer ownership from the objects list to the vmas list. */
27173f1f 197 list_add_tail(&vma->exec_list, &eb->vmas);
9ae9ab52 198 list_del_init(&obj->obj_exec_link);
27173f1f
BW
199
200 vma->exec_entry = &exec[i];
eef90ccb 201 if (eb->and < 0) {
27173f1f 202 eb->lut[i] = vma;
eef90ccb
CW
203 } else {
204 uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
27173f1f
BW
205 vma->exec_handle = handle;
206 hlist_add_head(&vma->exec_node,
eef90ccb
CW
207 &eb->buckets[handle & eb->and]);
208 }
27173f1f 209 ++i;
3b96eff4 210 }
3b96eff4 211
9ae9ab52 212 return 0;
27173f1f 213
27173f1f 214
9ae9ab52 215err:
27173f1f
BW
216 while (!list_empty(&objects)) {
217 obj = list_first_entry(&objects,
218 struct drm_i915_gem_object,
219 obj_exec_link);
220 list_del_init(&obj->obj_exec_link);
f8c417cd 221 i915_gem_object_put(obj);
27173f1f 222 }
9ae9ab52
CW
223 /*
 224 * Objects already transferred to the vmas list will be unreferenced by
225 * eb_destroy.
226 */
227
27173f1f 228 return ret;
3b96eff4
CW
229}
230
27173f1f 231static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
67731b87 232{
eef90ccb
CW
233 if (eb->and < 0) {
234 if (handle >= -eb->and)
235 return NULL;
236 return eb->lut[handle];
237 } else {
238 struct hlist_head *head;
aa45950b 239 struct i915_vma *vma;
67731b87 240
eef90ccb 241 head = &eb->buckets[handle & eb->and];
aa45950b 242 hlist_for_each_entry(vma, head, exec_node) {
27173f1f
BW
243 if (vma->exec_handle == handle)
244 return vma;
eef90ccb
CW
245 }
246 return NULL;
247 }
67731b87
CW
248}
249
a415d355
CW
250static void
251i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
252{
253 struct drm_i915_gem_exec_object2 *entry;
a415d355
CW
254
255 if (!drm_mm_node_allocated(&vma->node))
256 return;
257
258 entry = vma->exec_entry;
259
260 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
49ef5294 261 i915_vma_unpin_fence(vma);
a415d355
CW
262
263 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
20dfbde4 264 __i915_vma_unpin(vma);
a415d355 265
de4e783a 266 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
a415d355
CW
267}
268
269static void eb_destroy(struct eb_vmas *eb)
270{
27173f1f
BW
271 while (!list_empty(&eb->vmas)) {
272 struct i915_vma *vma;
bcffc3fa 273
27173f1f
BW
274 vma = list_first_entry(&eb->vmas,
275 struct i915_vma,
bcffc3fa 276 exec_list);
27173f1f 277 list_del_init(&vma->exec_list);
a415d355 278 i915_gem_execbuffer_unreserve_vma(vma);
172ae5b4 279 vma->exec_entry = NULL;
624192cf 280 i915_vma_put(vma);
bcffc3fa 281 }
67731b87
CW
282 kfree(eb);
283}
284
dabdfe02
CW
285static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
286{
9e53d9be
CW
287 if (!i915_gem_object_has_struct_page(obj))
288 return false;
289
d50415cc
CW
290 if (DBG_USE_CPU_RELOC)
291 return DBG_USE_CPU_RELOC > 0;
292
0031fb96 293 return (HAS_LLC(to_i915(obj->base.dev)) ||
2cc86b82 294 obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
dabdfe02
CW
295 obj->cache_level != I915_CACHE_NONE);
296}
297
934acce3
MW
298/* Used to convert any address to canonical form.
299 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
300 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
301 * addresses to be in a canonical form:
302 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
303 * canonical form [63:48] == [47]."
304 */
305#define GEN8_HIGH_ADDRESS_BIT 47
306static inline uint64_t gen8_canonical_addr(uint64_t address)
307{
308 return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
309}
310
311static inline uint64_t gen8_noncanonical_addr(uint64_t address)
312{
313 return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
314}
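/*
 * Worked example: a GTT offset with bit 47 set, say 0x0000800000000000,
 * must be handed to the hardware in canonical form as 0xffff800000000000
 * (bits 63:48 copy bit 47); gen8_noncanonical_addr() strips those copied
 * bits again so the value can be used as a plain drm_mm address.
 */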
315
316static inline uint64_t
d50415cc 317relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
934acce3
MW
318 uint64_t target_offset)
319{
320 return gen8_canonical_addr((int)reloc->delta + target_offset);
321}
322
31a39207 323struct reloc_cache {
d50415cc
CW
324 struct drm_i915_private *i915;
325 struct drm_mm_node node;
326 unsigned long vaddr;
31a39207 327 unsigned int page;
d50415cc 328 bool use_64bit_reloc;
31a39207
CW
329};
330
d50415cc
CW
331static void reloc_cache_init(struct reloc_cache *cache,
332 struct drm_i915_private *i915)
5032d871 333{
31a39207 334 cache->page = -1;
d50415cc
CW
335 cache->vaddr = 0;
336 cache->i915 = i915;
dfc5148f
JL
337 /* Must be a variable in the struct to allow GCC to unroll. */
338 cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
e8cb909a 339 cache->node.allocated = false;
d50415cc 340}
5032d871 341
d50415cc
CW
342static inline void *unmask_page(unsigned long p)
343{
344 return (void *)(uintptr_t)(p & PAGE_MASK);
345}
346
347static inline unsigned int unmask_flags(unsigned long p)
348{
349 return p & ~PAGE_MASK;
31a39207
CW
350}
351
d50415cc
CW
352#define KMAP 0x4 /* after CLFLUSH_FLAGS */
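/*
 * cache->vaddr packs two things into one word: the page-aligned mapping
 * address (recovered by unmask_page) and, in the low bits, the CLFLUSH_*
 * flags plus KMAP, which records whether the mapping came from
 * kmap_atomic or from the GGTT io_mapping (recovered by unmask_flags).
 */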
353
31a39207
CW
354static void reloc_cache_fini(struct reloc_cache *cache)
355{
d50415cc 356 void *vaddr;
5032d871 357
31a39207
CW
358 if (!cache->vaddr)
359 return;
3c94ceee 360
d50415cc
CW
361 vaddr = unmask_page(cache->vaddr);
362 if (cache->vaddr & KMAP) {
363 if (cache->vaddr & CLFLUSH_AFTER)
364 mb();
3c94ceee 365
d50415cc
CW
366 kunmap_atomic(vaddr);
367 i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
368 } else {
e8cb909a 369 wmb();
d50415cc 370 io_mapping_unmap_atomic((void __iomem *)vaddr);
e8cb909a
CW
371 if (cache->node.allocated) {
372 struct i915_ggtt *ggtt = &cache->i915->ggtt;
373
374 ggtt->base.clear_range(&ggtt->base,
375 cache->node.start,
4fb84d99 376 cache->node.size);
e8cb909a
CW
377 drm_mm_remove_node(&cache->node);
378 } else {
379 i915_vma_unpin((struct i915_vma *)cache->node.mm);
3c94ceee 380 }
31a39207
CW
381 }
382}
383
384static void *reloc_kmap(struct drm_i915_gem_object *obj,
385 struct reloc_cache *cache,
386 int page)
387{
d50415cc
CW
388 void *vaddr;
389
390 if (cache->vaddr) {
391 kunmap_atomic(unmask_page(cache->vaddr));
392 } else {
393 unsigned int flushes;
394 int ret;
31a39207 395
d50415cc
CW
396 ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
397 if (ret)
398 return ERR_PTR(ret);
399
400 BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
401 BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
3c94ceee 402
d50415cc
CW
403 cache->vaddr = flushes | KMAP;
404 cache->node.mm = (void *)obj;
405 if (flushes)
406 mb();
3c94ceee
BW
407 }
408
d50415cc
CW
409 vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
410 cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
31a39207 411 cache->page = page;
5032d871 412
d50415cc 413 return vaddr;
5032d871
RB
414}
415
d50415cc
CW
416static void *reloc_iomap(struct drm_i915_gem_object *obj,
417 struct reloc_cache *cache,
418 int page)
5032d871 419{
e8cb909a
CW
420 struct i915_ggtt *ggtt = &cache->i915->ggtt;
421 unsigned long offset;
d50415cc 422 void *vaddr;
5032d871 423
d50415cc 424 if (cache->vaddr) {
615e5000 425 io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
d50415cc
CW
426 } else {
427 struct i915_vma *vma;
428 int ret;
5032d871 429
d50415cc
CW
430 if (use_cpu_reloc(obj))
431 return NULL;
3c94ceee 432
d50415cc
CW
433 ret = i915_gem_object_set_to_gtt_domain(obj, true);
434 if (ret)
435 return ERR_PTR(ret);
3c94ceee 436
d50415cc
CW
437 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
438 PIN_MAPPABLE | PIN_NONBLOCK);
e8cb909a
CW
439 if (IS_ERR(vma)) {
440 memset(&cache->node, 0, sizeof(cache->node));
4e64e553 441 ret = drm_mm_insert_node_in_range
e8cb909a 442 (&ggtt->base.mm, &cache->node,
f51455d4 443 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
e8cb909a 444 0, ggtt->mappable_end,
4e64e553 445 DRM_MM_INSERT_LOW);
c92fa4fe
CW
446 if (ret) /* no inactive aperture space, use cpu reloc */
447 return NULL;
e8cb909a 448 } else {
49ef5294 449 ret = i915_vma_put_fence(vma);
e8cb909a
CW
450 if (ret) {
451 i915_vma_unpin(vma);
452 return ERR_PTR(ret);
453 }
5032d871 454
e8cb909a
CW
455 cache->node.start = vma->node.start;
456 cache->node.mm = (void *)vma;
3c94ceee 457 }
e8cb909a 458 }
3c94ceee 459
e8cb909a
CW
460 offset = cache->node.start;
461 if (cache->node.allocated) {
fc099090 462 wmb();
e8cb909a
CW
463 ggtt->base.insert_page(&ggtt->base,
464 i915_gem_object_get_dma_address(obj, page),
465 offset, I915_CACHE_NONE, 0);
466 } else {
467 offset += page << PAGE_SHIFT;
3c94ceee
BW
468 }
469
615e5000 470 vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
d50415cc
CW
471 cache->page = page;
472 cache->vaddr = (unsigned long)vaddr;
5032d871 473
d50415cc 474 return vaddr;
5032d871
RB
475}
476
d50415cc
CW
477static void *reloc_vaddr(struct drm_i915_gem_object *obj,
478 struct reloc_cache *cache,
479 int page)
edf4427b 480{
d50415cc 481 void *vaddr;
5032d871 482
d50415cc
CW
483 if (cache->page == page) {
484 vaddr = unmask_page(cache->vaddr);
485 } else {
486 vaddr = NULL;
487 if ((cache->vaddr & KMAP) == 0)
488 vaddr = reloc_iomap(obj, cache, page);
489 if (!vaddr)
490 vaddr = reloc_kmap(obj, cache, page);
3c94ceee
BW
491 }
492
d50415cc 493 return vaddr;
edf4427b
CW
494}
495
d50415cc 496static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
edf4427b 497{
d50415cc
CW
498 if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
499 if (flushes & CLFLUSH_BEFORE) {
500 clflushopt(addr);
501 mb();
502 }
edf4427b 503
d50415cc 504 *addr = value;
edf4427b 505
d50415cc
CW
506 /* Writes to the same cacheline are serialised by the CPU
507 * (including clflush). On the write path, we only require
508 * that it hits memory in an orderly fashion and place
509 * mb barriers at the start and end of the relocation phase
510 * to ensure ordering of clflush wrt to the system.
511 */
512 if (flushes & CLFLUSH_AFTER)
513 clflushopt(addr);
514 } else
515 *addr = value;
edf4427b 516}
edf4427b 517
edf4427b 518static int
d50415cc
CW
519relocate_entry(struct drm_i915_gem_object *obj,
520 const struct drm_i915_gem_relocation_entry *reloc,
521 struct reloc_cache *cache,
522 u64 target_offset)
edf4427b 523{
d50415cc
CW
524 u64 offset = reloc->offset;
525 bool wide = cache->use_64bit_reloc;
526 void *vaddr;
edf4427b 527
d50415cc
CW
528 target_offset = relocation_target(reloc, target_offset);
529repeat:
530 vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
531 if (IS_ERR(vaddr))
532 return PTR_ERR(vaddr);
533
534 clflush_write32(vaddr + offset_in_page(offset),
535 lower_32_bits(target_offset),
536 cache->vaddr);
537
538 if (wide) {
539 offset += sizeof(u32);
540 target_offset >>= 32;
541 wide = false;
542 goto repeat;
edf4427b 543 }
edf4427b 544
edf4427b
CW
545 return 0;
546}
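/*
 * On gen8+ (use_64bit_reloc) the 64-bit target address is written as two
 * 32-bit stores: the low dword at reloc->offset, then, after the repeat
 * pass with target_offset shifted down by 32, the high dword at
 * reloc->offset + 4.
 */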
edf4427b 547
54cf91dc
CW
548static int
549i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
27173f1f 550 struct eb_vmas *eb,
31a39207
CW
551 struct drm_i915_gem_relocation_entry *reloc,
552 struct reloc_cache *cache)
54cf91dc 553{
5db94019 554 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
54cf91dc 555 struct drm_gem_object *target_obj;
149c8407 556 struct drm_i915_gem_object *target_i915_obj;
27173f1f 557 struct i915_vma *target_vma;
d9ceb957 558 uint64_t target_offset;
8b78f0e5 559 int ret;
54cf91dc 560
67731b87 561 /* we already hold a reference to all valid objects */
27173f1f
BW
562 target_vma = eb_get_vma(eb, reloc->target_handle);
563 if (unlikely(target_vma == NULL))
54cf91dc 564 return -ENOENT;
27173f1f
BW
565 target_i915_obj = target_vma->obj;
566 target_obj = &target_vma->obj->base;
54cf91dc 567
934acce3 568 target_offset = gen8_canonical_addr(target_vma->node.start);
54cf91dc 569
e844b990
EA
570 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
571 * pipe_control writes because the gpu doesn't properly redirect them
572 * through the ppgtt for non_secure batchbuffers. */
5db94019 573 if (unlikely(IS_GEN6(dev_priv) &&
0875546c 574 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
fe14d5f4 575 ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
0875546c 576 PIN_GLOBAL);
fe14d5f4
TU
577 if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
578 return ret;
579 }
e844b990 580
54cf91dc 581 /* Validate that the target is in a valid r/w GPU domain */
b8f7ab17 582 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
ff240199 583 DRM_DEBUG("reloc with multiple write domains: "
54cf91dc
CW
584 "obj %p target %d offset %d "
585 "read %08x write %08x",
586 obj, reloc->target_handle,
587 (int) reloc->offset,
588 reloc->read_domains,
589 reloc->write_domain);
8b78f0e5 590 return -EINVAL;
54cf91dc 591 }
4ca4a250
DV
592 if (unlikely((reloc->write_domain | reloc->read_domains)
593 & ~I915_GEM_GPU_DOMAINS)) {
ff240199 594 DRM_DEBUG("reloc with read/write non-GPU domains: "
54cf91dc
CW
595 "obj %p target %d offset %d "
596 "read %08x write %08x",
597 obj, reloc->target_handle,
598 (int) reloc->offset,
599 reloc->read_domains,
600 reloc->write_domain);
8b78f0e5 601 return -EINVAL;
54cf91dc 602 }
54cf91dc
CW
603
604 target_obj->pending_read_domains |= reloc->read_domains;
605 target_obj->pending_write_domain |= reloc->write_domain;
606
607 /* If the relocation already has the right value in it, no
608 * more work needs to be done.
609 */
610 if (target_offset == reloc->presumed_offset)
67731b87 611 return 0;
54cf91dc
CW
612
613 /* Check that the relocation address is valid... */
3c94ceee 614 if (unlikely(reloc->offset >
d50415cc 615 obj->base.size - (cache->use_64bit_reloc ? 8 : 4))) {
ff240199 616 DRM_DEBUG("Relocation beyond object bounds: "
54cf91dc
CW
617 "obj %p target %d offset %d size %d.\n",
618 obj, reloc->target_handle,
619 (int) reloc->offset,
620 (int) obj->base.size);
8b78f0e5 621 return -EINVAL;
54cf91dc 622 }
b8f7ab17 623 if (unlikely(reloc->offset & 3)) {
ff240199 624 DRM_DEBUG("Relocation not 4-byte aligned: "
54cf91dc
CW
625 "obj %p target %d offset %d.\n",
626 obj, reloc->target_handle,
627 (int) reloc->offset);
8b78f0e5 628 return -EINVAL;
54cf91dc
CW
629 }
630
d50415cc 631 ret = relocate_entry(obj, reloc, cache, target_offset);
d4d36014
DV
632 if (ret)
633 return ret;
634
54cf91dc
CW
635 /* and update the user's relocation entry */
636 reloc->presumed_offset = target_offset;
67731b87 637 return 0;
54cf91dc
CW
638}
639
640static int
27173f1f
BW
641i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
642 struct eb_vmas *eb)
54cf91dc 643{
1d83f442
CW
644#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
645 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
54cf91dc 646 struct drm_i915_gem_relocation_entry __user *user_relocs;
27173f1f 647 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
31a39207
CW
648 struct reloc_cache cache;
649 int remain, ret = 0;
54cf91dc 650
3ed605bc 651 user_relocs = u64_to_user_ptr(entry->relocs_ptr);
d50415cc 652 reloc_cache_init(&cache, eb->i915);
54cf91dc 653
1d83f442
CW
654 remain = entry->relocation_count;
655 while (remain) {
656 struct drm_i915_gem_relocation_entry *r = stack_reloc;
ebc0808f
CW
657 unsigned long unwritten;
658 unsigned int count;
659
660 count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
1d83f442
CW
661 remain -= count;
662
ebc0808f
CW
663 /* This is the fast path and we cannot handle a pagefault
664 * whilst holding the struct mutex lest the user pass in the
 665 * relocations contained within a mmaped bo. In such a case
 666 * the page fault handler would call i915_gem_fault() and
 667 * we would try to acquire the struct mutex again. Obviously
668 * this is bad and so lockdep complains vehemently.
669 */
670 pagefault_disable();
671 unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
672 pagefault_enable();
673 if (unlikely(unwritten)) {
31a39207
CW
674 ret = -EFAULT;
675 goto out;
676 }
54cf91dc 677
1d83f442
CW
678 do {
679 u64 offset = r->presumed_offset;
54cf91dc 680
31a39207 681 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
1d83f442 682 if (ret)
31a39207 683 goto out;
1d83f442 684
ebc0808f
CW
685 if (r->presumed_offset != offset) {
686 pagefault_disable();
687 unwritten = __put_user(r->presumed_offset,
688 &user_relocs->presumed_offset);
689 pagefault_enable();
690 if (unlikely(unwritten)) {
691 /* Note that reporting an error now
692 * leaves everything in an inconsistent
693 * state as we have *already* changed
694 * the relocation value inside the
695 * object. As we have not changed the
 696 * reloc.presumed_offset and will not
 697 * change the execobject.offset, on a
 698 * later call we may not rewrite the value
699 * inside the object, leaving it
700 * dangling and causing a GPU hang.
701 */
702 ret = -EFAULT;
703 goto out;
704 }
1d83f442
CW
705 }
706
707 user_relocs++;
708 r++;
709 } while (--count);
54cf91dc
CW
710 }
711
31a39207
CW
712out:
713 reloc_cache_fini(&cache);
714 return ret;
1d83f442 715#undef N_RELOC
54cf91dc
CW
716}
717
718static int
27173f1f
BW
719i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
720 struct eb_vmas *eb,
721 struct drm_i915_gem_relocation_entry *relocs)
54cf91dc 722{
27173f1f 723 const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
31a39207
CW
724 struct reloc_cache cache;
725 int i, ret = 0;
54cf91dc 726
d50415cc 727 reloc_cache_init(&cache, eb->i915);
54cf91dc 728 for (i = 0; i < entry->relocation_count; i++) {
31a39207 729 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
54cf91dc 730 if (ret)
31a39207 731 break;
54cf91dc 732 }
31a39207 733 reloc_cache_fini(&cache);
54cf91dc 734
31a39207 735 return ret;
54cf91dc
CW
736}
737
738static int
17601cbc 739i915_gem_execbuffer_relocate(struct eb_vmas *eb)
54cf91dc 740{
27173f1f 741 struct i915_vma *vma;
d4aeee77
CW
742 int ret = 0;
743
27173f1f
BW
744 list_for_each_entry(vma, &eb->vmas, exec_list) {
745 ret = i915_gem_execbuffer_relocate_vma(vma, eb);
54cf91dc 746 if (ret)
d4aeee77 747 break;
54cf91dc
CW
748 }
749
d4aeee77 750 return ret;
54cf91dc
CW
751}
752
edf4427b
CW
753static bool only_mappable_for_reloc(unsigned int flags)
754{
755 return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
756 __EXEC_OBJECT_NEEDS_MAP;
757}
758
1690e1eb 759static int
27173f1f 760i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
0bc40be8 761 struct intel_engine_cs *engine,
27173f1f 762 bool *need_reloc)
1690e1eb 763{
6f65e29a 764 struct drm_i915_gem_object *obj = vma->obj;
27173f1f 765 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
d23db88c 766 uint64_t flags;
1690e1eb
CW
767 int ret;
768
0875546c 769 flags = PIN_USER;
0229da32
DV
770 if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
771 flags |= PIN_GLOBAL;
772
edf4427b 773 if (!drm_mm_node_allocated(&vma->node)) {
101b506a
MT
774 /* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
775 * limit address to the first 4GBs for unflagged objects.
776 */
777 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
778 flags |= PIN_ZONE_4G;
edf4427b
CW
779 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
780 flags |= PIN_GLOBAL | PIN_MAPPABLE;
edf4427b
CW
781 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
782 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
506a8e87
CW
783 if (entry->flags & EXEC_OBJECT_PINNED)
784 flags |= entry->offset | PIN_OFFSET_FIXED;
101b506a
MT
785 if ((flags & PIN_MAPPABLE) == 0)
786 flags |= PIN_HIGH;
edf4427b 787 }
1ec9e26d 788
59bfa124
CW
789 ret = i915_vma_pin(vma,
790 entry->pad_to_size,
791 entry->alignment,
792 flags);
793 if ((ret == -ENOSPC || ret == -E2BIG) &&
edf4427b 794 only_mappable_for_reloc(entry->flags))
59bfa124
CW
795 ret = i915_vma_pin(vma,
796 entry->pad_to_size,
797 entry->alignment,
798 flags & ~PIN_MAPPABLE);
1690e1eb
CW
799 if (ret)
800 return ret;
801
7788a765
CW
802 entry->flags |= __EXEC_OBJECT_HAS_PIN;
803
82b6b6d7 804 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
49ef5294 805 ret = i915_vma_get_fence(vma);
82b6b6d7
CW
806 if (ret)
807 return ret;
9a5a53b3 808
49ef5294 809 if (i915_vma_pin_fence(vma))
82b6b6d7 810 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
1690e1eb
CW
811 }
812
27173f1f
BW
813 if (entry->offset != vma->node.start) {
814 entry->offset = vma->node.start;
ed5982e6
DV
815 *need_reloc = true;
816 }
817
818 if (entry->flags & EXEC_OBJECT_WRITE) {
819 obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
820 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
821 }
822
1690e1eb 823 return 0;
7788a765 824}
1690e1eb 825
d23db88c 826static bool
e6a84468 827need_reloc_mappable(struct i915_vma *vma)
d23db88c
CW
828{
829 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
d23db88c 830
e6a84468
CW
831 if (entry->relocation_count == 0)
832 return false;
833
3272db53 834 if (!i915_vma_is_ggtt(vma))
e6a84468
CW
835 return false;
836
837 /* See also use_cpu_reloc() */
0031fb96 838 if (HAS_LLC(to_i915(vma->obj->base.dev)))
e6a84468
CW
839 return false;
840
841 if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
842 return false;
843
844 return true;
845}
846
847static bool
848eb_vma_misplaced(struct i915_vma *vma)
849{
850 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
d23db88c 851
3272db53
CW
852 WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
853 !i915_vma_is_ggtt(vma));
d23db88c 854
f51455d4 855 if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
d23db88c
CW
856 return true;
857
91b2db6f
CW
858 if (vma->node.size < entry->pad_to_size)
859 return true;
860
506a8e87
CW
861 if (entry->flags & EXEC_OBJECT_PINNED &&
862 vma->node.start != entry->offset)
863 return true;
864
d23db88c
CW
865 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
866 vma->node.start < BATCH_OFFSET_BIAS)
867 return true;
868
edf4427b 869 /* avoid costly ping-pong once a batch bo ended up non-mappable */
05a20d09
CW
870 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
871 !i915_vma_is_map_and_fenceable(vma))
edf4427b
CW
872 return !only_mappable_for_reloc(entry->flags);
873
101b506a
MT
874 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
875 (vma->node.start + vma->node.size - 1) >> 32)
876 return true;
877
d23db88c
CW
878 return false;
879}
880
54cf91dc 881static int
0bc40be8 882i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
27173f1f 883 struct list_head *vmas,
e2efd130 884 struct i915_gem_context *ctx,
ed5982e6 885 bool *need_relocs)
54cf91dc 886{
432e58ed 887 struct drm_i915_gem_object *obj;
27173f1f 888 struct i915_vma *vma;
68c8c17f 889 struct i915_address_space *vm;
27173f1f 890 struct list_head ordered_vmas;
506a8e87 891 struct list_head pinned_vmas;
c033666a 892 bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
f4ce766f 893 bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
7788a765 894 int retry;
6fe4f140 895
68c8c17f
BW
896 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
897
27173f1f 898 INIT_LIST_HEAD(&ordered_vmas);
506a8e87 899 INIT_LIST_HEAD(&pinned_vmas);
27173f1f 900 while (!list_empty(vmas)) {
6fe4f140
CW
901 struct drm_i915_gem_exec_object2 *entry;
902 bool need_fence, need_mappable;
903
27173f1f
BW
904 vma = list_first_entry(vmas, struct i915_vma, exec_list);
905 obj = vma->obj;
906 entry = vma->exec_entry;
6fe4f140 907
b1b38278
DW
908 if (ctx->flags & CONTEXT_NO_ZEROMAP)
909 entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
910
82b6b6d7
CW
911 if (!has_fenced_gpu_access)
912 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
6fe4f140 913 need_fence =
f4ce766f
CW
914 (entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
915 needs_unfenced_map) &&
3e510a8e 916 i915_gem_object_is_tiled(obj);
27173f1f 917 need_mappable = need_fence || need_reloc_mappable(vma);
6fe4f140 918
506a8e87
CW
919 if (entry->flags & EXEC_OBJECT_PINNED)
920 list_move_tail(&vma->exec_list, &pinned_vmas);
921 else if (need_mappable) {
e6a84468 922 entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
27173f1f 923 list_move(&vma->exec_list, &ordered_vmas);
e6a84468 924 } else
27173f1f 925 list_move_tail(&vma->exec_list, &ordered_vmas);
595dad76 926
ed5982e6 927 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
595dad76 928 obj->base.pending_write_domain = 0;
6fe4f140 929 }
27173f1f 930 list_splice(&ordered_vmas, vmas);
506a8e87 931 list_splice(&pinned_vmas, vmas);
54cf91dc
CW
932
933 /* Attempt to pin all of the buffers into the GTT.
934 * This is done in 3 phases:
935 *
936 * 1a. Unbind all objects that do not match the GTT constraints for
937 * the execbuffer (fenceable, mappable, alignment etc).
938 * 1b. Increment pin count for already bound objects.
939 * 2. Bind new objects.
940 * 3. Decrement pin count.
941 *
7788a765 942 * This avoids unnecessary unbinding of later objects in order to make
54cf91dc
CW
943 * room for the earlier objects *unless* we need to defragment.
944 */
945 retry = 0;
946 do {
7788a765 947 int ret = 0;
54cf91dc
CW
948
949 /* Unbind any ill-fitting objects or pin. */
27173f1f 950 list_for_each_entry(vma, vmas, exec_list) {
27173f1f 951 if (!drm_mm_node_allocated(&vma->node))
54cf91dc
CW
952 continue;
953
e6a84468 954 if (eb_vma_misplaced(vma))
27173f1f 955 ret = i915_vma_unbind(vma);
54cf91dc 956 else
0bc40be8
TU
957 ret = i915_gem_execbuffer_reserve_vma(vma,
958 engine,
959 need_relocs);
432e58ed 960 if (ret)
54cf91dc 961 goto err;
54cf91dc
CW
962 }
963
964 /* Bind fresh objects */
27173f1f
BW
965 list_for_each_entry(vma, vmas, exec_list) {
966 if (drm_mm_node_allocated(&vma->node))
1690e1eb 967 continue;
54cf91dc 968
0bc40be8
TU
969 ret = i915_gem_execbuffer_reserve_vma(vma, engine,
970 need_relocs);
7788a765
CW
971 if (ret)
972 goto err;
54cf91dc
CW
973 }
974
a415d355 975err:
6c085a72 976 if (ret != -ENOSPC || retry++)
54cf91dc
CW
977 return ret;
978
a415d355
CW
979 /* Decrement pin count for bound objects */
980 list_for_each_entry(vma, vmas, exec_list)
981 i915_gem_execbuffer_unreserve_vma(vma);
982
68c8c17f 983 ret = i915_gem_evict_vm(vm, true);
54cf91dc
CW
984 if (ret)
985 return ret;
54cf91dc
CW
986 } while (1);
987}
988
989static int
990i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
ed5982e6 991 struct drm_i915_gem_execbuffer2 *args,
54cf91dc 992 struct drm_file *file,
0bc40be8 993 struct intel_engine_cs *engine,
27173f1f 994 struct eb_vmas *eb,
b1b38278 995 struct drm_i915_gem_exec_object2 *exec,
e2efd130 996 struct i915_gem_context *ctx)
54cf91dc
CW
997{
998 struct drm_i915_gem_relocation_entry *reloc;
27173f1f
BW
999 struct i915_address_space *vm;
1000 struct i915_vma *vma;
ed5982e6 1001 bool need_relocs;
dd6864a4 1002 int *reloc_offset;
54cf91dc 1003 int i, total, ret;
b205ca57 1004 unsigned count = args->buffer_count;
54cf91dc 1005
27173f1f
BW
1006 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
1007
67731b87 1008 /* We may process another execbuffer during the unlock... */
27173f1f
BW
1009 while (!list_empty(&eb->vmas)) {
1010 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
1011 list_del_init(&vma->exec_list);
a415d355 1012 i915_gem_execbuffer_unreserve_vma(vma);
624192cf 1013 i915_vma_put(vma);
67731b87
CW
1014 }
1015
54cf91dc
CW
1016 mutex_unlock(&dev->struct_mutex);
1017
1018 total = 0;
1019 for (i = 0; i < count; i++)
432e58ed 1020 total += exec[i].relocation_count;
54cf91dc 1021
dd6864a4 1022 reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
54cf91dc 1023 reloc = drm_malloc_ab(total, sizeof(*reloc));
dd6864a4
CW
1024 if (reloc == NULL || reloc_offset == NULL) {
1025 drm_free_large(reloc);
1026 drm_free_large(reloc_offset);
54cf91dc
CW
1027 mutex_lock(&dev->struct_mutex);
1028 return -ENOMEM;
1029 }
1030
1031 total = 0;
1032 for (i = 0; i < count; i++) {
1033 struct drm_i915_gem_relocation_entry __user *user_relocs;
262b6d36
CW
1034 u64 invalid_offset = (u64)-1;
1035 int j;
54cf91dc 1036
3ed605bc 1037 user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
54cf91dc
CW
1038
1039 if (copy_from_user(reloc+total, user_relocs,
432e58ed 1040 exec[i].relocation_count * sizeof(*reloc))) {
54cf91dc
CW
1041 ret = -EFAULT;
1042 mutex_lock(&dev->struct_mutex);
1043 goto err;
1044 }
1045
262b6d36
CW
1046 /* As we do not update the known relocation offsets after
1047 * relocating (due to the complexities in lock handling),
1048 * we need to mark them as invalid now so that we force the
1049 * relocation processing next time. Just in case the target
1050 * object is evicted and then rebound into its old
1051 * presumed_offset before the next execbuffer - if that
1052 * happened we would make the mistake of assuming that the
1053 * relocations were valid.
1054 */
1055 for (j = 0; j < exec[i].relocation_count; j++) {
9aab8bff
CW
1056 if (__copy_to_user(&user_relocs[j].presumed_offset,
1057 &invalid_offset,
1058 sizeof(invalid_offset))) {
262b6d36
CW
1059 ret = -EFAULT;
1060 mutex_lock(&dev->struct_mutex);
1061 goto err;
1062 }
1063 }
1064
dd6864a4 1065 reloc_offset[i] = total;
432e58ed 1066 total += exec[i].relocation_count;
54cf91dc
CW
1067 }
1068
1069 ret = i915_mutex_lock_interruptible(dev);
1070 if (ret) {
1071 mutex_lock(&dev->struct_mutex);
1072 goto err;
1073 }
1074
67731b87 1075 /* reacquire the objects */
67731b87 1076 eb_reset(eb);
27173f1f 1077 ret = eb_lookup_vmas(eb, exec, args, vm, file);
3b96eff4
CW
1078 if (ret)
1079 goto err;
67731b87 1080
ed5982e6 1081 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
0bc40be8
TU
1082 ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
1083 &need_relocs);
54cf91dc
CW
1084 if (ret)
1085 goto err;
1086
27173f1f
BW
1087 list_for_each_entry(vma, &eb->vmas, exec_list) {
1088 int offset = vma->exec_entry - exec;
1089 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
1090 reloc + reloc_offset[offset]);
54cf91dc
CW
1091 if (ret)
1092 goto err;
54cf91dc
CW
1093 }
1094
 1095 /* Leave the user relocations as they are; this is the painfully slow path,
1096 * and we want to avoid the complication of dropping the lock whilst
1097 * having buffers reserved in the aperture and so causing spurious
1098 * ENOSPC for random operations.
1099 */
1100
1101err:
1102 drm_free_large(reloc);
dd6864a4 1103 drm_free_large(reloc_offset);
54cf91dc
CW
1104 return ret;
1105}
1106
54cf91dc 1107static int
535fbe82 1108i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
27173f1f 1109 struct list_head *vmas)
54cf91dc 1110{
27173f1f 1111 struct i915_vma *vma;
432e58ed 1112 int ret;
54cf91dc 1113
27173f1f
BW
1114 list_for_each_entry(vma, vmas, exec_list) {
1115 struct drm_i915_gem_object *obj = vma->obj;
03ade511 1116
77ae9957
CW
1117 if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
1118 continue;
1119
57822dc6
CW
1120 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) {
1121 i915_gem_clflush_object(obj, 0);
1122 obj->base.write_domain = 0;
1123 }
1124
d07f0e59
CW
1125 ret = i915_gem_request_await_object
1126 (req, obj, obj->base.pending_write_domain);
1127 if (ret)
1128 return ret;
c59a333f
CW
1129 }
1130
dcd79934
CW
1131 /* Unconditionally flush any chipset caches (for streaming writes). */
1132 i915_gem_chipset_flush(req->engine->i915);
6ac42f41 1133
c7fe7d25 1134 /* Unconditionally invalidate GPU caches and TLBs. */
7c9cf4e3 1135 return req->engine->emit_flush(req, EMIT_INVALIDATE);
54cf91dc
CW
1136}
1137
432e58ed
CW
1138static bool
1139i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
54cf91dc 1140{
ed5982e6
DV
1141 if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
1142 return false;
1143
2f5945bc
CW
1144 /* Kernel clipping was a DRI1 misfeature */
1145 if (exec->num_cliprects || exec->cliprects_ptr)
1146 return false;
1147
1148 if (exec->DR4 == 0xffffffff) {
1149 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1150 exec->DR4 = 0;
1151 }
1152 if (exec->DR1 || exec->DR4)
1153 return false;
1154
1155 if ((exec->batch_start_offset | exec->batch_len) & 0x7)
1156 return false;
1157
1158 return true;
54cf91dc
CW
1159}
1160
1161static int
ad19f10b
CW
1162validate_exec_list(struct drm_device *dev,
1163 struct drm_i915_gem_exec_object2 *exec,
54cf91dc
CW
1164 int count)
1165{
b205ca57
DV
1166 unsigned relocs_total = 0;
1167 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
ad19f10b
CW
1168 unsigned invalid_flags;
1169 int i;
1170
9e2793f6
DG
1171 /* INTERNAL flags must not overlap with external ones */
1172 BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);
1173
ad19f10b
CW
1174 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
1175 if (USES_FULL_PPGTT(dev))
1176 invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
54cf91dc
CW
1177
1178 for (i = 0; i < count; i++) {
3ed605bc 1179 char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
54cf91dc
CW
1180 int length; /* limited by fault_in_pages_readable() */
1181
ad19f10b 1182 if (exec[i].flags & invalid_flags)
ed5982e6
DV
1183 return -EINVAL;
1184
934acce3
MW
1185 /* Offset can be used as input (EXEC_OBJECT_PINNED), reject
1186 * any non-page-aligned or non-canonical addresses.
1187 */
1188 if (exec[i].flags & EXEC_OBJECT_PINNED) {
1189 if (exec[i].offset !=
1190 gen8_canonical_addr(exec[i].offset & PAGE_MASK))
1191 return -EINVAL;
934acce3
MW
1192 }
1193
038c95a3
MW
 1194 /* From the drm_mm perspective the address space is continuous,
 1195 * so from this point on we always use the non-canonical
1196 * form internally.
1197 */
1198 exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
1199
55a9785d
CW
1200 if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
1201 return -EINVAL;
1202
91b2db6f
CW
1203 /* pad_to_size was once a reserved field, so sanitize it */
1204 if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
1205 if (offset_in_page(exec[i].pad_to_size))
1206 return -EINVAL;
1207 } else {
1208 exec[i].pad_to_size = 0;
1209 }
1210
3118a4f6
KC
1211 /* First check for malicious input causing overflow in
1212 * the worst case where we need to allocate the entire
1213 * relocation tree as a single array.
1214 */
1215 if (exec[i].relocation_count > relocs_max - relocs_total)
54cf91dc 1216 return -EINVAL;
3118a4f6 1217 relocs_total += exec[i].relocation_count;
54cf91dc
CW
1218
1219 length = exec[i].relocation_count *
1220 sizeof(struct drm_i915_gem_relocation_entry);
30587535
KC
1221 /*
1222 * We must check that the entire relocation array is safe
1223 * to read, but since we may need to update the presumed
1224 * offsets during execution, check for full write access.
1225 */
54cf91dc
CW
1226 if (!access_ok(VERIFY_WRITE, ptr, length))
1227 return -EFAULT;
1228
d330a953 1229 if (likely(!i915.prefault_disable)) {
4bce9f6e 1230 if (fault_in_pages_readable(ptr, length))
0b74b508
XZ
1231 return -EFAULT;
1232 }
54cf91dc
CW
1233 }
1234
1235 return 0;
1236}
1237
e2efd130 1238static struct i915_gem_context *
d299cce7 1239i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
0bc40be8 1240 struct intel_engine_cs *engine, const u32 ctx_id)
d299cce7 1241{
f7978a0c 1242 struct i915_gem_context *ctx;
d299cce7 1243
ca585b5d 1244 ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
72ad5c45 1245 if (IS_ERR(ctx))
41bde553 1246 return ctx;
d299cce7 1247
6095868a 1248 if (i915_gem_context_is_banned(ctx)) {
d299cce7 1249 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
41bde553 1250 return ERR_PTR(-EIO);
d299cce7
MK
1251 }
1252
41bde553 1253 return ctx;
d299cce7
MK
1254}
1255
7aa6ca61
CW
1256static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
1257{
1258 return !(obj->cache_level == I915_CACHE_NONE ||
1259 obj->cache_level == I915_CACHE_WT);
1260}
1261
5cf3d280
CW
1262void i915_vma_move_to_active(struct i915_vma *vma,
1263 struct drm_i915_gem_request *req,
1264 unsigned int flags)
1265{
1266 struct drm_i915_gem_object *obj = vma->obj;
1267 const unsigned int idx = req->engine->id;
1268
81147b07 1269 lockdep_assert_held(&req->i915->drm.struct_mutex);
5cf3d280
CW
1270 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1271
b0decaf7
CW
1272 /* Add a reference if we're newly entering the active list.
1273 * The order in which we add operations to the retirement queue is
1274 * vital here: mark_active adds to the start of the callback list,
1275 * such that subsequent callbacks are called first. Therefore we
1276 * add the active reference first and queue for it to be dropped
1277 * *last*.
1278 */
d07f0e59
CW
1279 if (!i915_vma_is_active(vma))
1280 obj->active_count++;
1281 i915_vma_set_active(vma, idx);
1282 i915_gem_active_set(&vma->last_read[idx], req);
1283 list_move_tail(&vma->vm_link, &vma->vm->active_list);
5cf3d280
CW
1284
1285 if (flags & EXEC_OBJECT_WRITE) {
5b8c8aec
CW
1286 if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
1287 i915_gem_active_set(&obj->frontbuffer_write, req);
5cf3d280
CW
1288
1289 /* update for the implicit flush after a batch */
1290 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
7aa6ca61
CW
1291 if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
1292 obj->cache_dirty = true;
5cf3d280
CW
1293 }
1294
49ef5294
CW
1295 if (flags & EXEC_OBJECT_NEEDS_FENCE)
1296 i915_gem_active_set(&vma->last_fence, req);
5cf3d280
CW
1297}
1298
ad778f89
CW
1299static void eb_export_fence(struct drm_i915_gem_object *obj,
1300 struct drm_i915_gem_request *req,
1301 unsigned int flags)
1302{
d07f0e59 1303 struct reservation_object *resv = obj->resv;
ad778f89
CW
1304
1305 /* Ignore errors from failing to allocate the new fence, we can't
1306 * handle an error right now. Worst case should be missed
1307 * synchronisation leading to rendering corruption.
1308 */
e2989f14 1309 reservation_object_lock(resv, NULL);
ad778f89
CW
1310 if (flags & EXEC_OBJECT_WRITE)
1311 reservation_object_add_excl_fence(resv, &req->fence);
1312 else if (reservation_object_reserve_shared(resv) == 0)
1313 reservation_object_add_shared_fence(resv, &req->fence);
e2989f14 1314 reservation_object_unlock(resv);
ad778f89
CW
1315}
1316
5b043f4e 1317static void
27173f1f 1318i915_gem_execbuffer_move_to_active(struct list_head *vmas,
8a8edb59 1319 struct drm_i915_gem_request *req)
432e58ed 1320{
27173f1f 1321 struct i915_vma *vma;
432e58ed 1322
27173f1f
BW
1323 list_for_each_entry(vma, vmas, exec_list) {
1324 struct drm_i915_gem_object *obj = vma->obj;
db53a302 1325
432e58ed 1326 obj->base.write_domain = obj->base.pending_write_domain;
5cf3d280
CW
1327 if (obj->base.write_domain)
1328 vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
1329 else
ed5982e6
DV
1330 obj->base.pending_read_domains |= obj->base.read_domains;
1331 obj->base.read_domains = obj->base.pending_read_domains;
432e58ed 1332
5cf3d280 1333 i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
ad778f89 1334 eb_export_fence(obj, req, vma->exec_entry->flags);
432e58ed
CW
1335 }
1336}
1337
ae662d31 1338static int
b5321f30 1339i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
ae662d31 1340{
73dec95e
TU
1341 u32 *cs;
1342 int i;
ae662d31 1343
b5321f30 1344 if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
9d662da8
DV
1345 DRM_DEBUG("sol reset is gen7/rcs only\n");
1346 return -EINVAL;
1347 }
ae662d31 1348
73dec95e
TU
1349 cs = intel_ring_begin(req, 4 * 3);
1350 if (IS_ERR(cs))
1351 return PTR_ERR(cs);
ae662d31
EA
1352
1353 for (i = 0; i < 4; i++) {
73dec95e
TU
1354 *cs++ = MI_LOAD_REGISTER_IMM(1);
1355 *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
1356 *cs++ = 0;
ae662d31
EA
1357 }
1358
73dec95e 1359 intel_ring_advance(req, cs);
ae662d31
EA
1360
1361 return 0;
1362}
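/*
 * The ring space requested above (4 * 3 dwords) matches the loop body:
 * four MI_LOAD_REGISTER_IMM(1) packets of three dwords each, zeroing the
 * GEN7_SO_WRITE_OFFSET registers before the batch is dispatched.
 */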
1363
058d88c4 1364static struct i915_vma *
0bc40be8 1365i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
71745376 1366 struct drm_i915_gem_exec_object2 *shadow_exec_entry,
71745376 1367 struct drm_i915_gem_object *batch_obj,
59bfa124 1368 struct eb_vmas *eb,
71745376
BV
1369 u32 batch_start_offset,
1370 u32 batch_len,
17cabf57 1371 bool is_master)
71745376 1372{
71745376 1373 struct drm_i915_gem_object *shadow_batch_obj;
17cabf57 1374 struct i915_vma *vma;
71745376
BV
1375 int ret;
1376
0bc40be8 1377 shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
17cabf57 1378 PAGE_ALIGN(batch_len));
71745376 1379 if (IS_ERR(shadow_batch_obj))
59bfa124 1380 return ERR_CAST(shadow_batch_obj);
71745376 1381
33a051a5
CW
1382 ret = intel_engine_cmd_parser(engine,
1383 batch_obj,
1384 shadow_batch_obj,
1385 batch_start_offset,
1386 batch_len,
1387 is_master);
058d88c4
CW
1388 if (ret) {
1389 if (ret == -EACCES) /* unhandled chained batch */
1390 vma = NULL;
1391 else
1392 vma = ERR_PTR(ret);
1393 goto out;
1394 }
71745376 1395
058d88c4
CW
1396 vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
1397 if (IS_ERR(vma))
1398 goto out;
de4e783a 1399
17cabf57 1400 memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
71745376 1401
17cabf57 1402 vma->exec_entry = shadow_exec_entry;
de4e783a 1403 vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
25dc556a 1404 i915_gem_object_get(shadow_batch_obj);
17cabf57 1405 list_add_tail(&vma->exec_list, &eb->vmas);
71745376 1406
058d88c4 1407out:
de4e783a 1408 i915_gem_object_unpin_pages(shadow_batch_obj);
058d88c4 1409 return vma;
71745376 1410}
5c6c6003 1411
c8659efa
CW
1412static void
1413add_to_client(struct drm_i915_gem_request *req,
1414 struct drm_file *file)
1415{
1416 req->file_priv = file->driver_priv;
1417 list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
1418}
1419
5b043f4e
CW
1420static int
1421execbuf_submit(struct i915_execbuffer_params *params,
1422 struct drm_i915_gem_execbuffer2 *args,
1423 struct list_head *vmas)
78382593 1424{
5f19e2bf 1425 u64 exec_start, exec_len;
2f5945bc 1426 int ret;
78382593 1427
535fbe82 1428 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
78382593 1429 if (ret)
2f5945bc 1430 return ret;
78382593 1431
ba01cc93 1432 ret = i915_switch_context(params->request);
78382593 1433 if (ret)
2f5945bc 1434 return ret;
78382593 1435
ef0f411f
KG
1436 if (args->flags & I915_EXEC_CONSTANTS_MASK) {
1437 DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
2f5945bc 1438 return -EINVAL;
78382593
OM
1439 }
1440
78382593 1441 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
b5321f30 1442 ret = i915_reset_gen7_sol_offsets(params->request);
78382593 1443 if (ret)
2f5945bc 1444 return ret;
78382593
OM
1445 }
1446
5f19e2bf 1447 exec_len = args->batch_len;
59bfa124 1448 exec_start = params->batch->node.start +
5f19e2bf
JH
1449 params->args_batch_start_offset;
1450
9d611c03 1451 if (exec_len == 0)
0b537272 1452 exec_len = params->batch->size - params->args_batch_start_offset;
9d611c03 1453
803688ba
CW
1454 ret = params->engine->emit_bb_start(params->request,
1455 exec_start, exec_len,
1456 params->dispatch_flags);
2f5945bc
CW
1457 if (ret)
1458 return ret;
78382593 1459
8a8edb59 1460 i915_gem_execbuffer_move_to_active(vmas, params->request);
78382593 1461
2f5945bc 1462 return 0;
78382593
OM
1463}
1464
a8ebba75
ZY
1465/**
1466 * Find one BSD ring to dispatch the corresponding BSD command.
c80ff16e 1467 * The engine index is returned.
a8ebba75 1468 */
de1add36 1469static unsigned int
c80ff16e
CW
1470gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
1471 struct drm_file *file)
a8ebba75 1472{
a8ebba75
ZY
1473 struct drm_i915_file_private *file_priv = file->driver_priv;
1474
de1add36 1475 /* Check whether the file_priv has already selected one ring. */
6f633402
JL
1476 if ((int)file_priv->bsd_engine < 0)
1477 file_priv->bsd_engine = atomic_fetch_xor(1,
1478 &dev_priv->mm.bsd_engine_dispatch_index);
d23db88c 1479
c80ff16e 1480 return file_priv->bsd_engine;
d23db88c
CW
1481}
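/*
 * The atomic_fetch_xor(1, ...) above flips the per-device dispatch index
 * between 0 and 1, so clients that do not request a specific BSD ring are
 * alternated between the two video engines (VCS and VCS2).
 */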
1482
de1add36
TU
1483#define I915_USER_RINGS (4)
1484
117897f4 1485static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
de1add36
TU
1486 [I915_EXEC_DEFAULT] = RCS,
1487 [I915_EXEC_RENDER] = RCS,
1488 [I915_EXEC_BLT] = BCS,
1489 [I915_EXEC_BSD] = VCS,
1490 [I915_EXEC_VEBOX] = VECS
1491};
1492
f8ca0c07
DG
1493static struct intel_engine_cs *
1494eb_select_engine(struct drm_i915_private *dev_priv,
1495 struct drm_file *file,
1496 struct drm_i915_gem_execbuffer2 *args)
de1add36
TU
1497{
1498 unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
f8ca0c07 1499 struct intel_engine_cs *engine;
de1add36
TU
1500
1501 if (user_ring_id > I915_USER_RINGS) {
1502 DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
f8ca0c07 1503 return NULL;
de1add36
TU
1504 }
1505
1506 if ((user_ring_id != I915_EXEC_BSD) &&
1507 ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
1508 DRM_DEBUG("execbuf with non bsd ring but with invalid "
1509 "bsd dispatch flags: %d\n", (int)(args->flags));
f8ca0c07 1510 return NULL;
de1add36
TU
1511 }
1512
1513 if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
1514 unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
1515
1516 if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
c80ff16e 1517 bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
de1add36
TU
1518 } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
1519 bsd_idx <= I915_EXEC_BSD_RING2) {
d9da6aa0 1520 bsd_idx >>= I915_EXEC_BSD_SHIFT;
de1add36
TU
1521 bsd_idx--;
1522 } else {
1523 DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
1524 bsd_idx);
f8ca0c07 1525 return NULL;
de1add36
TU
1526 }
1527
3b3f1650 1528 engine = dev_priv->engine[_VCS(bsd_idx)];
de1add36 1529 } else {
3b3f1650 1530 engine = dev_priv->engine[user_ring_map[user_ring_id]];
de1add36
TU
1531 }
1532
3b3f1650 1533 if (!engine) {
de1add36 1534 DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
f8ca0c07 1535 return NULL;
de1add36
TU
1536 }
1537
f8ca0c07 1538 return engine;
de1add36
TU
1539}
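/*
 * Usage sketch (userspace view, using the uapi flag names referenced
 * above): I915_EXEC_BSD | I915_EXEC_BSD_RING2 pins the submission to the
 * second video engine, while plain I915_EXEC_BSD leaves the choice to
 * gen8_dispatch_bsd_engine().
 */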
1540
54cf91dc
CW
1541static int
1542i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1543 struct drm_file *file,
1544 struct drm_i915_gem_execbuffer2 *args,
41bde553 1545 struct drm_i915_gem_exec_object2 *exec)
54cf91dc 1546{
72e96d64
JL
1547 struct drm_i915_private *dev_priv = to_i915(dev);
1548 struct i915_ggtt *ggtt = &dev_priv->ggtt;
27173f1f 1549 struct eb_vmas *eb;
78a42377 1550 struct drm_i915_gem_exec_object2 shadow_exec_entry;
e2f80391 1551 struct intel_engine_cs *engine;
e2efd130 1552 struct i915_gem_context *ctx;
41bde553 1553 struct i915_address_space *vm;
5f19e2bf
JH
1554 struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1555 struct i915_execbuffer_params *params = &params_master;
d299cce7 1556 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
8e004efc 1557 u32 dispatch_flags;
fec0445c
CW
1558 struct dma_fence *in_fence = NULL;
1559 struct sync_file *out_fence = NULL;
1560 int out_fence_fd = -1;
78382593 1561 int ret;
ed5982e6 1562 bool need_relocs;
54cf91dc 1563
ed5982e6 1564 if (!i915_gem_check_execbuffer(args))
432e58ed 1565 return -EINVAL;
432e58ed 1566
ad19f10b 1567 ret = validate_exec_list(dev, exec, args->buffer_count);
54cf91dc
CW
1568 if (ret)
1569 return ret;
1570
8e004efc 1571 dispatch_flags = 0;
d7d4eedd 1572 if (args->flags & I915_EXEC_SECURE) {
b3ac9f25 1573 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
d7d4eedd
CW
1574 return -EPERM;
1575
8e004efc 1576 dispatch_flags |= I915_DISPATCH_SECURE;
d7d4eedd 1577 }
b45305fc 1578 if (args->flags & I915_EXEC_IS_PINNED)
8e004efc 1579 dispatch_flags |= I915_DISPATCH_PINNED;
d7d4eedd 1580
f8ca0c07
DG
1581 engine = eb_select_engine(dev_priv, file, args);
1582 if (!engine)
1583 return -EINVAL;
54cf91dc
CW
1584
1585 if (args->buffer_count < 1) {
ff240199 1586 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
54cf91dc
CW
1587 return -EINVAL;
1588 }
54cf91dc 1589
a9ed33ca 1590 if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
4805fe82 1591 if (!HAS_RESOURCE_STREAMER(dev_priv)) {
a9ed33ca
AJ
1592 DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
1593 return -EINVAL;
1594 }
e2f80391 1595 if (engine->id != RCS) {
a9ed33ca 1596 DRM_DEBUG("RS is not available on %s\n",
e2f80391 1597 engine->name);
a9ed33ca
AJ
1598 return -EINVAL;
1599 }
1600
1601 dispatch_flags |= I915_DISPATCH_RS;
1602 }
1603
fec0445c
CW
1604 if (args->flags & I915_EXEC_FENCE_IN) {
1605 in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
4a04e371
DCS
1606 if (!in_fence)
1607 return -EINVAL;
fec0445c
CW
1608 }
1609
1610 if (args->flags & I915_EXEC_FENCE_OUT) {
1611 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
1612 if (out_fence_fd < 0) {
1613 ret = out_fence_fd;
4a04e371 1614 goto err_in_fence;
fec0445c
CW
1615 }
1616 }
1617
67d97da3
CW
1618 /* Take a local wakeref for preparing to dispatch the execbuf as
1619 * we expect to access the hardware fairly frequently in the
1620 * process. Upon first dispatch, we acquire another prolonged
1621 * wakeref that we hold until the GPU has been idle for at least
1622 * 100ms.
1623 */
f65c9168
PZ
1624 intel_runtime_pm_get(dev_priv);
1625
54cf91dc
CW
1626 ret = i915_mutex_lock_interruptible(dev);
1627 if (ret)
1628 goto pre_mutex_err;
1629
e2f80391 1630 ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
72ad5c45 1631 if (IS_ERR(ctx)) {
d299cce7 1632 mutex_unlock(&dev->struct_mutex);
41bde553 1633 ret = PTR_ERR(ctx);
d299cce7 1634 goto pre_mutex_err;
935f38d6 1635 }
41bde553 1636
9a6feaf0 1637 i915_gem_context_get(ctx);
41bde553 1638
        if (ctx->ppgtt)
                vm = &ctx->ppgtt->base;
        else
                vm = &ggtt->base;

        memset(&params_master, 0x00, sizeof(params_master));

        eb = eb_create(dev_priv, args);
        if (eb == NULL) {
                i915_gem_context_put(ctx);
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
                goto pre_mutex_err;
        }

        /* Look up object handles */
        ret = eb_lookup_vmas(eb, exec, args, vm, file);
        if (ret)
                goto err;

        /* take note of the batch buffer before we might reorder the lists */
        params->batch = eb_get_batch(eb);

        /* Move the objects en-masse into the GTT, evicting if necessary. */
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
        ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
                                          &need_relocs);
        if (ret)
                goto err;

        /* The objects are in their final locations, apply the relocations. */
        if (need_relocs)
                ret = i915_gem_execbuffer_relocate(eb);
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
                                                                engine,
                                                                eb, exec, ctx);
                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
                }
                if (ret)
                        goto err;
        }

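        /*
         * At this point every vma on the exec list is bound and any required
         * relocations have been applied. Note that the -EFAULT slow path
         * above may have dropped and re-acquired struct_mutex while copying
         * relocations from userspace, hence the BUG_ON re-asserting that the
         * lock is held.
         */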
        /* Set the pending read domains for the batch buffer to COMMAND */
        if (params->batch->obj->base.pending_write_domain) {
                DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
                ret = -EINVAL;
                goto err;
        }
        if (args->batch_start_offset > params->batch->size ||
            args->batch_len > params->batch->size - args->batch_start_offset) {
                DRM_DEBUG("Attempting to use out-of-bounds batch\n");
                ret = -EINVAL;
                goto err;
        }

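        /*
         * If the engine requires the command parser, scan the batch now.
         * i915_gem_execbuffer_parse() copies the user batch into a shadow
         * buffer and returns that vma once the contents have been validated;
         * a NULL return means the batch was not shadowed (e.g. an unhandled
         * chained batch) and the original is dispatched unmodified.
         */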
        params->args_batch_start_offset = args->batch_start_offset;
        if (engine->needs_cmd_parser && args->batch_len) {
                struct i915_vma *vma;

                vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
                                                params->batch->obj,
                                                eb,
                                                args->batch_start_offset,
                                                args->batch_len,
                                                drm_is_current_master(file));
                if (IS_ERR(vma)) {
                        ret = PTR_ERR(vma);
                        goto err;
                }

                if (vma) {
                        /*
                         * Batch parsed and accepted:
                         *
                         * Set the DISPATCH_SECURE bit to remove the NON_SECURE
                         * bit from MI_BATCH_BUFFER_START commands issued in
                         * the dispatch_execbuffer implementations. We
                         * specifically don't want that set on batches the
                         * command parser has accepted.
                         */
                        dispatch_flags |= I915_DISPATCH_SECURE;
                        params->args_batch_start_offset = 0;
                        params->batch = vma;
                }
        }

        params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but bdw mucks it up again. */
        if (dispatch_flags & I915_DISPATCH_SECURE) {
                struct drm_i915_gem_object *obj = params->batch->obj;
                struct i915_vma *vma;

                /*
                 * So on first glance it looks freaky that we pin the batch here
                 * outside of the reservation loop. But:
                 * - The batch is already pinned into the relevant ppgtt, so we
                 *   already have the backing storage fully allocated.
                 * - No other BO uses the global gtt (well contexts, but meh),
                 *   so we don't really have issues with multiple objects not
                 *   fitting due to fragmentation.
                 * So this is actually safe.
                 */
                vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
                if (IS_ERR(vma)) {
                        ret = PTR_ERR(vma);
                        goto err;
                }

                params->batch = vma;
        }

        /* Allocate a request for this batch buffer nice and early. */
        params->request = i915_gem_request_alloc(engine, ctx);
        if (IS_ERR(params->request)) {
                ret = PTR_ERR(params->request);
                goto err_batch_unpin;
        }

        if (in_fence) {
                ret = i915_gem_request_await_dma_fence(params->request,
                                                       in_fence);
                if (ret < 0)
                        goto err_request;
        }

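        /*
         * Create the sync_file for the out-fence before submitting, so that
         * an allocation failure can still abort the execbuf; the reserved fd
         * is only installed once the request has actually been queued.
         */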
        if (out_fence_fd != -1) {
                out_fence = sync_file_create(&params->request->fence);
                if (!out_fence) {
                        ret = -ENOMEM;
                        goto err_request;
                }
        }

        /* Whilst this request exists, batch_obj will be on the
         * active_list, and so will hold the active reference. Only when this
         * request is retired will the batch_obj be moved onto the
         * inactive_list and lose its active reference. Hence we do not need
         * to explicitly hold another reference here.
         */
        params->request->batch = params->batch;

        /*
         * Save assorted stuff away to pass through to *_submission().
         * NB: This data should be 'persistent' and not local as it will be
         * kept around beyond the duration of the IOCTL once the GPU
         * scheduler arrives.
         */
        params->dev = dev;
        params->file = file;
        params->engine = engine;
        params->dispatch_flags = dispatch_flags;
        params->ctx = ctx;

        trace_i915_gem_request_queue(params->request, dispatch_flags);

        ret = execbuf_submit(params, args, &eb->vmas);
err_request:
        __i915_add_request(params->request, ret == 0);
        add_to_client(params->request, file);

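        /*
         * Only publish the out-fence on success: install the reserved fd and
         * return it to userspace in the upper 32 bits of rsvd2 (keeping the
         * in-fence in the lower half); on failure drop the sync_file so the
         * reserved fd can be released below.
         */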
        if (out_fence) {
                if (ret == 0) {
                        fd_install(out_fence_fd, out_fence->file);
                        args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
                        args->rsvd2 |= (u64)out_fence_fd << 32;
                        out_fence_fd = -1;
                } else {
                        fput(out_fence->file);
                }
        }

err_batch_unpin:
        /*
         * FIXME: We crucially rely upon the active tracking for the (ppgtt)
         * batch vma for correctness. For less ugly and less fragility this
         * needs to be adjusted to also track the ggtt batch vma properly as
         * active.
         */
        if (dispatch_flags & I915_DISPATCH_SECURE)
                i915_vma_unpin(params->batch);
err:
        /* the request owns the ref now */
        i915_gem_context_put(ctx);
        eb_destroy(eb);

        mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
        /* intel_gpu_busy should also get a ref, so it will free when the device
         * is really idle. */
        intel_runtime_pm_put(dev_priv);
        if (out_fence_fd != -1)
                put_unused_fd(out_fence_fd);
err_in_fence:
        dma_fence_put(in_fence);
        return ret;
}
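
/*
 * Illustration only, not part of the original file: a minimal sketch of how
 * userspace might drive this path with the explicit-fencing flags handled
 * above, assuming libdrm's drmIoctl() and the uapi definitions from
 * include/uapi/drm/i915_drm.h; fd, handle, batch_len and in_fence_fd are
 * placeholders. The _WR ioctl variant is used so rsvd2 is written back.
 *
 *	struct drm_i915_gem_exec_object2 obj = { .handle = handle };
 *	struct drm_i915_gem_execbuffer2 eb = {
 *		.buffers_ptr = (uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.batch_len = batch_len,
 *		.flags = I915_EXEC_RENDER |
 *			 I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT,
 *		.rsvd2 = (__u64)(__u32)in_fence_fd,	(in-fence fd, low half)
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &eb) == 0) {
 *		int out_fence_fd = eb.rsvd2 >> 32;	(out-fence fd, high half)
 *		...
 *	}
 */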

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_execbuffer *args = data;
        struct drm_i915_gem_execbuffer2 exec2;
        struct drm_i915_gem_exec_object *exec_list = NULL;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret, i;

        if (args->buffer_count < 1) {
                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        /* Copy in the exec list from userland */
        exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
        if (exec_list == NULL || exec2_list == NULL) {
                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                return -ENOMEM;
        }
        ret = copy_from_user(exec_list,
                             u64_to_user_ptr(args->buffers_ptr),
                             sizeof(*exec_list) * args->buffer_count);
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                return -EFAULT;
        }

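        /*
         * Convert each legacy exec_object into an exec_object2. The old ABI
         * predates explicit fence management, so on gen2/3 (which still rely
         * on fence registers for tiled access) every object is conservatively
         * marked EXEC_OBJECT_NEEDS_FENCE; later generations start with no
         * flags set.
         */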
        for (i = 0; i < args->buffer_count; i++) {
                exec2_list[i].handle = exec_list[i].handle;
                exec2_list[i].relocation_count = exec_list[i].relocation_count;
                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
                exec2_list[i].alignment = exec_list[i].alignment;
                exec2_list[i].offset = exec_list[i].offset;
                if (INTEL_GEN(to_i915(dev)) < 4)
                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
                else
                        exec2_list[i].flags = 0;
        }

        exec2.buffers_ptr = args->buffers_ptr;
        exec2.buffer_count = args->buffer_count;
        exec2.batch_start_offset = args->batch_start_offset;
        exec2.batch_len = args->batch_len;
        exec2.DR1 = args->DR1;
        exec2.DR4 = args->DR4;
        exec2.num_cliprects = args->num_cliprects;
        exec2.cliprects_ptr = args->cliprects_ptr;
        exec2.flags = I915_EXEC_RENDER;
        i915_execbuffer2_set_context_id(exec2, 0);

        ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
        if (!ret) {
                struct drm_i915_gem_exec_object __user *user_exec_list =
                        u64_to_user_ptr(args->buffers_ptr);

                /* Copy the new buffer offsets back to the user's exec list. */
                for (i = 0; i < args->buffer_count; i++) {
                        exec2_list[i].offset =
                                gen8_canonical_addr(exec2_list[i].offset);
                        ret = __copy_to_user(&user_exec_list[i].offset,
                                             &exec2_list[i].offset,
                                             sizeof(user_exec_list[i].offset));
                        if (ret) {
                                ret = -EFAULT;
                                DRM_DEBUG("failed to copy %d exec entries "
                                          "back to user (%d)\n",
                                          args->buffer_count, ret);
                                break;
                        }
                }
        }

        drm_free_large(exec_list);
        drm_free_large(exec2_list);
        return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_execbuffer2 *args = data;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret;

        if (args->buffer_count < 1 ||
            args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
                DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        exec2_list = drm_malloc_gfp(args->buffer_count,
                                    sizeof(*exec2_list),
                                    GFP_TEMPORARY);
        if (exec2_list == NULL) {
                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                return -ENOMEM;
        }
        ret = copy_from_user(exec2_list,
                             u64_to_user_ptr(args->buffers_ptr),
                             sizeof(*exec2_list) * args->buffer_count);
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec2_list);
                return -EFAULT;
        }

        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                struct drm_i915_gem_exec_object2 __user *user_exec_list =
                        u64_to_user_ptr(args->buffers_ptr);
                int i;

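                /*
                 * The offsets handed back to userspace are converted to the
                 * canonical form used on gen8+ (bit 47 sign-extended into the
                 * upper bits), so they can be passed straight back in on a
                 * later execbuf, e.g. together with I915_EXEC_NO_RELOC.
                 */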
                for (i = 0; i < args->buffer_count; i++) {
                        exec2_list[i].offset =
                                gen8_canonical_addr(exec2_list[i].offset);
                        ret = __copy_to_user(&user_exec_list[i].offset,
                                             &exec2_list[i].offset,
                                             sizeof(user_exec_list[i].offset));
                        if (ret) {
                                ret = -EFAULT;
                                DRM_DEBUG("failed to copy %d exec entries "
                                          "back to user\n",
                                          args->buffer_count);
                                break;
                        }
                }
        }

        drm_free_large(exec2_list);
        return ret;
}