/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>
#include <linux/uaccess.h>

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
#define  __EXEC_OBJECT_PURGEABLE (1<<27)

#define BATCH_OFFSET_BIAS (256*1024)

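/*
 * Per-execbuf bookkeeping for the vmas referenced by the request.  Handle
 * lookup uses one of two schemes chosen in eb_create(): with
 * I915_EXEC_HANDLE_LUT the buffer index doubles as the handle and the flat
 * lut[] array is used (eb->and is negative); otherwise the vmas are kept in
 * a small hash table and eb->and holds the bucket mask.
 */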
struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;


err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		drm_gem_object_unreference(&obj->base);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct i915_vma *vma;

			vma = hlist_entry(node, struct i915_vma, exec_node);
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

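/*
 * Undo the state taken while reserving a vma for this execbuf: drop the
 * fence and pin references, mark __EXEC_OBJECT_PURGEABLE objects (the
 * shadow batch) as DONTNEED again, and clear the internal flag bits so the
 * entry can be reserved afresh on a later pass.
 */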
static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	if (entry->flags & __EXEC_OBJECT_PURGEABLE)
		obj->madv = I915_MADV_DONTNEED;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
			  __EXEC_OBJECT_HAS_PIN |
			  __EXEC_OBJECT_PURGEABLE);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		obj->cache_level != I915_CACHE_NONE);
}

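/*
 * A relocation is patched into the target object through one of three
 * paths, chosen in i915_gem_execbuffer_relocate_entry(): via a CPU kmap
 * when the object is coherent (use_cpu_reloc), through an atomic GTT
 * mapping when it is mappable, or with an explicit clflush fallback
 * otherwise.  Each writer also handles the second dword needed for the
 * 64-bit relocations on gen8+.
 */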
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = reloc->delta + target_offset;
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
	}

	kunmap_atomic(vaddr);

	return 0;
}

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint64_t delta = reloc->delta + target_offset;
	uint64_t offset;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform. */
	offset = i915_gem_obj_ggtt_offset(obj);
	offset += reloc->offset;
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					      offset & PAGE_MASK);
	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

	if (INTEL_INFO(dev)->gen >= 8) {
		offset += sizeof(uint32_t);

		if (offset_in_page(offset) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page =
				io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
							 offset);
		}

		iowrite32(upper_32_bits(delta),
			  reloc_page + offset_in_page(offset));
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

static void
clflush_write32(void *addr, uint32_t value)
{
	/* This is not a fast path, so KISS. */
	drm_clflush_virt_range(addr, sizeof(uint32_t));
	*(uint32_t *)addr = value;
	drm_clflush_virt_range(addr, sizeof(uint32_t));
}

static int
relocate_entry_clflush(struct drm_i915_gem_object *obj,
		       struct drm_i915_gem_relocation_entry *reloc,
		       uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = (int)reloc->delta + target_offset;
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	clflush_write32(vaddr + page_offset, lower_32_bits(delta));

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		clflush_write32(vaddr + page_offset, upper_32_bits(delta));
	}

	kunmap_atomic(vaddr);

	return 0;
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = target_vma->node.start;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !(target_vma->bound & GLOBAL_BIND))) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    GLOBAL_BIND);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && pagefault_disabled())
		return -EFAULT;

	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc, target_offset);
	else if (obj->map_and_fenceable)
		ret = relocate_entry_gtt(obj, reloc, target_offset);
	else if (cpu_has_clflush)
		ret = relocate_entry_clflush(obj, reloc, target_offset);
	else {
		WARN_ONCE(1, "Impossible case in relocation handling\n");
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

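/*
 * Fast-path relocation processing.  The relocation entries are copied from
 * userspace in chunks onto the stack using the inatomic helpers, since the
 * caller runs under pagefault_disable() while holding struct_mutex; any
 * fault drops us back to the slow path via -EFAULT.
 */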
static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}

static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *ring,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = 0;
	if (!drm_mm_node_allocated(&vma->node)) {
		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
			flags |= PIN_GLOBAL | PIN_MAPPABLE;
		if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
			flags |= PIN_GLOBAL;
		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
	}

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
	if ((ret == -ENOSPC || ret == -E2BIG) &&
	    only_mappable_for_reloc(entry->flags))
		ret = i915_gem_object_pin(obj, vma->vm,
					  entry->alignment,
					  flags & ~(PIN_GLOBAL | PIN_MAPPABLE));
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_gem_object_get_fence(obj);
		if (ret)
			return ret;

		if (i915_gem_object_pin_fence(obj))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!i915_is_ggtt(vma->vm))
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(vma->obj->base.dev))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct drm_i915_gem_object *obj = vma->obj;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
		!i915_is_ggtt(vma->vm));

	if (entry->alignment &&
	    vma->node.start & (entry->alignment - 1))
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	/* avoid costly ping-pong once a batch bo ended up non-mappable */
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
		return !only_mappable_for_reloc(entry->flags);

	return false;
}

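/*
 * Reserve GTT space for every vma in the execbuf.  Objects that need a
 * mappable or fenceable placement are moved to the front of the list, then
 * binding proceeds in passes: misplaced objects are unbound, already bound
 * ones are pinned, and fresh ones are bound; on -ENOSPC everything is
 * unpinned and the address space is evicted before retrying once.
 */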
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
			    struct list_head *vmas,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	i915_gem_retire_requests_ring(ring);

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}
778
779static int
780i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
ed5982e6 781 struct drm_i915_gem_execbuffer2 *args,
54cf91dc 782 struct drm_file *file,
a4872ba6 783 struct intel_engine_cs *ring,
27173f1f
BW
784 struct eb_vmas *eb,
785 struct drm_i915_gem_exec_object2 *exec)
54cf91dc
CW
786{
787 struct drm_i915_gem_relocation_entry *reloc;
27173f1f
BW
788 struct i915_address_space *vm;
789 struct i915_vma *vma;
ed5982e6 790 bool need_relocs;
dd6864a4 791 int *reloc_offset;
54cf91dc 792 int i, total, ret;
b205ca57 793 unsigned count = args->buffer_count;
54cf91dc 794
27173f1f
BW
795 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
796
67731b87 797 /* We may process another execbuffer during the unlock... */
27173f1f
BW
798 while (!list_empty(&eb->vmas)) {
799 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
800 list_del_init(&vma->exec_list);
a415d355 801 i915_gem_execbuffer_unreserve_vma(vma);
27173f1f 802 drm_gem_object_unreference(&vma->obj->base);
67731b87
CW
803 }
804
54cf91dc
CW
805 mutex_unlock(&dev->struct_mutex);
806
807 total = 0;
808 for (i = 0; i < count; i++)
432e58ed 809 total += exec[i].relocation_count;
54cf91dc 810
dd6864a4 811 reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
54cf91dc 812 reloc = drm_malloc_ab(total, sizeof(*reloc));
dd6864a4
CW
813 if (reloc == NULL || reloc_offset == NULL) {
814 drm_free_large(reloc);
815 drm_free_large(reloc_offset);
54cf91dc
CW
816 mutex_lock(&dev->struct_mutex);
817 return -ENOMEM;
818 }
819
820 total = 0;
821 for (i = 0; i < count; i++) {
822 struct drm_i915_gem_relocation_entry __user *user_relocs;
262b6d36
CW
823 u64 invalid_offset = (u64)-1;
824 int j;
54cf91dc 825
2bb4629a 826 user_relocs = to_user_ptr(exec[i].relocs_ptr);
54cf91dc
CW
827
828 if (copy_from_user(reloc+total, user_relocs,
432e58ed 829 exec[i].relocation_count * sizeof(*reloc))) {
54cf91dc
CW
830 ret = -EFAULT;
831 mutex_lock(&dev->struct_mutex);
832 goto err;
833 }
834
262b6d36
CW
835 /* As we do not update the known relocation offsets after
836 * relocating (due to the complexities in lock handling),
837 * we need to mark them as invalid now so that we force the
838 * relocation processing next time. Just in case the target
839 * object is evicted and then rebound into its old
840 * presumed_offset before the next execbuffer - if that
841 * happened we would make the mistake of assuming that the
842 * relocations were valid.
843 */
844 for (j = 0; j < exec[i].relocation_count; j++) {
9aab8bff
CW
845 if (__copy_to_user(&user_relocs[j].presumed_offset,
846 &invalid_offset,
847 sizeof(invalid_offset))) {
262b6d36
CW
848 ret = -EFAULT;
849 mutex_lock(&dev->struct_mutex);
850 goto err;
851 }
852 }
853
dd6864a4 854 reloc_offset[i] = total;
432e58ed 855 total += exec[i].relocation_count;
54cf91dc
CW
856 }
857
858 ret = i915_mutex_lock_interruptible(dev);
859 if (ret) {
860 mutex_lock(&dev->struct_mutex);
861 goto err;
862 }
863
67731b87 864 /* reacquire the objects */
67731b87 865 eb_reset(eb);
27173f1f 866 ret = eb_lookup_vmas(eb, exec, args, vm, file);
3b96eff4
CW
867 if (ret)
868 goto err;
67731b87 869
ed5982e6 870 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
27173f1f 871 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
54cf91dc
CW
872 if (ret)
873 goto err;
874
27173f1f
BW
875 list_for_each_entry(vma, &eb->vmas, exec_list) {
876 int offset = vma->exec_entry - exec;
877 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
878 reloc + reloc_offset[offset]);
54cf91dc
CW
879 if (ret)
880 goto err;
54cf91dc
CW
881 }
882
883 /* Leave the user relocations as are, this is the painfully slow path,
884 * and we want to avoid the complication of dropping the lock whilst
885 * having buffers reserved in the aperture and so causing spurious
886 * ENOSPC for random operations.
887 */
888
889err:
890 drm_free_large(reloc);
dd6864a4 891 drm_free_large(reloc_offset);
54cf91dc
CW
892 return ret;
893}
894
54cf91dc 895static int
a4872ba6 896i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
27173f1f 897 struct list_head *vmas)
54cf91dc 898{
27173f1f 899 struct i915_vma *vma;
6ac42f41 900 uint32_t flush_domains = 0;
000433b6 901 bool flush_chipset = false;
432e58ed 902 int ret;
54cf91dc 903
27173f1f
BW
904 list_for_each_entry(vma, vmas, exec_list) {
905 struct drm_i915_gem_object *obj = vma->obj;
6ac42f41 906 ret = i915_gem_object_sync(obj, ring);
c59a333f
CW
907 if (ret)
908 return ret;
6ac42f41
DV
909
910 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
000433b6 911 flush_chipset |= i915_gem_clflush_object(obj, false);
6ac42f41 912
6ac42f41 913 flush_domains |= obj->base.write_domain;
c59a333f
CW
914 }
915
000433b6 916 if (flush_chipset)
e76e9aeb 917 i915_gem_chipset_flush(ring->dev);
6ac42f41
DV
918
919 if (flush_domains & I915_GEM_DOMAIN_GTT)
920 wmb();
921
09cf7c9a
CW
922 /* Unconditionally invalidate gpu caches and ensure that we do flush
923 * any residual writes from the previous batch.
924 */
a7b9761d 925 return intel_ring_invalidate_all_caches(ring);
54cf91dc
CW
926}
927
432e58ed
CW
928static bool
929i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
54cf91dc 930{
ed5982e6
DV
931 if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
932 return false;
933
432e58ed 934 return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
54cf91dc
CW
935}
936
937static int
ad19f10b
CW
938validate_exec_list(struct drm_device *dev,
939 struct drm_i915_gem_exec_object2 *exec,
54cf91dc
CW
940 int count)
941{
b205ca57
DV
942 unsigned relocs_total = 0;
943 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
ad19f10b
CW
944 unsigned invalid_flags;
945 int i;
946
947 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
948 if (USES_FULL_PPGTT(dev))
949 invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
54cf91dc
CW
950
951 for (i = 0; i < count; i++) {
2bb4629a 952 char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
54cf91dc
CW
953 int length; /* limited by fault_in_pages_readable() */
954
ad19f10b 955 if (exec[i].flags & invalid_flags)
ed5982e6
DV
956 return -EINVAL;
957
3118a4f6
KC
958 /* First check for malicious input causing overflow in
959 * the worst case where we need to allocate the entire
960 * relocation tree as a single array.
961 */
962 if (exec[i].relocation_count > relocs_max - relocs_total)
54cf91dc 963 return -EINVAL;
3118a4f6 964 relocs_total += exec[i].relocation_count;
54cf91dc
CW
965
966 length = exec[i].relocation_count *
967 sizeof(struct drm_i915_gem_relocation_entry);
30587535
KC
968 /*
969 * We must check that the entire relocation array is safe
970 * to read, but since we may need to update the presumed
971 * offsets during execution, check for full write access.
972 */
54cf91dc
CW
973 if (!access_ok(VERIFY_WRITE, ptr, length))
974 return -EFAULT;
975
d330a953 976 if (likely(!i915.prefault_disable)) {
0b74b508
XZ
977 if (fault_in_multipages_readable(ptr, length))
978 return -EFAULT;
979 }
54cf91dc
CW
980 }
981
982 return 0;
983}
984
273497e5 985static struct intel_context *
d299cce7 986i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
a4872ba6 987 struct intel_engine_cs *ring, const u32 ctx_id)
d299cce7 988{
273497e5 989 struct intel_context *ctx = NULL;
d299cce7
MK
990 struct i915_ctx_hang_stats *hs;
991
821d66dd 992 if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
7c9c4b8f
DV
993 return ERR_PTR(-EINVAL);
994
41bde553 995 ctx = i915_gem_context_get(file->driver_priv, ctx_id);
72ad5c45 996 if (IS_ERR(ctx))
41bde553 997 return ctx;
d299cce7 998
41bde553 999 hs = &ctx->hang_stats;
d299cce7
MK
1000 if (hs->banned) {
1001 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
41bde553 1002 return ERR_PTR(-EIO);
d299cce7
MK
1003 }
1004
ec3e9963
OM
1005 if (i915.enable_execlists && !ctx->engine[ring->id].state) {
1006 int ret = intel_lr_context_deferred_create(ctx, ring);
1007 if (ret) {
1008 DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
1009 return ERR_PTR(ret);
1010 }
1011 }
1012
41bde553 1013 return ctx;
d299cce7
MK
1014}
1015
ba8b7ccb 1016void
27173f1f 1017i915_gem_execbuffer_move_to_active(struct list_head *vmas,
a4872ba6 1018 struct intel_engine_cs *ring)
432e58ed 1019{
97b2a6a1 1020 struct drm_i915_gem_request *req = intel_ring_get_request(ring);
27173f1f 1021 struct i915_vma *vma;
432e58ed 1022
27173f1f 1023 list_for_each_entry(vma, vmas, exec_list) {
82b6b6d7 1024 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
27173f1f 1025 struct drm_i915_gem_object *obj = vma->obj;
69c2fc89
CW
1026 u32 old_read = obj->base.read_domains;
1027 u32 old_write = obj->base.write_domain;
db53a302 1028
432e58ed 1029 obj->base.write_domain = obj->base.pending_write_domain;
ed5982e6
DV
1030 if (obj->base.write_domain == 0)
1031 obj->base.pending_read_domains |= obj->base.read_domains;
1032 obj->base.read_domains = obj->base.pending_read_domains;
432e58ed 1033
e2d05a8b 1034 i915_vma_move_to_active(vma, ring);
432e58ed
CW
1035 if (obj->base.write_domain) {
1036 obj->dirty = 1;
97b2a6a1 1037 i915_gem_request_assign(&obj->last_write_req, req);
f99d7069 1038
a4001f1b 1039 intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
c8725f3d
CW
1040
1041 /* update for the implicit flush after a batch */
1042 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
432e58ed 1043 }
82b6b6d7 1044 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
97b2a6a1 1045 i915_gem_request_assign(&obj->last_fenced_req, req);
82b6b6d7
CW
1046 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
1047 struct drm_i915_private *dev_priv = to_i915(ring->dev);
1048 list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
1049 &dev_priv->mm.fence_list);
1050 }
1051 }
432e58ed 1052
db53a302 1053 trace_i915_gem_object_change_domain(obj, old_read, old_write);
432e58ed
CW
1054 }
1055}
1056
ba8b7ccb 1057void
54cf91dc 1058i915_gem_execbuffer_retire_commands(struct drm_device *dev,
432e58ed 1059 struct drm_file *file,
a4872ba6 1060 struct intel_engine_cs *ring,
7d736f4f 1061 struct drm_i915_gem_object *obj)
54cf91dc 1062{
cc889e0f
DV
1063 /* Unconditionally force add_request to emit a full flush. */
1064 ring->gpu_caches_dirty = true;
54cf91dc 1065
432e58ed 1066 /* Add a breadcrumb for the completion of the batch buffer */
9400ae5c 1067 (void)__i915_add_request(ring, file, obj);
432e58ed 1068}
54cf91dc 1069
ae662d31
EA
1070static int
1071i915_reset_gen7_sol_offsets(struct drm_device *dev,
a4872ba6 1072 struct intel_engine_cs *ring)
ae662d31 1073{
50227e1c 1074 struct drm_i915_private *dev_priv = dev->dev_private;
ae662d31
EA
1075 int ret, i;
1076
9d662da8
DV
1077 if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
1078 DRM_DEBUG("sol reset is gen7/rcs only\n");
1079 return -EINVAL;
1080 }
ae662d31
EA
1081
1082 ret = intel_ring_begin(ring, 4 * 3);
1083 if (ret)
1084 return ret;
1085
1086 for (i = 0; i < 4; i++) {
1087 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1088 intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
1089 intel_ring_emit(ring, 0);
1090 }
1091
1092 intel_ring_advance(ring);
1093
1094 return 0;
1095}
1096
5c6c6003
CW
1097static int
1098i915_emit_box(struct intel_engine_cs *ring,
1099 struct drm_clip_rect *box,
1100 int DR1, int DR4)
1101{
1102 int ret;
1103
1104 if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
1105 box->y2 <= 0 || box->x2 <= 0) {
1106 DRM_ERROR("Bad box %d,%d..%d,%d\n",
1107 box->x1, box->y1, box->x2, box->y2);
1108 return -EINVAL;
1109 }
1110
1111 if (INTEL_INFO(ring->dev)->gen >= 4) {
1112 ret = intel_ring_begin(ring, 4);
1113 if (ret)
1114 return ret;
1115
1116 intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
1117 intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
1118 intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
1119 intel_ring_emit(ring, DR4);
1120 } else {
1121 ret = intel_ring_begin(ring, 6);
1122 if (ret)
1123 return ret;
1124
1125 intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
1126 intel_ring_emit(ring, DR1);
1127 intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
1128 intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
1129 intel_ring_emit(ring, DR4);
1130 intel_ring_emit(ring, 0);
1131 }
1132 intel_ring_advance(ring);
1133
1134 return 0;
1135}
1136
71745376
BV
1137static struct drm_i915_gem_object*
1138i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
1139 struct drm_i915_gem_exec_object2 *shadow_exec_entry,
1140 struct eb_vmas *eb,
1141 struct drm_i915_gem_object *batch_obj,
1142 u32 batch_start_offset,
1143 u32 batch_len,
17cabf57 1144 bool is_master)
71745376
BV
1145{
1146 struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
1147 struct drm_i915_gem_object *shadow_batch_obj;
17cabf57 1148 struct i915_vma *vma;
71745376
BV
1149 int ret;
1150
1151 shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
17cabf57 1152 PAGE_ALIGN(batch_len));
71745376
BV
1153 if (IS_ERR(shadow_batch_obj))
1154 return shadow_batch_obj;
1155
1156 ret = i915_parse_cmds(ring,
1157 batch_obj,
1158 shadow_batch_obj,
1159 batch_start_offset,
1160 batch_len,
1161 is_master);
17cabf57
CW
1162 if (ret)
1163 goto err;
71745376 1164
17cabf57
CW
1165 ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
1166 if (ret)
1167 goto err;
71745376 1168
17cabf57 1169 memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
71745376 1170
17cabf57
CW
1171 vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
1172 vma->exec_entry = shadow_exec_entry;
1173 vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
1174 drm_gem_object_reference(&shadow_batch_obj->base);
1175 list_add_tail(&vma->exec_list, &eb->vmas);
71745376 1176
17cabf57
CW
1177 shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1178
1179 return shadow_batch_obj;
71745376 1180
17cabf57
CW
1181err:
1182 if (ret == -EACCES) /* unhandled chained batch */
1183 return batch_obj;
1184 else
1185 return ERR_PTR(ret);
71745376 1186}
5c6c6003 1187
a83014d3
OM
1188int
1189i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
1190 struct intel_engine_cs *ring,
1191 struct intel_context *ctx,
1192 struct drm_i915_gem_execbuffer2 *args,
1193 struct list_head *vmas,
1194 struct drm_i915_gem_object *batch_obj,
8e004efc 1195 u64 exec_start, u32 dispatch_flags)
78382593
OM
1196{
1197 struct drm_clip_rect *cliprects = NULL;
1198 struct drm_i915_private *dev_priv = dev->dev_private;
1199 u64 exec_len;
1200 int instp_mode;
1201 u32 instp_mask;
1202 int i, ret = 0;
1203
1204 if (args->num_cliprects != 0) {
1205 if (ring != &dev_priv->ring[RCS]) {
1206 DRM_DEBUG("clip rectangles are only valid with the render ring\n");
1207 return -EINVAL;
1208 }
1209
1210 if (INTEL_INFO(dev)->gen >= 5) {
1211 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
1212 return -EINVAL;
1213 }
1214
1215 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1216 DRM_DEBUG("execbuf with %u cliprects\n",
1217 args->num_cliprects);
1218 return -EINVAL;
1219 }
1220
1221 cliprects = kcalloc(args->num_cliprects,
1222 sizeof(*cliprects),
1223 GFP_KERNEL);
1224 if (cliprects == NULL) {
1225 ret = -ENOMEM;
1226 goto error;
1227 }
1228
1229 if (copy_from_user(cliprects,
1230 to_user_ptr(args->cliprects_ptr),
1231 sizeof(*cliprects)*args->num_cliprects)) {
1232 ret = -EFAULT;
1233 goto error;
1234 }
1235 } else {
1236 if (args->DR4 == 0xffffffff) {
1237 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1238 args->DR4 = 0;
1239 }
1240
1241 if (args->DR1 || args->DR4 || args->cliprects_ptr) {
1242 DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
1243 return -EINVAL;
1244 }
1245 }
1246
1247 ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
1248 if (ret)
1249 goto error;
1250
1251 ret = i915_switch_context(ring, ctx);
1252 if (ret)
1253 goto error;
1254
563222a7
BW
1255 if (ctx->ppgtt)
1256 WARN(ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
1257 "%s didn't clear reload\n", ring->name);
1258 else if (dev_priv->mm.aliasing_ppgtt)
1259 WARN(dev_priv->mm.aliasing_ppgtt->pd_dirty_rings &
1260 (1<<ring->id), "%s didn't clear reload\n", ring->name);
1261
78382593
OM
1262 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1263 instp_mask = I915_EXEC_CONSTANTS_MASK;
1264 switch (instp_mode) {
1265 case I915_EXEC_CONSTANTS_REL_GENERAL:
1266 case I915_EXEC_CONSTANTS_ABSOLUTE:
1267 case I915_EXEC_CONSTANTS_REL_SURFACE:
1268 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
1269 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1270 ret = -EINVAL;
1271 goto error;
1272 }
1273
1274 if (instp_mode != dev_priv->relative_constants_mode) {
1275 if (INTEL_INFO(dev)->gen < 4) {
1276 DRM_DEBUG("no rel constants on pre-gen4\n");
1277 ret = -EINVAL;
1278 goto error;
1279 }
1280
1281 if (INTEL_INFO(dev)->gen > 5 &&
1282 instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1283 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1284 ret = -EINVAL;
1285 goto error;
1286 }
1287
1288 /* The HW changed the meaning on this bit on gen6 */
1289 if (INTEL_INFO(dev)->gen >= 6)
1290 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1291 }
1292 break;
1293 default:
1294 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1295 ret = -EINVAL;
1296 goto error;
1297 }
1298
1299 if (ring == &dev_priv->ring[RCS] &&
1300 instp_mode != dev_priv->relative_constants_mode) {
1301 ret = intel_ring_begin(ring, 4);
1302 if (ret)
1303 goto error;
1304
1305 intel_ring_emit(ring, MI_NOOP);
1306 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1307 intel_ring_emit(ring, INSTPM);
1308 intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1309 intel_ring_advance(ring);
1310
1311 dev_priv->relative_constants_mode = instp_mode;
1312 }
1313
1314 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1315 ret = i915_reset_gen7_sol_offsets(dev, ring);
1316 if (ret)
1317 goto error;
1318 }
1319
1320 exec_len = args->batch_len;
1321 if (cliprects) {
1322 for (i = 0; i < args->num_cliprects; i++) {
5c6c6003 1323 ret = i915_emit_box(ring, &cliprects[i],
78382593
OM
1324 args->DR1, args->DR4);
1325 if (ret)
1326 goto error;
1327
1328 ret = ring->dispatch_execbuffer(ring,
1329 exec_start, exec_len,
8e004efc 1330 dispatch_flags);
78382593
OM
1331 if (ret)
1332 goto error;
1333 }
1334 } else {
1335 ret = ring->dispatch_execbuffer(ring,
1336 exec_start, exec_len,
8e004efc 1337 dispatch_flags);
78382593
OM
1338 if (ret)
1339 return ret;
1340 }
1341
8e004efc 1342 trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
78382593
OM
1343
1344 i915_gem_execbuffer_move_to_active(vmas, ring);
1345 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1346
1347error:
1348 kfree(cliprects);
1349 return ret;
1350}
1351
a8ebba75
ZY
1352/**
1353 * Find one BSD ring to dispatch the corresponding BSD command.
1354 * The Ring ID is returned.
1355 */
1356static int gen8_dispatch_bsd_ring(struct drm_device *dev,
1357 struct drm_file *file)
1358{
1359 struct drm_i915_private *dev_priv = dev->dev_private;
1360 struct drm_i915_file_private *file_priv = file->driver_priv;
1361
1362 /* Check whether the file_priv is using one ring */
1363 if (file_priv->bsd_ring)
1364 return file_priv->bsd_ring->id;
1365 else {
1366 /* If no, use the ping-pong mechanism to select one ring */
1367 int ring_id;
1368
1369 mutex_lock(&dev->struct_mutex);
bdf1e7e3 1370 if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
a8ebba75 1371 ring_id = VCS;
bdf1e7e3 1372 dev_priv->mm.bsd_ring_dispatch_index = 1;
a8ebba75
ZY
1373 } else {
1374 ring_id = VCS2;
bdf1e7e3 1375 dev_priv->mm.bsd_ring_dispatch_index = 0;
a8ebba75
ZY
1376 }
1377 file_priv->bsd_ring = &dev_priv->ring[ring_id];
1378 mutex_unlock(&dev->struct_mutex);
1379 return ring_id;
1380 }
1381}
1382
d23db88c
CW
1383static struct drm_i915_gem_object *
1384eb_get_batch(struct eb_vmas *eb)
1385{
1386 struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
1387
1388 /*
1389 * SNA is doing fancy tricks with compressing batch buffers, which leads
1390 * to negative relocation deltas. Usually that works out ok since the
1391 * relocate address is still positive, except when the batch is placed
1392 * very low in the GTT. Ensure this doesn't happen.
1393 *
1394 * Note that actual hangs have only been observed on gen7, but for
1395 * paranoia do it everywhere.
1396 */
1397 vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
1398
1399 return vma->obj;
1400}
1401
54cf91dc
CW
1402static int
1403i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1404 struct drm_file *file,
1405 struct drm_i915_gem_execbuffer2 *args,
41bde553 1406 struct drm_i915_gem_exec_object2 *exec)
54cf91dc 1407{
50227e1c 1408 struct drm_i915_private *dev_priv = dev->dev_private;
27173f1f 1409 struct eb_vmas *eb;
54cf91dc 1410 struct drm_i915_gem_object *batch_obj;
78a42377 1411 struct drm_i915_gem_exec_object2 shadow_exec_entry;
a4872ba6 1412 struct intel_engine_cs *ring;
273497e5 1413 struct intel_context *ctx;
41bde553 1414 struct i915_address_space *vm;
d299cce7 1415 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
78382593 1416 u64 exec_start = args->batch_start_offset;
8e004efc 1417 u32 dispatch_flags;
78382593 1418 int ret;
ed5982e6 1419 bool need_relocs;
54cf91dc 1420
ed5982e6 1421 if (!i915_gem_check_execbuffer(args))
432e58ed 1422 return -EINVAL;
432e58ed 1423
ad19f10b 1424 ret = validate_exec_list(dev, exec, args->buffer_count);
54cf91dc
CW
1425 if (ret)
1426 return ret;
1427
8e004efc 1428 dispatch_flags = 0;
d7d4eedd
CW
1429 if (args->flags & I915_EXEC_SECURE) {
1430 if (!file->is_master || !capable(CAP_SYS_ADMIN))
1431 return -EPERM;
1432
8e004efc 1433 dispatch_flags |= I915_DISPATCH_SECURE;
d7d4eedd 1434 }
b45305fc 1435 if (args->flags & I915_EXEC_IS_PINNED)
8e004efc 1436 dispatch_flags |= I915_DISPATCH_PINNED;
d7d4eedd 1437
b1a93306 1438 if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
ff240199 1439 DRM_DEBUG("execbuf with unknown ring: %d\n",
54cf91dc
CW
1440 (int)(args->flags & I915_EXEC_RING_MASK));
1441 return -EINVAL;
1442 }
ca01b12b 1443
8d360dff
ZG
1444 if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
1445 ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
1446 DRM_DEBUG("execbuf with non bsd ring but with invalid "
1447 "bsd dispatch flags: %d\n", (int)(args->flags));
1448 return -EINVAL;
1449 }
1450
ca01b12b
BW
1451 if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
1452 ring = &dev_priv->ring[RCS];
a8ebba75
ZY
1453 else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
1454 if (HAS_BSD2(dev)) {
1455 int ring_id;
8d360dff
ZG
1456
1457 switch (args->flags & I915_EXEC_BSD_MASK) {
1458 case I915_EXEC_BSD_DEFAULT:
1459 ring_id = gen8_dispatch_bsd_ring(dev, file);
1460 ring = &dev_priv->ring[ring_id];
1461 break;
1462 case I915_EXEC_BSD_RING1:
1463 ring = &dev_priv->ring[VCS];
1464 break;
1465 case I915_EXEC_BSD_RING2:
1466 ring = &dev_priv->ring[VCS2];
1467 break;
1468 default:
1469 DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
1470 (int)(args->flags & I915_EXEC_BSD_MASK));
1471 return -EINVAL;
1472 }
a8ebba75
ZY
1473 } else
1474 ring = &dev_priv->ring[VCS];
1475 } else
ca01b12b
BW
1476 ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
1477
a15817cf
CW
1478 if (!intel_ring_initialized(ring)) {
1479 DRM_DEBUG("execbuf with invalid ring: %d\n",
1480 (int)(args->flags & I915_EXEC_RING_MASK));
1481 return -EINVAL;
1482 }
54cf91dc
CW
1483
1484 if (args->buffer_count < 1) {
ff240199 1485 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
54cf91dc
CW
1486 return -EINVAL;
1487 }
54cf91dc 1488
f65c9168
PZ
1489 intel_runtime_pm_get(dev_priv);
1490
54cf91dc
CW
1491 ret = i915_mutex_lock_interruptible(dev);
1492 if (ret)
1493 goto pre_mutex_err;
1494
7c9c4b8f 1495 ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
72ad5c45 1496 if (IS_ERR(ctx)) {
d299cce7 1497 mutex_unlock(&dev->struct_mutex);
41bde553 1498 ret = PTR_ERR(ctx);
d299cce7 1499 goto pre_mutex_err;
935f38d6 1500 }
41bde553
BW
1501
1502 i915_gem_context_reference(ctx);
1503
ae6c4806
DV
1504 if (ctx->ppgtt)
1505 vm = &ctx->ppgtt->base;
1506 else
7e0d96bc 1507 vm = &dev_priv->gtt.base;
d299cce7 1508
17601cbc 1509 eb = eb_create(args);
67731b87 1510 if (eb == NULL) {
935f38d6 1511 i915_gem_context_unreference(ctx);
67731b87
CW
1512 mutex_unlock(&dev->struct_mutex);
1513 ret = -ENOMEM;
1514 goto pre_mutex_err;
1515 }
1516
54cf91dc 1517 /* Look up object handles */
27173f1f 1518 ret = eb_lookup_vmas(eb, exec, args, vm, file);
3b96eff4
CW
1519 if (ret)
1520 goto err;
54cf91dc 1521
6fe4f140 1522 /* take note of the batch buffer before we might reorder the lists */
d23db88c 1523 batch_obj = eb_get_batch(eb);
6fe4f140 1524
54cf91dc 1525 /* Move the objects en-masse into the GTT, evicting if necessary. */
ed5982e6 1526 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
27173f1f 1527 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
54cf91dc
CW
1528 if (ret)
1529 goto err;
1530
1531 /* The objects are in their final locations, apply the relocations. */
ed5982e6 1532 if (need_relocs)
17601cbc 1533 ret = i915_gem_execbuffer_relocate(eb);
54cf91dc
CW
1534 if (ret) {
1535 if (ret == -EFAULT) {
ed5982e6 1536 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
27173f1f 1537 eb, exec);
54cf91dc
CW
1538 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1539 }
1540 if (ret)
1541 goto err;
1542 }
1543
1544 /* Set the pending read domains for the batch buffer to COMMAND */
54cf91dc 1545 if (batch_obj->base.pending_write_domain) {
ff240199 1546 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
54cf91dc
CW
1547 ret = -EINVAL;
1548 goto err;
1549 }
54cf91dc 1550
743e78c1 1551 if (i915_needs_cmd_parser(ring) && args->batch_len) {
71745376
BV
1552 batch_obj = i915_gem_execbuffer_parse(ring,
1553 &shadow_exec_entry,
1554 eb,
1555 batch_obj,
1556 args->batch_start_offset,
1557 args->batch_len,
17cabf57 1558 file->is_master);
71745376
BV
1559 if (IS_ERR(batch_obj)) {
1560 ret = PTR_ERR(batch_obj);
78a42377
BV
1561 goto err;
1562 }
17cabf57
CW
1563
1564 /*
1565 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1566 * bit from MI_BATCH_BUFFER_START commands issued in the
1567 * dispatch_execbuffer implementations. We specifically
1568 * don't want that set when the command parser is
1569 * enabled.
1570 *
1571 * FIXME: with aliasing ppgtt, buffers that should only
1572 * be in ggtt still end up in the aliasing ppgtt. remove
1573 * this check when that is fixed.
1574 */
1575 if (USES_FULL_PPGTT(dev))
8e004efc 1576 dispatch_flags |= I915_DISPATCH_SECURE;
17cabf57
CW
1577
1578 exec_start = 0;
351e3db2
BV
1579 }
1580
78a42377
BV
1581 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1582
d7d4eedd
CW
1583 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1584 * batch" bit. Hence we need to pin secure batches into the global gtt.
28cf5415 1585 * hsw should have this fixed, but bdw mucks it up again. */
8e004efc 1586 if (dispatch_flags & I915_DISPATCH_SECURE) {
da51a1e7
DV
1587 /*
1588 * So on first glance it looks freaky that we pin the batch here
1589 * outside of the reservation loop. But:
1590 * - The batch is already pinned into the relevant ppgtt, so we
1591 * already have the backing storage fully allocated.
1592 * - No other BO uses the global gtt (well contexts, but meh),
fd0753cf 1593 * so we don't really have issues with multiple objects not
da51a1e7
DV
1594 * fitting due to fragmentation.
1595 * So this is actually safe.
1596 */
1597 ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
1598 if (ret)
1599 goto err;
d7d4eedd 1600
7e0d96bc 1601 exec_start += i915_gem_obj_ggtt_offset(batch_obj);
da51a1e7 1602 } else
7e0d96bc 1603 exec_start += i915_gem_obj_offset(batch_obj, vm);
d7d4eedd 1604
a83014d3 1605 ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
8e004efc
JH
1606 &eb->vmas, batch_obj, exec_start,
1607 dispatch_flags);
54cf91dc 1608
da51a1e7
DV
1609 /*
1610 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
1611 * batch vma for correctness. For less ugly and less fragility this
1612 * needs to be adjusted to also track the ggtt batch vma properly as
1613 * active.
1614 */
8e004efc 1615 if (dispatch_flags & I915_DISPATCH_SECURE)
da51a1e7 1616 i915_gem_object_ggtt_unpin(batch_obj);
54cf91dc 1617err:
41bde553
BW
1618 /* the request owns the ref now */
1619 i915_gem_context_unreference(ctx);
67731b87 1620 eb_destroy(eb);
54cf91dc
CW
1621
1622 mutex_unlock(&dev->struct_mutex);
1623
1624pre_mutex_err:
f65c9168
PZ
1625 /* intel_gpu_busy should also get a ref, so it will free when the device
1626 * is really idle. */
1627 intel_runtime_pm_put(dev_priv);
54cf91dc
CW
1628 return ret;
1629}
1630
1631/*
1632 * Legacy execbuffer just creates an exec2 list from the original exec object
1633 * list array and passes it to the real function.
1634 */
1635int
1636i915_gem_execbuffer(struct drm_device *dev, void *data,
1637 struct drm_file *file)
1638{
1639 struct drm_i915_gem_execbuffer *args = data;
1640 struct drm_i915_gem_execbuffer2 exec2;
1641 struct drm_i915_gem_exec_object *exec_list = NULL;
1642 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1643 int ret, i;
1644
54cf91dc 1645 if (args->buffer_count < 1) {
ff240199 1646 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
54cf91dc
CW
1647 return -EINVAL;
1648 }
1649
1650 /* Copy in the exec list from userland */
1651 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1652 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1653 if (exec_list == NULL || exec2_list == NULL) {
ff240199 1654 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
54cf91dc
CW
1655 args->buffer_count);
1656 drm_free_large(exec_list);
1657 drm_free_large(exec2_list);
1658 return -ENOMEM;
1659 }
1660 ret = copy_from_user(exec_list,
2bb4629a 1661 to_user_ptr(args->buffers_ptr),
54cf91dc
CW
1662 sizeof(*exec_list) * args->buffer_count);
1663 if (ret != 0) {
ff240199 1664 DRM_DEBUG("copy %d exec entries failed %d\n",
54cf91dc
CW
1665 args->buffer_count, ret);
1666 drm_free_large(exec_list);
1667 drm_free_large(exec2_list);
1668 return -EFAULT;
1669 }
1670
1671 for (i = 0; i < args->buffer_count; i++) {
1672 exec2_list[i].handle = exec_list[i].handle;
1673 exec2_list[i].relocation_count = exec_list[i].relocation_count;
1674 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1675 exec2_list[i].alignment = exec_list[i].alignment;
1676 exec2_list[i].offset = exec_list[i].offset;
1677 if (INTEL_INFO(dev)->gen < 4)
1678 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1679 else
1680 exec2_list[i].flags = 0;
1681 }
1682
1683 exec2.buffers_ptr = args->buffers_ptr;
1684 exec2.buffer_count = args->buffer_count;
1685 exec2.batch_start_offset = args->batch_start_offset;
1686 exec2.batch_len = args->batch_len;
1687 exec2.DR1 = args->DR1;
1688 exec2.DR4 = args->DR4;
1689 exec2.num_cliprects = args->num_cliprects;
1690 exec2.cliprects_ptr = args->cliprects_ptr;
1691 exec2.flags = I915_EXEC_RENDER;
6e0a69db 1692 i915_execbuffer2_set_context_id(exec2, 0);
54cf91dc 1693
41bde553 1694 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
54cf91dc 1695 if (!ret) {
9aab8bff
CW
1696 struct drm_i915_gem_exec_object __user *user_exec_list =
1697 to_user_ptr(args->buffers_ptr);
1698
54cf91dc 1699 /* Copy the new buffer offsets back to the user's exec list. */
9aab8bff
CW
1700 for (i = 0; i < args->buffer_count; i++) {
1701 ret = __copy_to_user(&user_exec_list[i].offset,
1702 &exec2_list[i].offset,
1703 sizeof(user_exec_list[i].offset));
1704 if (ret) {
1705 ret = -EFAULT;
1706 DRM_DEBUG("failed to copy %d exec entries "
1707 "back to user (%d)\n",
1708 args->buffer_count, ret);
1709 break;
1710 }
54cf91dc
CW
1711 }
1712 }
1713
1714 drm_free_large(exec_list);
1715 drm_free_large(exec2_list);
1716 return ret;
1717}
1718
1719int
1720i915_gem_execbuffer2(struct drm_device *dev, void *data,
1721 struct drm_file *file)
1722{
1723 struct drm_i915_gem_execbuffer2 *args = data;
1724 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1725 int ret;
1726
ed8cd3b2
XW
1727 if (args->buffer_count < 1 ||
1728 args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
ff240199 1729 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
54cf91dc
CW
1730 return -EINVAL;
1731 }
1732
9cb34664
DV
1733 if (args->rsvd2 != 0) {
1734 DRM_DEBUG("dirty rvsd2 field\n");
1735 return -EINVAL;
1736 }
1737
8408c282 1738 exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
419fa72a 1739 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
8408c282
CW
1740 if (exec2_list == NULL)
1741 exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1742 args->buffer_count);
54cf91dc 1743 if (exec2_list == NULL) {
ff240199 1744 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
54cf91dc
CW
1745 args->buffer_count);
1746 return -ENOMEM;
1747 }
1748 ret = copy_from_user(exec2_list,
2bb4629a 1749 to_user_ptr(args->buffers_ptr),
54cf91dc
CW
1750 sizeof(*exec2_list) * args->buffer_count);
1751 if (ret != 0) {
ff240199 1752 DRM_DEBUG("copy %d exec entries failed %d\n",
54cf91dc
CW
1753 args->buffer_count, ret);
1754 drm_free_large(exec2_list);
1755 return -EFAULT;
1756 }
1757
41bde553 1758 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
54cf91dc
CW
1759 if (!ret) {
1760 /* Copy the new buffer offsets back to the user's exec list. */
d593d992 1761 struct drm_i915_gem_exec_object2 __user *user_exec_list =
9aab8bff
CW
1762 to_user_ptr(args->buffers_ptr);
1763 int i;
1764
1765 for (i = 0; i < args->buffer_count; i++) {
1766 ret = __copy_to_user(&user_exec_list[i].offset,
1767 &exec2_list[i].offset,
1768 sizeof(user_exec_list[i].offset));
1769 if (ret) {
1770 ret = -EFAULT;
1771 DRM_DEBUG("failed to copy %d exec entries "
1772 "back to user\n",
1773 args->buffer_count);
1774 break;
1775 }
54cf91dc
CW
1776 }
1777 }
1778
1779 drm_free_large(exec2_list);
1780 return ret;
1781}