/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_mocs.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}
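/* Note: pin_display marks an object pinned for scanout. The display engine
 * reads memory directly and does not snoop the CPU cache, which is why such
 * objects still need a clflush for CPU writes even on LLC platforms.
 */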
static int
insert_mappable_node(struct drm_i915_private *i915,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
						   size, 0, 0, 0,
						   i915->ggtt.mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}
static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}
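/* insert_mappable_node()/remove_mappable_node() reserve and release a small
 * scratch slot in the mappable part of the global GTT. The pread/pwrite
 * fallback paths below use it as a sliding window when an object cannot be
 * pinned into the mappable aperture as a whole.
 */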
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (vma->pin_count)
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (vma->pin_count)
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
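/* The available-size figure is only an estimate: it subtracts whatever
 * pinned VMAs happen to be on the active and inactive lists at this
 * instant, and can change as soon as struct_mutex is dropped.
 */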
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};
static int
drop_pages(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma, *next;
	int ret;

	drm_gem_object_reference(&obj->base);
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
		if (i915_vma_unbind(vma))
			break;

	ret = i915_gem_object_put_pages(obj);
	drm_gem_object_unreference(&obj->base);

	return ret;
}
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = drop_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret = 0;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}
void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}
static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
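/* The swizzled copy helpers above are only used for pages whose physical
 * address has bit 17 set (see the callers below). For such pages they copy
 * one 64-byte cacheline at a time and XOR the GPU offset with 64, swapping
 * adjacent cachelines to mirror the bit-17-dependent channel interleave
 * swizzling applied by the memory controller.
 */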
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return -EINVAL;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
static inline unsigned long
slow_user_access(struct io_mapping *mapping,
		 uint64_t page_base, int page_offset,
		 char __user *user_data,
		 unsigned long length, bool pwrite)
{
	void __iomem *ioaddr;
	void *vaddr;
	uint64_t unwritten;

	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)ioaddr + page_offset;
	if (pwrite)
		unwritten = __copy_from_user(vaddr, user_data, length);
	else
		unwritten = __copy_to_user(user_data, vaddr, length);

	io_mapping_unmap(ioaddr);
	return unwritten;
}
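/* slow_user_access() maps a single GGTT page with a regular (non-atomic)
 * write-combining mapping and uses copy routines that may fault, so the
 * callers below drop struct_mutex around it; fast_user_write() further down
 * is the atomic, no-fault counterpart used while the mutex is held.
 */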
635 i915_gem_gtt_pread(struct drm_device
*dev
,
636 struct drm_i915_gem_object
*obj
, uint64_t size
,
637 uint64_t data_offset
, uint64_t data_ptr
)
639 struct drm_i915_private
*dev_priv
= to_i915(dev
);
640 struct i915_ggtt
*ggtt
= &dev_priv
->ggtt
;
641 struct drm_mm_node node
;
642 char __user
*user_data
;
647 ret
= i915_gem_obj_ggtt_pin(obj
, 0, PIN_MAPPABLE
);
649 ret
= insert_mappable_node(dev_priv
, &node
, PAGE_SIZE
);
653 ret
= i915_gem_object_get_pages(obj
);
655 remove_mappable_node(&node
);
659 i915_gem_object_pin_pages(obj
);
661 node
.start
= i915_gem_obj_ggtt_offset(obj
);
662 node
.allocated
= false;
663 ret
= i915_gem_object_put_fence(obj
);
668 ret
= i915_gem_object_set_to_gtt_domain(obj
, false);
672 user_data
= u64_to_user_ptr(data_ptr
);
674 offset
= data_offset
;
676 mutex_unlock(&dev
->struct_mutex
);
677 if (likely(!i915
.prefault_disable
)) {
678 ret
= fault_in_multipages_writeable(user_data
, remain
);
680 mutex_lock(&dev
->struct_mutex
);
686 /* Operation in this page
688 * page_base = page offset within aperture
689 * page_offset = offset within page
690 * page_length = bytes to copy for this page
692 u32 page_base
= node
.start
;
693 unsigned page_offset
= offset_in_page(offset
);
694 unsigned page_length
= PAGE_SIZE
- page_offset
;
695 page_length
= remain
< page_length
? remain
: page_length
;
696 if (node
.allocated
) {
698 ggtt
->base
.insert_page(&ggtt
->base
,
699 i915_gem_object_get_dma_address(obj
, offset
>> PAGE_SHIFT
),
704 page_base
+= offset
& PAGE_MASK
;
706 /* This is a slow read/write as it tries to read from
707 * and write to user memory which may result into page
708 * faults, and so we cannot perform this under struct_mutex.
710 if (slow_user_access(ggtt
->mappable
, page_base
,
711 page_offset
, user_data
,
712 page_length
, false)) {
717 remain
-= page_length
;
718 user_data
+= page_length
;
719 offset
+= page_length
;
722 mutex_lock(&dev
->struct_mutex
);
723 if (ret
== 0 && (obj
->base
.read_domains
& I915_GEM_DOMAIN_GTT
) == 0) {
724 /* The user has modified the object whilst we tried
725 * reading from it, and we now have no idea what domain
726 * the pages should be in. As we have just been touching
727 * them directly, flush everything back to the GTT
730 ret
= i915_gem_object_set_to_gtt_domain(obj
, false);
734 if (node
.allocated
) {
736 ggtt
->base
.clear_range(&ggtt
->base
,
737 node
.start
, node
.size
,
739 i915_gem_object_unpin_pages(obj
);
740 remove_mappable_node(&node
);
742 i915_gem_object_ggtt_unpin(obj
);
749 i915_gem_shmem_pread(struct drm_device
*dev
,
750 struct drm_i915_gem_object
*obj
,
751 struct drm_i915_gem_pread
*args
,
752 struct drm_file
*file
)
754 char __user
*user_data
;
757 int shmem_page_offset
, page_length
, ret
= 0;
758 int obj_do_bit17_swizzling
, page_do_bit17_swizzling
;
760 int needs_clflush
= 0;
761 struct sg_page_iter sg_iter
;
763 if (!i915_gem_object_has_struct_page(obj
))
766 user_data
= u64_to_user_ptr(args
->data_ptr
);
769 obj_do_bit17_swizzling
= i915_gem_object_needs_bit17_swizzle(obj
);
771 ret
= i915_gem_obj_prepare_shmem_read(obj
, &needs_clflush
);
775 offset
= args
->offset
;
777 for_each_sg_page(obj
->pages
->sgl
, &sg_iter
, obj
->pages
->nents
,
778 offset
>> PAGE_SHIFT
) {
779 struct page
*page
= sg_page_iter_page(&sg_iter
);
784 /* Operation in this page
786 * shmem_page_offset = offset within page in shmem file
787 * page_length = bytes to copy for this page
789 shmem_page_offset
= offset_in_page(offset
);
790 page_length
= remain
;
791 if ((shmem_page_offset
+ page_length
) > PAGE_SIZE
)
792 page_length
= PAGE_SIZE
- shmem_page_offset
;
794 page_do_bit17_swizzling
= obj_do_bit17_swizzling
&&
795 (page_to_phys(page
) & (1 << 17)) != 0;
797 ret
= shmem_pread_fast(page
, shmem_page_offset
, page_length
,
798 user_data
, page_do_bit17_swizzling
,
803 mutex_unlock(&dev
->struct_mutex
);
805 if (likely(!i915
.prefault_disable
) && !prefaulted
) {
806 ret
= fault_in_multipages_writeable(user_data
, remain
);
807 /* Userspace is tricking us, but we've already clobbered
808 * its pages with the prefault and promised to write the
809 * data up to the first fault. Hence ignore any errors
810 * and just continue. */
815 ret
= shmem_pread_slow(page
, shmem_page_offset
, page_length
,
816 user_data
, page_do_bit17_swizzling
,
819 mutex_lock(&dev
->struct_mutex
);
825 remain
-= page_length
;
826 user_data
+= page_length
;
827 offset
+= page_length
;
831 i915_gem_object_unpin_pages(obj
);
837 * Reads data from the object referenced by handle.
838 * @dev: drm device pointer
839 * @data: ioctl data blob
840 * @file: drm file pointer
842 * On error, the contents of *data are undefined.
845 i915_gem_pread_ioctl(struct drm_device
*dev
, void *data
,
846 struct drm_file
*file
)
848 struct drm_i915_gem_pread
*args
= data
;
849 struct drm_i915_gem_object
*obj
;
855 if (!access_ok(VERIFY_WRITE
,
856 u64_to_user_ptr(args
->data_ptr
),
860 ret
= i915_mutex_lock_interruptible(dev
);
864 obj
= to_intel_bo(drm_gem_object_lookup(file
, args
->handle
));
865 if (&obj
->base
== NULL
) {
870 /* Bounds check source. */
871 if (args
->offset
> obj
->base
.size
||
872 args
->size
> obj
->base
.size
- args
->offset
) {
877 trace_i915_gem_object_pread(obj
, args
->offset
, args
->size
);
879 ret
= i915_gem_shmem_pread(dev
, obj
, args
, file
);
881 /* pread for non shmem backed objects */
882 if (ret
== -EFAULT
|| ret
== -ENODEV
)
883 ret
= i915_gem_gtt_pread(dev
, obj
, args
->size
,
884 args
->offset
, args
->data_ptr
);
887 drm_gem_object_unreference(&obj
->base
);
889 mutex_unlock(&dev
->struct_mutex
);
/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}
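/* fast_user_write() runs with struct_mutex held and an atomic WC mapping,
 * so it must not fault; a non-zero return simply means the source user page
 * was not resident and the caller falls back to slow_user_access() after
 * dropping the lock.
 */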
917 * This is the fast pwrite path, where we copy the data directly from the
918 * user into the GTT, uncached.
919 * @dev: drm device pointer
920 * @obj: i915 gem object
921 * @args: pwrite arguments structure
922 * @file: drm file pointer
925 i915_gem_gtt_pwrite_fast(struct drm_i915_private
*i915
,
926 struct drm_i915_gem_object
*obj
,
927 struct drm_i915_gem_pwrite
*args
,
928 struct drm_file
*file
)
930 struct i915_ggtt
*ggtt
= &i915
->ggtt
;
931 struct drm_device
*dev
= obj
->base
.dev
;
932 struct drm_mm_node node
;
933 uint64_t remain
, offset
;
934 char __user
*user_data
;
936 bool hit_slow_path
= false;
938 if (obj
->tiling_mode
!= I915_TILING_NONE
)
941 ret
= i915_gem_obj_ggtt_pin(obj
, 0, PIN_MAPPABLE
| PIN_NONBLOCK
);
943 ret
= insert_mappable_node(i915
, &node
, PAGE_SIZE
);
947 ret
= i915_gem_object_get_pages(obj
);
949 remove_mappable_node(&node
);
953 i915_gem_object_pin_pages(obj
);
955 node
.start
= i915_gem_obj_ggtt_offset(obj
);
956 node
.allocated
= false;
957 ret
= i915_gem_object_put_fence(obj
);
962 ret
= i915_gem_object_set_to_gtt_domain(obj
, true);
966 intel_fb_obj_invalidate(obj
, ORIGIN_GTT
);
969 user_data
= u64_to_user_ptr(args
->data_ptr
);
970 offset
= args
->offset
;
973 /* Operation in this page
975 * page_base = page offset within aperture
976 * page_offset = offset within page
977 * page_length = bytes to copy for this page
979 u32 page_base
= node
.start
;
980 unsigned page_offset
= offset_in_page(offset
);
981 unsigned page_length
= PAGE_SIZE
- page_offset
;
982 page_length
= remain
< page_length
? remain
: page_length
;
983 if (node
.allocated
) {
984 wmb(); /* flush the write before we modify the GGTT */
985 ggtt
->base
.insert_page(&ggtt
->base
,
986 i915_gem_object_get_dma_address(obj
, offset
>> PAGE_SHIFT
),
987 node
.start
, I915_CACHE_NONE
, 0);
988 wmb(); /* flush modifications to the GGTT (insert_page) */
990 page_base
+= offset
& PAGE_MASK
;
992 /* If we get a fault while copying data, then (presumably) our
993 * source page isn't available. Return the error and we'll
994 * retry in the slow path.
995 * If the object is non-shmem backed, we retry again with the
996 * path that handles page fault.
998 if (fast_user_write(ggtt
->mappable
, page_base
,
999 page_offset
, user_data
, page_length
)) {
1000 hit_slow_path
= true;
1001 mutex_unlock(&dev
->struct_mutex
);
1002 if (slow_user_access(ggtt
->mappable
,
1004 page_offset
, user_data
,
1005 page_length
, true)) {
1007 mutex_lock(&dev
->struct_mutex
);
1011 mutex_lock(&dev
->struct_mutex
);
1014 remain
-= page_length
;
1015 user_data
+= page_length
;
1016 offset
+= page_length
;
1020 if (hit_slow_path
) {
1022 (obj
->base
.read_domains
& I915_GEM_DOMAIN_GTT
) == 0) {
1023 /* The user has modified the object whilst we tried
1024 * reading from it, and we now have no idea what domain
1025 * the pages should be in. As we have just been touching
1026 * them directly, flush everything back to the GTT
1029 ret
= i915_gem_object_set_to_gtt_domain(obj
, false);
1033 intel_fb_obj_flush(obj
, false, ORIGIN_GTT
);
1035 if (node
.allocated
) {
1037 ggtt
->base
.clear_range(&ggtt
->base
,
1038 node
.start
, node
.size
,
1040 i915_gem_object_unpin_pages(obj
);
1041 remove_mappable_node(&node
);
1043 i915_gem_object_ggtt_unpin(obj
);
1049 /* Per-page copy function for the shmem pwrite fastpath.
1050 * Flushes invalid cachelines before writing to the target if
1051 * needs_clflush_before is set and flushes out any written cachelines after
1052 * writing if needs_clflush is set. */
1054 shmem_pwrite_fast(struct page
*page
, int shmem_page_offset
, int page_length
,
1055 char __user
*user_data
,
1056 bool page_do_bit17_swizzling
,
1057 bool needs_clflush_before
,
1058 bool needs_clflush_after
)
1063 if (unlikely(page_do_bit17_swizzling
))
1066 vaddr
= kmap_atomic(page
);
1067 if (needs_clflush_before
)
1068 drm_clflush_virt_range(vaddr
+ shmem_page_offset
,
1070 ret
= __copy_from_user_inatomic(vaddr
+ shmem_page_offset
,
1071 user_data
, page_length
);
1072 if (needs_clflush_after
)
1073 drm_clflush_virt_range(vaddr
+ shmem_page_offset
,
1075 kunmap_atomic(vaddr
);
1077 return ret
? -EFAULT
: 0;
1080 /* Only difference to the fast-path function is that this can handle bit17
1081 * and uses non-atomic copy and kmap functions. */
1083 shmem_pwrite_slow(struct page
*page
, int shmem_page_offset
, int page_length
,
1084 char __user
*user_data
,
1085 bool page_do_bit17_swizzling
,
1086 bool needs_clflush_before
,
1087 bool needs_clflush_after
)
1093 if (unlikely(needs_clflush_before
|| page_do_bit17_swizzling
))
1094 shmem_clflush_swizzled_range(vaddr
+ shmem_page_offset
,
1096 page_do_bit17_swizzling
);
1097 if (page_do_bit17_swizzling
)
1098 ret
= __copy_from_user_swizzled(vaddr
, shmem_page_offset
,
1102 ret
= __copy_from_user(vaddr
+ shmem_page_offset
,
1105 if (needs_clflush_after
)
1106 shmem_clflush_swizzled_range(vaddr
+ shmem_page_offset
,
1108 page_do_bit17_swizzling
);
1111 return ret
? -EFAULT
: 0;
1115 i915_gem_shmem_pwrite(struct drm_device
*dev
,
1116 struct drm_i915_gem_object
*obj
,
1117 struct drm_i915_gem_pwrite
*args
,
1118 struct drm_file
*file
)
1122 char __user
*user_data
;
1123 int shmem_page_offset
, page_length
, ret
= 0;
1124 int obj_do_bit17_swizzling
, page_do_bit17_swizzling
;
1125 int hit_slowpath
= 0;
1126 int needs_clflush_after
= 0;
1127 int needs_clflush_before
= 0;
1128 struct sg_page_iter sg_iter
;
1130 user_data
= u64_to_user_ptr(args
->data_ptr
);
1131 remain
= args
->size
;
1133 obj_do_bit17_swizzling
= i915_gem_object_needs_bit17_swizzle(obj
);
1135 if (obj
->base
.write_domain
!= I915_GEM_DOMAIN_CPU
) {
1136 /* If we're not in the cpu write domain, set ourself into the gtt
1137 * write domain and manually flush cachelines (if required). This
1138 * optimizes for the case when the gpu will use the data
1139 * right away and we therefore have to clflush anyway. */
1140 needs_clflush_after
= cpu_write_needs_clflush(obj
);
1141 ret
= i915_gem_object_wait_rendering(obj
, false);
1145 /* Same trick applies to invalidate partially written cachelines read
1146 * before writing. */
1147 if ((obj
->base
.read_domains
& I915_GEM_DOMAIN_CPU
) == 0)
1148 needs_clflush_before
=
1149 !cpu_cache_is_coherent(dev
, obj
->cache_level
);
1151 ret
= i915_gem_object_get_pages(obj
);
1155 intel_fb_obj_invalidate(obj
, ORIGIN_CPU
);
1157 i915_gem_object_pin_pages(obj
);
1159 offset
= args
->offset
;
1162 for_each_sg_page(obj
->pages
->sgl
, &sg_iter
, obj
->pages
->nents
,
1163 offset
>> PAGE_SHIFT
) {
1164 struct page
*page
= sg_page_iter_page(&sg_iter
);
1165 int partial_cacheline_write
;
1170 /* Operation in this page
1172 * shmem_page_offset = offset within page in shmem file
1173 * page_length = bytes to copy for this page
1175 shmem_page_offset
= offset_in_page(offset
);
1177 page_length
= remain
;
1178 if ((shmem_page_offset
+ page_length
) > PAGE_SIZE
)
1179 page_length
= PAGE_SIZE
- shmem_page_offset
;
1181 /* If we don't overwrite a cacheline completely we need to be
1182 * careful to have up-to-date data by first clflushing. Don't
1183 * overcomplicate things and flush the entire patch. */
1184 partial_cacheline_write
= needs_clflush_before
&&
1185 ((shmem_page_offset
| page_length
)
1186 & (boot_cpu_data
.x86_clflush_size
- 1));
1188 page_do_bit17_swizzling
= obj_do_bit17_swizzling
&&
1189 (page_to_phys(page
) & (1 << 17)) != 0;
1191 ret
= shmem_pwrite_fast(page
, shmem_page_offset
, page_length
,
1192 user_data
, page_do_bit17_swizzling
,
1193 partial_cacheline_write
,
1194 needs_clflush_after
);
1199 mutex_unlock(&dev
->struct_mutex
);
1200 ret
= shmem_pwrite_slow(page
, shmem_page_offset
, page_length
,
1201 user_data
, page_do_bit17_swizzling
,
1202 partial_cacheline_write
,
1203 needs_clflush_after
);
1205 mutex_lock(&dev
->struct_mutex
);
1211 remain
-= page_length
;
1212 user_data
+= page_length
;
1213 offset
+= page_length
;
1217 i915_gem_object_unpin_pages(obj
);
1221 * Fixup: Flush cpu caches in case we didn't flush the dirty
1222 * cachelines in-line while writing and the object moved
1223 * out of the cpu write domain while we've dropped the lock.
1225 if (!needs_clflush_after
&&
1226 obj
->base
.write_domain
!= I915_GEM_DOMAIN_CPU
) {
1227 if (i915_gem_clflush_object(obj
, obj
->pin_display
))
1228 needs_clflush_after
= true;
1232 if (needs_clflush_after
)
1233 i915_gem_chipset_flush(to_i915(dev
));
1235 obj
->cache_dirty
= true;
1237 intel_fb_obj_flush(obj
, false, ORIGIN_CPU
);
1242 * Writes data to the object referenced by handle.
1244 * @data: ioctl data blob
1247 * On error, the contents of the buffer that were to be modified are undefined.
1250 i915_gem_pwrite_ioctl(struct drm_device
*dev
, void *data
,
1251 struct drm_file
*file
)
1253 struct drm_i915_private
*dev_priv
= to_i915(dev
);
1254 struct drm_i915_gem_pwrite
*args
= data
;
1255 struct drm_i915_gem_object
*obj
;
1258 if (args
->size
== 0)
1261 if (!access_ok(VERIFY_READ
,
1262 u64_to_user_ptr(args
->data_ptr
),
1266 if (likely(!i915
.prefault_disable
)) {
1267 ret
= fault_in_multipages_readable(u64_to_user_ptr(args
->data_ptr
),
1273 intel_runtime_pm_get(dev_priv
);
1275 ret
= i915_mutex_lock_interruptible(dev
);
1279 obj
= to_intel_bo(drm_gem_object_lookup(file
, args
->handle
));
1280 if (&obj
->base
== NULL
) {
1285 /* Bounds check destination. */
1286 if (args
->offset
> obj
->base
.size
||
1287 args
->size
> obj
->base
.size
- args
->offset
) {
1292 trace_i915_gem_object_pwrite(obj
, args
->offset
, args
->size
);
1295 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1296 * it would end up going through the fenced access, and we'll get
1297 * different detiling behavior between reading and writing.
1298 * pread/pwrite currently are reading and writing from the CPU
1299 * perspective, requiring manual detiling by the client.
1301 if (!i915_gem_object_has_struct_page(obj
) ||
1302 cpu_write_needs_clflush(obj
)) {
1303 ret
= i915_gem_gtt_pwrite_fast(dev_priv
, obj
, args
, file
);
1304 /* Note that the gtt paths might fail with non-page-backed user
1305 * pointers (e.g. gtt mappings when moving data between
1306 * textures). Fallback to the shmem path in that case. */
1309 if (ret
== -EFAULT
) {
1310 if (obj
->phys_handle
)
1311 ret
= i915_gem_phys_pwrite(obj
, args
, file
);
1312 else if (i915_gem_object_has_struct_page(obj
))
1313 ret
= i915_gem_shmem_pwrite(dev
, obj
, args
, file
);
1319 drm_gem_object_unreference(&obj
->base
);
1321 mutex_unlock(&dev
->struct_mutex
);
1323 intel_runtime_pm_put(dev_priv
);
1329 i915_gem_check_wedge(unsigned reset_counter
, bool interruptible
)
1331 if (__i915_terminally_wedged(reset_counter
))
1334 if (__i915_reset_in_progress(reset_counter
)) {
1335 /* Non-interruptible callers can't handle -EAGAIN, hence return
1336 * -EIO unconditionally for these. */
static unsigned long local_clock_us(unsigned *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned cpu)
{
	unsigned this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}
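/* local_clock_us() shifts the ns-resolution local_clock() right by 10,
 * i.e. divides by 1024, as a cheap approximation of microseconds;
 * busywait_stop() ends the spin either when that approximate deadline
 * passes or when the task has migrated to another CPU, since cross-CPU
 * local_clock() comparisons are meaningless.
 */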
1378 bool __i915_spin_request(const struct drm_i915_gem_request
*req
,
1379 int state
, unsigned long timeout_us
)
1383 /* When waiting for high frequency requests, e.g. during synchronous
1384 * rendering split between the CPU and GPU, the finite amount of time
1385 * required to set up the irq and wait upon it limits the response
1386 * rate. By busywaiting on the request completion for a short while we
 * can service the high frequency waits as quickly as possible. However,
1388 * if it is a slow request, we want to sleep as quickly as possible.
1389 * The tradeoff between waiting and sleeping is roughly the time it
1390 * takes to sleep on a request, on the order of a microsecond.
1393 timeout_us
+= local_clock_us(&cpu
);
1395 if (i915_gem_request_completed(req
))
1398 if (signal_pending_state(state
, current
))
1401 if (busywait_stop(timeout_us
, cpu
))
1404 cpu_relax_lowlatency();
1405 } while (!need_resched());
1411 * __i915_wait_request - wait until execution of request has finished
1413 * @interruptible: do an interruptible wait (normally yes)
1414 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1417 * Note: It is of utmost importance that the passed in seqno and reset_counter
1418 * values have been read by the caller in an smp safe manner. Where read-side
1419 * locks are involved, it is sufficient to read the reset_counter before
1420 * unlocking the lock that protects the seqno. For lockless tricks, the
1421 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * Returns 0 if the request was found within the allotted time. Else returns the
1425 * errno with remaining time filled in timeout argument.
1427 int __i915_wait_request(struct drm_i915_gem_request
*req
,
1430 struct intel_rps_client
*rps
)
1432 int state
= interruptible
? TASK_INTERRUPTIBLE
: TASK_UNINTERRUPTIBLE
;
1434 struct intel_wait wait
;
1435 unsigned long timeout_remain
;
1436 s64 before
= 0; /* Only to silence a compiler warning. */
1441 if (list_empty(&req
->list
))
1444 if (i915_gem_request_completed(req
))
1447 timeout_remain
= MAX_SCHEDULE_TIMEOUT
;
1449 if (WARN_ON(*timeout
< 0))
1455 timeout_remain
= nsecs_to_jiffies_timeout(*timeout
);
1458 * Record current time in case interrupted by signal, or wedged.
1460 before
= ktime_get_raw_ns();
1463 trace_i915_gem_request_wait_begin(req
);
1465 /* This client is about to stall waiting for the GPU. In many cases
1466 * this is undesirable and limits the throughput of the system, as
1467 * many clients cannot continue processing user input/output whilst
1468 * blocked. RPS autotuning may take tens of milliseconds to respond
1469 * to the GPU load and thus incurs additional latency for the client.
1470 * We can circumvent that by promoting the GPU frequency to maximum
1471 * before we wait. This makes the GPU throttle up much more quickly
1472 * (good for benchmarks and user experience, e.g. window animations),
1473 * but at a cost of spending more power processing the workload
1474 * (bad for battery). Not all clients even want their results
1475 * immediately and for them we should just let the GPU select its own
1476 * frequency to maximise efficiency. To prevent a single client from
1477 * forcing the clocks too high for the whole system, we only allow
1478 * each client to waitboost once in a busy period.
1480 if (INTEL_INFO(req
->i915
)->gen
>= 6)
1481 gen6_rps_boost(req
->i915
, rps
, req
->emitted_jiffies
);
1483 /* Optimistic spin for the next ~jiffie before touching IRQs */
1484 if (i915_spin_request(req
, state
, 5))
1487 set_current_state(state
);
1488 add_wait_queue(&req
->i915
->gpu_error
.wait_queue
, &reset
);
1490 intel_wait_init(&wait
, req
->seqno
);
1491 if (intel_engine_add_wait(req
->engine
, &wait
))
1492 /* In order to check that we haven't missed the interrupt
1493 * as we enabled it, we need to kick ourselves to do a
1494 * coherent check on the seqno before we sleep.
1499 if (signal_pending_state(state
, current
)) {
1504 /* Ensure that even if the GPU hangs, we get woken up.
1506 * However, note that if no one is waiting, we never notice
1507 * a gpu hang. Eventually, we will have to wait for a resource
1508 * held by the GPU and so trigger a hangcheck. In the most
1509 * pathological case, this will be upon memory starvation!
1511 i915_queue_hangcheck(req
->i915
);
1513 timeout_remain
= io_schedule_timeout(timeout_remain
);
1514 if (timeout_remain
== 0) {
1519 if (intel_wait_complete(&wait
))
1522 set_current_state(state
);
1525 /* Carefully check if the request is complete, giving time
1526 * for the seqno to be visible following the interrupt.
1527 * We also have to check in case we are kicked by the GPU
1528 * reset in order to drop the struct_mutex.
1530 if (__i915_request_irq_complete(req
))
1533 /* Only spin if we know the GPU is processing this request */
1534 if (i915_spin_request(req
, state
, 2))
1537 remove_wait_queue(&req
->i915
->gpu_error
.wait_queue
, &reset
);
1539 intel_engine_remove_wait(req
->engine
, &wait
);
1540 __set_current_state(TASK_RUNNING
);
1542 trace_i915_gem_request_wait_end(req
);
1545 s64 tres
= *timeout
- (ktime_get_raw_ns() - before
);
1547 *timeout
= tres
< 0 ? 0 : tres
;
1550 * Apparently ktime isn't accurate enough and occasionally has a
1551 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
1552 * things up to make the test happy. We allow up to 1 jiffy.
 * This is a regression from the timespec->ktime conversion.
1556 if (ret
== -ETIME
&& *timeout
< jiffies_to_usecs(1)*1000)
1560 if (rps
&& req
->seqno
== req
->engine
->last_submitted_seqno
) {
1561 /* The GPU is now idle and this client has stalled.
1562 * Since no other client has submitted a request in the
1563 * meantime, assume that this client is the only one
1564 * supplying work to the GPU but is unable to keep that
1565 * work supplied because it is waiting. Since the GPU is
1566 * then never kept fully busy, RPS autoclocking will
1567 * keep the clocks relatively low, causing further delays.
1568 * Compensate by giving the synchronous client credit for
1569 * a waitboost next time.
1571 spin_lock(&req
->i915
->rps
.client_lock
);
1572 list_del_init(&rps
->link
);
1573 spin_unlock(&req
->i915
->rps
.client_lock
);
1579 int i915_gem_request_add_to_client(struct drm_i915_gem_request
*req
,
1580 struct drm_file
*file
)
1582 struct drm_i915_file_private
*file_priv
;
1584 WARN_ON(!req
|| !file
|| req
->file_priv
);
1592 file_priv
= file
->driver_priv
;
1594 spin_lock(&file_priv
->mm
.lock
);
1595 req
->file_priv
= file_priv
;
1596 list_add_tail(&req
->client_list
, &file_priv
->mm
.request_list
);
1597 spin_unlock(&file_priv
->mm
.lock
);
1599 req
->pid
= get_pid(task_pid(current
));
1605 i915_gem_request_remove_from_client(struct drm_i915_gem_request
*request
)
1607 struct drm_i915_file_private
*file_priv
= request
->file_priv
;
1612 spin_lock(&file_priv
->mm
.lock
);
1613 list_del(&request
->client_list
);
1614 request
->file_priv
= NULL
;
1615 spin_unlock(&file_priv
->mm
.lock
);
1617 put_pid(request
->pid
);
1618 request
->pid
= NULL
;
1621 static void i915_gem_request_retire(struct drm_i915_gem_request
*request
)
1623 trace_i915_gem_request_retire(request
);
1625 /* We know the GPU must have read the request to have
1626 * sent us the seqno + interrupt, so use the position
1627 * of tail of the request to update the last known position
1630 * Note this requires that we are always called in request
1633 request
->ringbuf
->last_retired_head
= request
->postfix
;
1635 list_del_init(&request
->list
);
1636 i915_gem_request_remove_from_client(request
);
1638 if (request
->previous_context
) {
1639 if (i915
.enable_execlists
)
1640 intel_lr_context_unpin(request
->previous_context
,
1644 i915_gem_context_unreference(request
->ctx
);
1645 i915_gem_request_unreference(request
);
1649 __i915_gem_request_retire__upto(struct drm_i915_gem_request
*req
)
1651 struct intel_engine_cs
*engine
= req
->engine
;
1652 struct drm_i915_gem_request
*tmp
;
1654 lockdep_assert_held(&engine
->i915
->drm
.struct_mutex
);
1656 if (list_empty(&req
->list
))
1660 tmp
= list_first_entry(&engine
->request_list
,
1661 typeof(*tmp
), list
);
1663 i915_gem_request_retire(tmp
);
1664 } while (tmp
!= req
);
1666 WARN_ON(i915_verify_lists(engine
->dev
));
1670 * Waits for a request to be signaled, and cleans up the
1671 * request and object lists appropriately for that event.
1672 * @req: request to wait on
1675 i915_wait_request(struct drm_i915_gem_request
*req
)
1677 struct drm_i915_private
*dev_priv
= req
->i915
;
1681 interruptible
= dev_priv
->mm
.interruptible
;
1683 BUG_ON(!mutex_is_locked(&dev_priv
->drm
.struct_mutex
));
1685 ret
= __i915_wait_request(req
, interruptible
, NULL
, NULL
);
1689 /* If the GPU hung, we want to keep the requests to find the guilty. */
1690 if (!i915_reset_in_progress(&dev_priv
->gpu_error
))
1691 __i915_gem_request_retire__upto(req
);
1697 * Ensures that all rendering to the object has completed and the object is
1698 * safe to unbind from the GTT or access from the CPU.
1699 * @obj: i915 gem object
1700 * @readonly: waiting for read access or write
1703 i915_gem_object_wait_rendering(struct drm_i915_gem_object
*obj
,
1712 if (obj
->last_write_req
!= NULL
) {
1713 ret
= i915_wait_request(obj
->last_write_req
);
1717 i
= obj
->last_write_req
->engine
->id
;
1718 if (obj
->last_read_req
[i
] == obj
->last_write_req
)
1719 i915_gem_object_retire__read(obj
, i
);
1721 i915_gem_object_retire__write(obj
);
1724 for (i
= 0; i
< I915_NUM_ENGINES
; i
++) {
1725 if (obj
->last_read_req
[i
] == NULL
)
1728 ret
= i915_wait_request(obj
->last_read_req
[i
]);
1732 i915_gem_object_retire__read(obj
, i
);
1734 GEM_BUG_ON(obj
->active
);
1741 i915_gem_object_retire_request(struct drm_i915_gem_object
*obj
,
1742 struct drm_i915_gem_request
*req
)
1744 int ring
= req
->engine
->id
;
1746 if (obj
->last_read_req
[ring
] == req
)
1747 i915_gem_object_retire__read(obj
, ring
);
1748 else if (obj
->last_write_req
== req
)
1749 i915_gem_object_retire__write(obj
);
1751 if (!i915_reset_in_progress(&req
->i915
->gpu_error
))
1752 __i915_gem_request_retire__upto(req
);
1755 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1756 * as the object state may change during this call.
1758 static __must_check
int
1759 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object
*obj
,
1760 struct intel_rps_client
*rps
,
1763 struct drm_device
*dev
= obj
->base
.dev
;
1764 struct drm_i915_private
*dev_priv
= to_i915(dev
);
1765 struct drm_i915_gem_request
*requests
[I915_NUM_ENGINES
];
1768 BUG_ON(!mutex_is_locked(&dev
->struct_mutex
));
1769 BUG_ON(!dev_priv
->mm
.interruptible
);
1775 struct drm_i915_gem_request
*req
;
1777 req
= obj
->last_write_req
;
1781 requests
[n
++] = i915_gem_request_reference(req
);
1783 for (i
= 0; i
< I915_NUM_ENGINES
; i
++) {
1784 struct drm_i915_gem_request
*req
;
1786 req
= obj
->last_read_req
[i
];
1790 requests
[n
++] = i915_gem_request_reference(req
);
1794 mutex_unlock(&dev
->struct_mutex
);
1796 for (i
= 0; ret
== 0 && i
< n
; i
++)
1797 ret
= __i915_wait_request(requests
[i
], true, NULL
, rps
);
1798 mutex_lock(&dev
->struct_mutex
);
1800 for (i
= 0; i
< n
; i
++) {
1802 i915_gem_object_retire_request(obj
, requests
[i
]);
1803 i915_gem_request_unreference(requests
[i
]);
1809 static struct intel_rps_client
*to_rps_client(struct drm_file
*file
)
1811 struct drm_i915_file_private
*fpriv
= file
->driver_priv
;
1815 static enum fb_op_origin
1816 write_origin(struct drm_i915_gem_object
*obj
, unsigned domain
)
1818 return domain
== I915_GEM_DOMAIN_GTT
&& !obj
->has_wc_mmap
?
1819 ORIGIN_GTT
: ORIGIN_CPU
;
1823 * Called when user space prepares to use an object with the CPU, either
1824 * through the mmap ioctl's mapping or a GTT mapping.
1826 * @data: ioctl data blob
1830 i915_gem_set_domain_ioctl(struct drm_device
*dev
, void *data
,
1831 struct drm_file
*file
)
1833 struct drm_i915_gem_set_domain
*args
= data
;
1834 struct drm_i915_gem_object
*obj
;
1835 uint32_t read_domains
= args
->read_domains
;
1836 uint32_t write_domain
= args
->write_domain
;
1839 /* Only handle setting domains to types used by the CPU. */
1840 if (write_domain
& I915_GEM_GPU_DOMAINS
)
1843 if (read_domains
& I915_GEM_GPU_DOMAINS
)
1846 /* Having something in the write domain implies it's in the read
1847 * domain, and only that read domain. Enforce that in the request.
1849 if (write_domain
!= 0 && read_domains
!= write_domain
)
1852 ret
= i915_mutex_lock_interruptible(dev
);
1856 obj
= to_intel_bo(drm_gem_object_lookup(file
, args
->handle
));
1857 if (&obj
->base
== NULL
) {
1862 /* Try to flush the object off the GPU without holding the lock.
1863 * We will repeat the flush holding the lock in the normal manner
1864 * to catch cases where we are gazumped.
1866 ret
= i915_gem_object_wait_rendering__nonblocking(obj
,
1867 to_rps_client(file
),
1872 if (read_domains
& I915_GEM_DOMAIN_GTT
)
1873 ret
= i915_gem_object_set_to_gtt_domain(obj
, write_domain
!= 0);
1875 ret
= i915_gem_object_set_to_cpu_domain(obj
, write_domain
!= 0);
1877 if (write_domain
!= 0)
1878 intel_fb_obj_invalidate(obj
, write_origin(obj
, write_domain
));
1881 drm_gem_object_unreference(&obj
->base
);
1883 mutex_unlock(&dev
->struct_mutex
);
1888 * Called when user space has done writes to this buffer
1890 * @data: ioctl data blob
1894 i915_gem_sw_finish_ioctl(struct drm_device
*dev
, void *data
,
1895 struct drm_file
*file
)
1897 struct drm_i915_gem_sw_finish
*args
= data
;
1898 struct drm_i915_gem_object
*obj
;
1901 ret
= i915_mutex_lock_interruptible(dev
);
1905 obj
= to_intel_bo(drm_gem_object_lookup(file
, args
->handle
));
1906 if (&obj
->base
== NULL
) {
1911 /* Pinned buffers may be scanout, so flush the cache */
1912 if (obj
->pin_display
)
1913 i915_gem_object_flush_cpu_write_domain(obj
);
1915 drm_gem_object_unreference(&obj
->base
);
1917 mutex_unlock(&dev
->struct_mutex
);
1922 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1925 * @data: ioctl data blob
1928 * While the mapping holds a reference on the contents of the object, it doesn't
1929 * imply a ref on the object itself.
 * DRM driver writers who look at this function as an example for how to do GEM
1934 * mmap support, please don't implement mmap support like here. The modern way
1935 * to implement DRM mmap support is with an mmap offset ioctl (like
1936 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1937 * That way debug tooling like valgrind will understand what's going on, hiding
1938 * the mmap call in a driver private ioctl will break that. The i915 driver only
1939 * does cpu mmaps this way because we didn't know better.
1942 i915_gem_mmap_ioctl(struct drm_device
*dev
, void *data
,
1943 struct drm_file
*file
)
1945 struct drm_i915_gem_mmap
*args
= data
;
1946 struct drm_gem_object
*obj
;
1949 if (args
->flags
& ~(I915_MMAP_WC
))
1952 if (args
->flags
& I915_MMAP_WC
&& !boot_cpu_has(X86_FEATURE_PAT
))
1955 obj
= drm_gem_object_lookup(file
, args
->handle
);
1959 /* prime objects have no backing filp to GEM mmap
1963 drm_gem_object_unreference_unlocked(obj
);
1967 addr
= vm_mmap(obj
->filp
, 0, args
->size
,
1968 PROT_READ
| PROT_WRITE
, MAP_SHARED
,
1970 if (args
->flags
& I915_MMAP_WC
) {
1971 struct mm_struct
*mm
= current
->mm
;
1972 struct vm_area_struct
*vma
;
1974 if (down_write_killable(&mm
->mmap_sem
)) {
1975 drm_gem_object_unreference_unlocked(obj
);
1978 vma
= find_vma(mm
, addr
);
1981 pgprot_writecombine(vm_get_page_prot(vma
->vm_flags
));
1984 up_write(&mm
->mmap_sem
);
1986 /* This may race, but that's ok, it only gets set */
1987 WRITE_ONCE(to_intel_bo(obj
)->has_wc_mmap
, true);
1989 drm_gem_object_unreference_unlocked(obj
);
1990 if (IS_ERR((void *)addr
))
1993 args
->addr_ptr
= (uint64_t) addr
;
1999 * i915_gem_fault - fault a page into the GTT
2000 * @vma: VMA in question
2003 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
2004 * from userspace. The fault handler takes care of binding the object to
2005 * the GTT (if needed), allocating and programming a fence register (again,
2006 * only if needed based on whether the old reg is still valid or the object
2007 * is tiled) and inserting a new PTE into the faulting process.
2009 * Note that the faulting process may involve evicting existing objects
2010 * from the GTT and/or fence registers to make room. So performance may
2011 * suffer if the GTT working set is large or there are few fence registers
2014 int i915_gem_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
2016 struct drm_i915_gem_object
*obj
= to_intel_bo(vma
->vm_private_data
);
2017 struct drm_device
*dev
= obj
->base
.dev
;
2018 struct drm_i915_private
*dev_priv
= to_i915(dev
);
2019 struct i915_ggtt
*ggtt
= &dev_priv
->ggtt
;
2020 struct i915_ggtt_view view
= i915_ggtt_view_normal
;
2021 pgoff_t page_offset
;
2024 bool write
= !!(vmf
->flags
& FAULT_FLAG_WRITE
);
2026 intel_runtime_pm_get(dev_priv
);
2028 /* We don't use vmf->pgoff since that has the fake offset */
2029 page_offset
= ((unsigned long)vmf
->virtual_address
- vma
->vm_start
) >>
2032 ret
= i915_mutex_lock_interruptible(dev
);
2036 trace_i915_gem_object_fault(obj
, page_offset
, true, write
);
2038 /* Try to flush the object off the GPU first without holding the lock.
2039 * Upon reacquiring the lock, we will perform our sanity checks and then
2040 * repeat the flush holding the lock in the normal manner to catch cases
2041 * where we are gazumped.
2043 ret
= i915_gem_object_wait_rendering__nonblocking(obj
, NULL
, !write
);
2047 /* Access to snoopable pages through the GTT is incoherent. */
2048 if (obj
->cache_level
!= I915_CACHE_NONE
&& !HAS_LLC(dev
)) {
2053 /* Use a partial view if the object is bigger than the aperture. */
2054 if (obj
->base
.size
>= ggtt
->mappable_end
&&
2055 obj
->tiling_mode
== I915_TILING_NONE
) {
2056 static const unsigned int chunk_size
= 256; // 1 MiB
2058 memset(&view
, 0, sizeof(view
));
2059 view
.type
= I915_GGTT_VIEW_PARTIAL
;
2060 view
.params
.partial
.offset
= rounddown(page_offset
, chunk_size
);
2061 view
.params
.partial
.size
=
2064 (vma
->vm_end
- vma
->vm_start
)/PAGE_SIZE
-
2065 view
.params
.partial
.offset
);
2068 /* Now pin it into the GTT if needed */
2069 ret
= i915_gem_object_ggtt_pin(obj
, &view
, 0, PIN_MAPPABLE
);
2073 ret
= i915_gem_object_set_to_gtt_domain(obj
, write
);
2077 ret
= i915_gem_object_get_fence(obj
);
2081 /* Finally, remap it using the new GTT offset */
2082 pfn
= ggtt
->mappable_base
+
2083 i915_gem_obj_ggtt_offset_view(obj
, &view
);
2086 if (unlikely(view
.type
== I915_GGTT_VIEW_PARTIAL
)) {
2087 /* Overriding existing pages in partial view does not cause
2088 * us any trouble as TLBs are still valid because the fault
2089 * is due to userspace losing part of the mapping or never
 * having accessed it before (at this partial view's range).
2092 unsigned long base
= vma
->vm_start
+
2093 (view
.params
.partial
.offset
<< PAGE_SHIFT
);
2096 for (i
= 0; i
< view
.params
.partial
.size
; i
++) {
2097 ret
= vm_insert_pfn(vma
, base
+ i
* PAGE_SIZE
, pfn
+ i
);
2102 obj
->fault_mappable
= true;
2104 if (!obj
->fault_mappable
) {
2105 unsigned long size
= min_t(unsigned long,
2106 vma
->vm_end
- vma
->vm_start
,
2110 for (i
= 0; i
< size
>> PAGE_SHIFT
; i
++) {
2111 ret
= vm_insert_pfn(vma
,
2112 (unsigned long)vma
->vm_start
+ i
* PAGE_SIZE
,
2118 obj
->fault_mappable
= true;
2120 ret
= vm_insert_pfn(vma
,
2121 (unsigned long)vmf
->virtual_address
,
2125 i915_gem_object_ggtt_unpin_view(obj
, &view
);
2127 mutex_unlock(&dev
->struct_mutex
);
2132 * We eat errors when the gpu is terminally wedged to avoid
2133 * userspace unduly crashing (gl has no provisions for mmaps to
2134 * fail). But any other -EIO isn't ours (e.g. swap in failure)
2135 * and so needs to be reported.
2137 if (!i915_terminally_wedged(&dev_priv
->gpu_error
)) {
2138 ret
= VM_FAULT_SIGBUS
;
2143 * EAGAIN means the gpu is hung and we'll wait for the error
2144 * handler to reset everything when re-faulting in
2145 * i915_mutex_lock_interruptible.
2152 * EBUSY is ok: this just means that another thread
2153 * already did the job.
2155 ret
= VM_FAULT_NOPAGE
;
2162 ret
= VM_FAULT_SIGBUS
;
2165 WARN_ONCE(ret
, "unhandled error in i915_gem_fault: %i\n", ret
);
2166 ret
= VM_FAULT_SIGBUS
;
2170 intel_runtime_pm_put(dev_priv
);
2175 * i915_gem_release_mmap - remove physical page mappings
2176 * @obj: obj in question
2178 * Preserve the reservation of the mmapping with the DRM core code, but
2179 * relinquish ownership of the pages back to the system.
2181 * It is vital that we remove the page mapping if we have mapped a tiled
2182 * object through the GTT and then lose the fence register due to
2183 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
2185 * mapping will then trigger a page fault on the next user access, allowing
2186 * fixup by i915_gem_fault().
2189 i915_gem_release_mmap(struct drm_i915_gem_object
*obj
)
2191 /* Serialisation between user GTT access and our code depends upon
2192 * revoking the CPU's PTE whilst the mutex is held. The next user
2193 * pagefault then has to wait until we release the mutex.
2195 lockdep_assert_held(&obj
->base
.dev
->struct_mutex
);
2197 if (!obj
->fault_mappable
)
2200 drm_vma_node_unmap(&obj
->base
.vma_node
,
2201 obj
->base
.dev
->anon_inode
->i_mapping
);
2203 /* Ensure that the CPU's PTE are revoked and there are not outstanding
2204 * memory transactions from userspace before we return. The TLB
2205 * flushing implied above by changing the PTE above *should* be
2206 * sufficient, an extra barrier here just provides us with a bit
2207 * of paranoid documentation about our requirement to serialise
2208 * memory writes before touching registers / GSM.
2212 obj
->fault_mappable
= false;
2216 i915_gem_release_all_mmaps(struct drm_i915_private
*dev_priv
)
2218 struct drm_i915_gem_object
*obj
;
2220 list_for_each_entry(obj
, &dev_priv
->mm
.bound_list
, global_list
)
2221 i915_gem_release_mmap(obj
);
2225 i915_gem_get_gtt_size(struct drm_device
*dev
, uint32_t size
, int tiling_mode
)
2229 if (INTEL_INFO(dev
)->gen
>= 4 ||
2230 tiling_mode
== I915_TILING_NONE
)
2233 /* Previous chips need a power-of-two fence region when tiling */
2235 gtt_size
= 1024*1024;
2237 gtt_size
= 512*1024;
2239 while (gtt_size
< size
)
2246 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
2248 * @size: object size
2249 * @tiling_mode: tiling mode
 * @fenced: is fenced alignment required or not
2252 * Return the required GTT alignment for an object, taking into account
2253 * potential fence register mapping.
2256 i915_gem_get_gtt_alignment(struct drm_device
*dev
, uint32_t size
,
2257 int tiling_mode
, bool fenced
)
2260 * Minimum alignment is 4k (GTT page size), but might be greater
2261 * if a fence register is needed for the object.
2263 if (INTEL_INFO(dev
)->gen
>= 4 || (!fenced
&& IS_G33(dev
)) ||
2264 tiling_mode
== I915_TILING_NONE
)
2268 * Previous chips need to be aligned to the size of the smallest
2269 * fence register that can contain the object.
2271 return i915_gem_get_gtt_size(dev
, size
, tiling_mode
);
2274 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object
*obj
)
2276 struct drm_i915_private
*dev_priv
= to_i915(obj
->base
.dev
);
2279 dev_priv
->mm
.shrinker_no_lock_stealing
= true;
2281 ret
= drm_gem_create_mmap_offset(&obj
->base
);
2285 /* Badly fragmented mmap space? The only way we can recover
2286 * space is by destroying unwanted objects. We can't randomly release
2287 * mmap_offsets as userspace expects them to be persistent for the
2288 * lifetime of the objects. The closest we can is to release the
2289 * offsets on purgeable objects by truncating it and marking it purged,
2290 * which prevents userspace from ever using that object again.
2292 i915_gem_shrink(dev_priv
,
2293 obj
->base
.size
>> PAGE_SHIFT
,
2295 I915_SHRINK_UNBOUND
|
2296 I915_SHRINK_PURGEABLE
);
2297 ret
= drm_gem_create_mmap_offset(&obj
->base
);
2301 i915_gem_shrink_all(dev_priv
);
2302 ret
= drm_gem_create_mmap_offset(&obj
->base
);
2304 dev_priv
->mm
.shrinker_no_lock_stealing
= false;
2309 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object
*obj
)
2311 drm_gem_free_mmap_offset(&obj
->base
);
int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->madv = __I915_MADV_PURGED;
}

/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = file_inode(obj->base.filp)->i_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
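
/* Release the shmemfs backing pages: move the object back to the CPU
 * domain, write back any dirty pages and drop our page references so
 * the VM can reclaim them.
 */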
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	i915_gem_gtt_finish_object(obj);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sgt_page(page, sgt_iter, obj->pages) {
		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		put_page(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	BUG_ON(i915_gem_obj_bound_any(obj));

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->global_list);

	if (obj->mapping) {
		if (is_vmalloc_addr(obj->mapping))
			vunmap(obj->mapping);
		else
			kunmap(kmap_to_page(obj->mapping));
		obj->mapping = NULL;
	}

	ops->put_pages(obj);
	obj->pages = NULL;

	i915_gem_object_invalidate(obj);

	return 0;
}
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	int ret;
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = file_inode(obj->base.filp)->i_mapping;
	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
	gfp |= __GFP_NORETRY | __GFP_NOWARN;
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_shrink(dev_priv,
					page_count,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err_pages;
			}
		}
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg = sg_next(sg);
			continue;
		}
#endif
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
#endif
		sg_mark_end(sg);
	obj->pages = st;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
		goto err_pages;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	if (obj->tiling_mode != I915_TILING_NONE &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		i915_gem_object_pin_pages(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}
/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;

	return 0;
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	void *addr;

	/* A single page can always be kmapped */
	if (n_pages == 1)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
		if (!pages)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	addr = vmap(pages, n_pages, 0, PAGE_KERNEL);

	if (pages != stack_pages)
		drm_free_large(pages);

	return addr;
}
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	i915_gem_object_pin_pages(obj);

	if (!obj->mapping) {
		obj->mapping = i915_gem_object_map(obj);
		if (!obj->mapping) {
			i915_gem_object_unpin_pages(obj);
			return ERR_PTR(-ENOMEM);
		}
	}

	return obj->mapping;
}
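
/* Mark @vma (and its object) as busy on the engine that will execute
 * @req: take an active reference, track the request per engine and move
 * the VMA onto its address space's active list.
 */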
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct intel_engine_cs *engine;

	engine = i915_gem_request_get_engine(req);

	/* Add a reference if we're newly entering the active list. */
	if (obj->active == 0)
		drm_gem_object_reference(&obj->base);
	obj->active |= intel_engine_flag(engine);

	list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
	i915_gem_request_assign(&obj->last_read_req[engine->id], req);

	list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(obj->last_write_req == NULL);
	GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));

	i915_gem_request_assign(&obj->last_write_req, NULL);
	intel_fb_obj_flush(obj, true, ORIGIN_CS);
}
static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
{
	struct i915_vma *vma;

	GEM_BUG_ON(obj->last_read_req[ring] == NULL);
	GEM_BUG_ON(!(obj->active & (1 << ring)));

	list_del_init(&obj->engine_list[ring]);
	i915_gem_request_assign(&obj->last_read_req[ring], NULL);

	if (obj->last_write_req && obj->last_write_req->engine->id == ring)
		i915_gem_object_retire__write(obj);

	obj->active &= ~(1 << ring);
	if (obj->active)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	list_move_tail(&obj->global_list,
		       &to_i915(obj->base.dev)->mm.bound_list);

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!list_empty(&vma->vm_link))
			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}

	i915_gem_request_assign(&obj->last_fenced_req, NULL);
	drm_gem_object_unreference(&obj->base);
}
static int
i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
	struct intel_engine_cs *engine;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	for_each_engine(engine, dev_priv) {
		ret = intel_engine_idle(engine);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev_priv);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
		while (intel_kick_waiters(dev_priv) ||
		       intel_kick_signalers(dev_priv))
			yield();
	}

	/* Finally reset hw state */
	for_each_engine(engine, dev_priv)
		intel_ring_init_seqno(engine, seqno);

	return 0;
}
int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	ret = i915_gem_init_seqno(dev_priv, seqno - 1);
	if (ret)
		return ret;

	/* Carefully set the last_seqno value so that wrap
	 * detection still works
	 */
	dev_priv->next_seqno = seqno;
	dev_priv->last_seqno = seqno - 1;
	if (dev_priv->last_seqno == 0)
		dev_priv->last_seqno--;

	return 0;
}
int
i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
	/* reserve 0 for non-seqno */
	if (dev_priv->next_seqno == 0) {
		int ret = i915_gem_init_seqno(dev_priv, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
	return 0;
}
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->gt.active_engines |= intel_engine_flag(engine);
	if (dev_priv->gt.awake)
		return;

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}
/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request,
			struct drm_i915_gem_object *obj,
			bool flush_caches)
{
	struct intel_engine_cs *engine;
	struct intel_ringbuffer *ringbuf;
	u32 request_start;
	u32 reserved_tail;
	int ret;

	if (WARN_ON(request == NULL))
		return;

	engine = request->engine;
	ringbuf = request->ringbuf;

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request_start = intel_ring_get_tail(ringbuf);
	reserved_tail = request->reserved_space;
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		if (i915.enable_execlists)
			ret = logical_ring_flush_all_caches(request);
		else
			ret = intel_ring_flush_all_caches(request);
		/* Not allowed to fail! */
		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
	}

	trace_i915_gem_request_add(request);

	request->head = request_start;

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	request->batch_obj = obj;

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */
	request->emitted_jiffies = jiffies;
	request->previous_seqno = engine->last_submitted_seqno;
	smp_store_mb(engine->last_submitted_seqno, request->seqno);
	list_add_tail(&request->list, &engine->request_list);

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request->postfix = intel_ring_get_tail(ringbuf);

	if (i915.enable_execlists)
		ret = engine->emit_request(request);
	else {
		ret = engine->add_request(request);

		request->tail = intel_ring_get_tail(ringbuf);
	}
	/* Not allowed to fail! */
	WARN(ret, "emit|add_request failed: %d!\n", ret);
	/* Sanity check that the reserved size was large enough. */
	ret = intel_ring_get_tail(ringbuf) - request_start;
	if (ret < 0)
		ret += ringbuf->size;
	WARN_ONCE(ret > reserved_tail,
		  "Not enough space reserved (%d bytes) "
		  "for adding the request (%d bytes)\n",
		  reserved_tail, ret);

	i915_gem_mark_busy(engine);
}
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
{
	unsigned long elapsed;

	if (ctx->hang_stats.banned)
		return true;

	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
	if (ctx->hang_stats.ban_period_seconds &&
	    elapsed <= ctx->hang_stats.ban_period_seconds) {
		DRM_DEBUG("context hanging too fast, banning!\n");
		return true;
	}

	return false;
}

static void i915_set_reset_status(struct i915_gem_context *ctx,
				  const bool guilty)
{
	struct i915_ctx_hang_stats *hs = &ctx->hang_stats;

	if (guilty) {
		hs->banned = i915_context_is_banned(ctx);
		hs->guilty_ts = get_seconds();
	} else {
		hs->batch_pending++;
	}
}
void i915_gem_request_free(struct kref *req_ref)
{
	struct drm_i915_gem_request *req = container_of(req_ref,
						 typeof(*req), ref);
	kmem_cache_free(req->i915->requests, req);
}
static inline int
__i915_gem_request_alloc(struct intel_engine_cs *engine,
			 struct i915_gem_context *ctx,
			 struct drm_i915_gem_request **req_out)
{
	struct drm_i915_private *dev_priv = engine->i915;
	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	struct drm_i915_gem_request *req;
	int ret;

	if (!req_out)
		return -EINVAL;

	*req_out = NULL;

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
	 * and restart.
	 */
	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
	if (ret)
		return ret;

	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
	if (req == NULL)
		return -ENOMEM;

	ret = i915_gem_get_seqno(engine->i915, &req->seqno);
	if (ret)
		goto err;

	kref_init(&req->ref);
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = ctx;
	i915_gem_context_reference(req->ctx);

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret)
		goto err_ctx;

	*req_out = req;
	return 0;

err_ctx:
	i915_gem_context_unreference(ctx);
err:
	kmem_cache_free(dev_priv->requests, req);
	return ret;
}
/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_gem_request *req;
	int err;

	if (ctx == NULL)
		ctx = engine->i915->kernel_context;
	err = __i915_gem_request_alloc(engine, ctx, &req);
	return err ? ERR_PTR(err) : req;
}
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;

	/* We are called by the error capture and reset at a random
	 * point in time. In particular, note that neither is crucially
	 * ordered with an interrupt. After a hang, the GPU is dead and we
	 * assume that no more writes can happen (we waited long enough for
	 * all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 */
	list_for_each_entry(request, &engine->request_list, list) {
		if (i915_gem_request_completed(request))
			continue;

		return request;
	}

	return NULL;
}
static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;
	bool ring_hung;

	request = i915_gem_find_active_request(engine);
	if (request == NULL)
		return;

	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;

	i915_set_reset_status(request->ctx, ring_hung);
	list_for_each_entry_continue(request, &engine->request_list, list)
		i915_set_reset_status(request->ctx, false);
}
static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *buffer;

	while (!list_empty(&engine->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&engine->active_list,
				       struct drm_i915_gem_object,
				       engine_list[engine->id]);

		i915_gem_object_retire__read(obj, engine->id);
	}

	/*
	 * Clear the execlists queue up before freeing the requests, as those
	 * are the ones that keep the context and ringbuffer backing objects
	 * pinned in place.
	 */

	if (i915.enable_execlists) {
		/* Ensure irq handler finishes or is cancelled. */
		tasklet_kill(&engine->irq_tasklet);

		intel_execlists_cancel_requests(engine);
	}

	/*
	 * We must free the requests after all the corresponding objects have
	 * been moved off active lists. Which is the same order as the normal
	 * retire_requests function does. This is important if object hold
	 * implicit references on things like e.g. ppgtt address spaces through
	 * the request.
	 */
	while (!list_empty(&engine->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&engine->request_list,
					   struct drm_i915_gem_request,
					   list);

		i915_gem_request_retire(request);
	}

	/* Having flushed all requests from all queues, we know that all
	 * ringbuffers must now be empty. However, since we do not reclaim
	 * all space when retiring the request (to prevent HEADs colliding
	 * with rapid ringbuffer wraparound) the amount of available space
	 * upon reset is less than when we start. Do one more pass over
	 * all the ringbuffers to reset last_retired_head.
	 */
	list_for_each_entry(buffer, &engine->buffers, link) {
		buffer->last_retired_head = buffer->tail;
		intel_ring_update_space(buffer);
	}

	intel_ring_init_seqno(engine, engine->last_submitted_seqno);
}
void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;

	/*
	 * Before we free the objects from the requests, we need to inspect
	 * them for finding the guilty party. As the requests only borrow
	 * their reference to the objects, the inspection must be done first.
	 */
	for_each_engine(engine, dev_priv)
		i915_gem_reset_engine_status(engine);

	for_each_engine(engine, dev_priv)
		i915_gem_reset_engine_cleanup(engine);

	i915_gem_context_reset(dev);

	i915_gem_restore_fences(dev);

	WARN_ON(i915_verify_lists(dev));
}
/**
 * This function clears the request list as sequence numbers are passed.
 * @engine: engine to retire requests on
 */
void
i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
{
	WARN_ON(i915_verify_lists(engine->dev));

	/* Retire requests first as we use it above for the early return.
	 * If we retire requests last, we may use a later seqno and so clear
	 * the requests lists without clearing the active list, leading to
	 * confusion.
	 */
	while (!list_empty(&engine->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&engine->request_list,
					   struct drm_i915_gem_request,
					   list);

		if (!i915_gem_request_completed(request))
			break;

		i915_gem_request_retire(request);
	}

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate,
	 * before we free the context associated with the requests.
	 */
	while (!list_empty(&engine->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&engine->active_list,
				       struct drm_i915_gem_object,
				       engine_list[engine->id]);

		if (!list_empty(&obj->last_read_req[engine->id]->list))
			break;

		i915_gem_object_retire__read(obj, engine->id);
	}

	WARN_ON(i915_verify_lists(engine->dev));
}
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (dev_priv->gt.active_engines == 0)
		return;

	GEM_BUG_ON(!dev_priv->gt.awake);

	for_each_engine(engine, dev_priv) {
		i915_gem_retire_requests_ring(engine);
		if (list_empty(&engine->request_list))
			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
	}

	if (dev_priv->gt.active_engines == 0)
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.idle_work,
				   msecs_to_jiffies(100));
}
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.retire_work.work);
	struct drm_device *dev = &dev_priv->drm;

	/* Come back later if the device is busy... */
	if (mutex_trylock(&dev->struct_mutex)) {
		i915_gem_retire_requests(dev_priv);
		mutex_unlock(&dev->struct_mutex);
	}

	/* Keep the retire handler running until we are finally idle.
	 * We do not need to do this test under locking as in the worst-case
	 * we queue the retire worker once too often.
	 */
	if (READ_ONCE(dev_priv->gt.awake))
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.retire_work,
				   round_jiffies_up_relative(HZ));
}
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.idle_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	unsigned int stuck_engines;
	bool rearm_hangcheck;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

	if (READ_ONCE(dev_priv->gt.active_engines))
		return;

	rearm_hangcheck =
		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	if (!mutex_trylock(&dev->struct_mutex)) {
		/* Currently busy, come back later */
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->gt.idle_work,
				 msecs_to_jiffies(50));
		goto out_rearm;
	}

	if (dev_priv->gt.active_engines)
		goto out_unlock;

	for_each_engine(engine, dev_priv)
		i915_gem_batch_pool_fini(&engine->batch_pool);

	GEM_BUG_ON(!dev_priv->gt.awake);
	dev_priv->gt.awake = false;
	rearm_hangcheck = false;

	stuck_engines = intel_kick_waiters(dev_priv);
	if (unlikely(stuck_engines)) {
		DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
		dev_priv->gpu_error.missed_irq_rings |= stuck_engines;
	}

	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_idle(dev_priv);
	intel_runtime_pm_put(dev_priv);
out_unlock:
	mutex_unlock(&dev->struct_mutex);

out_rearm:
	if (rearm_hangcheck) {
		GEM_BUG_ON(!dev_priv->gt.awake);
		i915_queue_hangcheck(dev_priv);
	}
}
/**
 * Ensures that an object will eventually get non-busy by flushing any required
 * write domains, emitting any outstanding lazy request and retiring any
 * completed requests.
 * @obj: object to flush
 */
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
	int i;

	if (!obj->active)
		return 0;

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct drm_i915_gem_request *req;

		req = obj->last_read_req[i];
		if (req == NULL)
			continue;

		if (i915_gem_request_completed(req))
			i915_gem_object_retire__read(obj, i);
	}

	return 0;
}
/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
	int i, n = 0;
	int ret;

	if (args->flags != 0)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
	if (&obj->base == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

	/* Need to make sure the object gets inactive eventually. */
	ret = i915_gem_object_flush_active(obj);
	if (ret)
		goto out;

	if (!obj->active)
		goto out;

	/* Do this after OLR check to make sure we make forward progress polling
	 * on this IOCTL with a timeout == 0 (like busy ioctl)
	 */
	if (args->timeout_ns == 0) {
		ret = -ETIME;
		goto out;
	}

	drm_gem_object_unreference(&obj->base);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (obj->last_read_req[i] == NULL)
			continue;

		req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
	}

	mutex_unlock(&dev->struct_mutex);

	for (i = 0; i < n; i++) {
		if (ret == 0)
			ret = __i915_wait_request(req[i], true,
						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
						  to_rps_client(file));
		i915_gem_request_unreference(req[i]);
	}
	return ret;

out:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
static int
__i915_gem_object_sync(struct drm_i915_gem_object *obj,
		       struct intel_engine_cs *to,
		       struct drm_i915_gem_request *from_req,
		       struct drm_i915_gem_request **to_req)
{
	struct intel_engine_cs *from;
	int ret;

	from = i915_gem_request_get_engine(from_req);
	if (to == from)
		return 0;

	if (i915_gem_request_completed(from_req))
		return 0;

	if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		ret = __i915_wait_request(from_req,
					  i915->mm.interruptible,
					  NULL,
					  &i915->rps.semaphores);
		if (ret)
			return ret;

		i915_gem_object_retire_request(obj, from_req);
	} else {
		int idx = intel_ring_sync_index(from, to);
		u32 seqno = i915_gem_request_get_seqno(from_req);

		WARN_ON(!to_req);

		if (seqno <= from->semaphore.sync_seqno[idx])
			return 0;

		if (*to_req == NULL) {
			struct drm_i915_gem_request *req;

			req = i915_gem_request_alloc(to, NULL);
			if (IS_ERR(req))
				return PTR_ERR(req);

			*to_req = req;
		}

		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
		ret = to->semaphore.sync_to(*to_req, from, seqno);
		if (ret)
			return ret;

		/* We use last_read_req because sync_to()
		 * might have just caused seqno wrap under
		 * the radar.
		 */
		from->semaphore.sync_seqno[idx] =
			i915_gem_request_get_seqno(obj->last_read_req[from->id]);
	}

	return 0;
}
/**
 * i915_gem_object_sync - sync an object to a ring.
 *
 * @obj: object which may be in use on another ring.
 * @to: ring we wish to use the object on. May be NULL.
 * @to_req: request we wish to use the object for. See below.
 *          This will be allocated and returned if a request is
 *          required but not passed in.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Calling with NULL implies synchronizing the object with the CPU
 * rather than a particular GPU ring. Conceptually we serialise writes
 * between engines inside the GPU. We only allow one engine to write
 * into a buffer at any time, but multiple readers. To ensure each has
 * a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * For CPU synchronisation (NULL to) no request is required. For syncing with
 * rings to_req must be non-NULL. However, a request does not have to be
 * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
 * request will be allocated automatically and returned through *to_req. Note
 * that it is not guaranteed that commands will be emitted (because the system
 * might already be idle). Hence there is no need to create a request that
 * might never have any work submitted. Note further that if a request is
 * returned in *to_req, it is the responsibility of the caller to submit
 * that request (after potentially adding more work to it).
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
		     struct intel_engine_cs *to,
		     struct drm_i915_gem_request **to_req)
{
	const bool readonly = obj->base.pending_write_domain == 0;
	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
	int ret, i, n;

	if (!obj->active)
		return 0;

	if (to == NULL)
		return i915_gem_object_wait_rendering(obj, readonly);

	n = 0;
	if (readonly) {
		if (obj->last_write_req)
			req[n++] = obj->last_write_req;
	} else {
		for (i = 0; i < I915_NUM_ENGINES; i++)
			if (obj->last_read_req[i])
				req[n++] = obj->last_read_req[i];
	}
	for (i = 0; i < n; i++) {
		ret = __i915_gem_object_sync(obj, to, req[i], to_req);
		if (ret)
			return ret;
	}

	return 0;
}
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
	u32 old_write_domain, old_read_domains;

	/* Force a pagefault for domain tracking on next user access */
	i915_gem_release_mmap(obj);

	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		return;

	old_read_domains = obj->base.read_domains;
	old_write_domain = obj->base.write_domain;

	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->pin_count);

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}
static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int ret;

	if (list_empty(&vma->obj_link))
		return 0;

	if (!drm_mm_node_allocated(&vma->node)) {
		i915_gem_vma_destroy(vma);
		return 0;
	}

	if (vma->pin_count)
		return -EBUSY;

	BUG_ON(obj->pages == NULL);

	if (wait) {
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}

	if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
		i915_gem_object_finish_gtt(obj);

		/* release the fence reg _after_ flushing */
		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;

		__i915_vma_iounmap(vma);
	}

	trace_i915_vma_unbind(vma);

	vma->vm->unbind_vma(vma);
	vma->bound = 0;

	list_del_init(&vma->vm_link);
	if (vma->is_ggtt) {
		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
			obj->map_and_fenceable = false;
		} else if (vma->ggtt_view.pages) {
			sg_free_table(vma->ggtt_view.pages);
			kfree(vma->ggtt_view.pages);
		}
		vma->ggtt_view.pages = NULL;
	}

	drm_mm_remove_node(&vma->node);
	i915_gem_vma_destroy(vma);

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist. */
	if (list_empty(&obj->vma_list))
		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);

	return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	return __i915_vma_unbind(vma, true);
}

int __i915_vma_unbind_no_wait(struct i915_vma *vma)
{
	return __i915_vma_unbind(vma, false);
}
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv) {
		if (engine->last_context == NULL)
			continue;

		ret = intel_engine_idle(engine);
		if (ret)
			return ret;
	}

	WARN_ON(i915_verify_lists(dev));
	return 0;
}
static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
				     unsigned long cache_level)
{
	struct drm_mm_node *gtt_space = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	if (!drm_mm_node_allocated(gtt_space))
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}
/**
 * Finds free space in the GTT aperture and binds the object or a view of it
 * there.
 * @obj: object to bind
 * @vm: address space to bind into
 * @ggtt_view: global gtt view if applicable
 * @alignment: requested alignment
 * @flags: mask of PIN_* flags to use
 */
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   const struct i915_ggtt_view *ggtt_view,
			   unsigned alignment,
			   uint64_t flags)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 fence_alignment, unfenced_alignment;
	u32 search_flag, alloc_flag;
	u64 start, end;
	u64 size, fence_size;
	struct i915_vma *vma;
	int ret;

	if (i915_is_ggtt(vm)) {
		u32 view_size;

		if (WARN_ON(!ggtt_view))
			return ERR_PTR(-EINVAL);

		view_size = i915_ggtt_view_size(obj, ggtt_view);

		fence_size = i915_gem_get_gtt_size(dev,
						   view_size,
						   obj->tiling_mode);
		fence_alignment = i915_gem_get_gtt_alignment(dev,
							     view_size,
							     obj->tiling_mode,
							     true);
		unfenced_alignment = i915_gem_get_gtt_alignment(dev,
								view_size,
								obj->tiling_mode,
								false);
		size = flags & PIN_MAPPABLE ? fence_size : view_size;
	} else {
		fence_size = i915_gem_get_gtt_size(dev,
						   obj->base.size,
						   obj->tiling_mode);
		fence_alignment = i915_gem_get_gtt_alignment(dev,
							     obj->base.size,
							     obj->tiling_mode,
							     true);
		unfenced_alignment =
			i915_gem_get_gtt_alignment(dev,
						   obj->base.size,
						   obj->tiling_mode,
						   false);
		size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
	}

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	end = vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, ggtt->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

	if (alignment == 0)
		alignment = flags & PIN_MAPPABLE ? fence_alignment :
						unfenced_alignment;
	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
		DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
			  ggtt_view ? ggtt_view->type : 0,
			  alignment);
		return ERR_PTR(-EINVAL);
	}

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
			  ggtt_view ? ggtt_view->type : 0,
			  size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return ERR_PTR(-E2BIG);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	i915_gem_object_pin_pages(obj);

	vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
			  i915_gem_obj_lookup_or_create_vma(obj, vm);

	if (IS_ERR(vma))
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		uint64_t offset = flags & PIN_OFFSET_MASK;

		if (offset & (alignment - 1) || offset + size > end) {
			ret = -EINVAL;
			goto err_free_vma;
		}
		vma->node.start = offset;
		vma->node.size = size;
		vma->node.color = obj->cache_level;
		ret = drm_mm_reserve_node(&vm->mm, &vma->node);
		if (ret) {
			ret = i915_gem_evict_for_vma(vma);
			if (ret == 0)
				ret = drm_mm_reserve_node(&vm->mm, &vma->node);
		}
		if (ret)
			goto err_free_vma;
	} else {
		if (flags & PIN_HIGH) {
			search_flag = DRM_MM_SEARCH_BELOW;
			alloc_flag = DRM_MM_CREATE_TOP;
		} else {
			search_flag = DRM_MM_SEARCH_DEFAULT;
			alloc_flag = DRM_MM_CREATE_DEFAULT;
		}

search_free:
		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
							  size, alignment,
							  obj->cache_level,
							  start, end,
							  search_flag,
							  alloc_flag);
		if (ret) {
			ret = i915_gem_evict_something(dev, vm, size, alignment,
						       obj->cache_level,
						       start, end,
						       flags);
			if (ret == 0)
				goto search_free;

			goto err_free_vma;
		}
	}
	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
		ret = -EINVAL;
		goto err_remove_node;
	}

	trace_i915_vma_bind(vma, flags);
	ret = i915_vma_bind(vma, obj->cache_level, flags);
	if (ret)
		goto err_remove_node;

	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->vm_link, &vm->inactive_list);

	return vma;

err_remove_node:
	drm_mm_remove_node(&vma->node);
err_free_vma:
	i915_gem_vma_destroy(vma);
	vma = ERR_PTR(ret);
err_unpin:
	i915_gem_object_unpin_pages(obj);
	return vma;
}
bool
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			bool force)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return false;

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
	if (obj->stolen || obj->phys_handle)
		return false;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
		obj->cache_dirty = true;
		return false;
	}

	trace_i915_gem_object_clflush(obj);
	drm_clflush_sg(obj->pages);
	obj->cache_dirty = false;

	return true;
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 */
	wmb();

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	intel_fb_obj_flush(obj, false, ORIGIN_GTT);

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	if (i915_gem_clflush_object(obj, obj->pin_display))
		i915_gem_chipset_flush(to_i915(obj->base.dev));

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}
/**
 * Moves a single object to the GTT read, and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t old_write_domain, old_read_domains;
	struct i915_vma *vma;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	/* And bump the LRU for this access */
	vma = i915_gem_obj_to_ggtt(obj);
	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
		list_move_tail(&vma->vm_link,
			       &ggtt->base.inactive_list);

	return 0;
}
/**
 * Changes the cache-level of an object across all VMA.
 * @obj: object to act on
 * @cache_level: new cache level to set for the object
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct i915_vma *vma, *next;
	bool bound = false;
	int ret = 0;

	if (obj->cache_level == cache_level)
		goto out;

	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (vma->pin_count) {
			DRM_DEBUG("can not change the cache level of pinned objects\n");
			return -EBUSY;
		}

		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;
		} else
			bound = true;
	}

	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as
	 * well rewrite the PTE in the belief that doing so tramples upon
	 * less state and so involves less work.
	 */
	if (bound) {
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmaping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
			ret = i915_gem_object_put_fence(obj);
			if (ret)
				return ret;
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
		}

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
	}

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

out:
	/* Flush the dirty CPU caches to the backing storage so that the
	 * object is now coherent at its new cache level (with respect
	 * to the access domain).
	 */
	if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
		if (i915_gem_clflush_object(obj, true))
			i915_gem_chipset_flush(to_i915(obj->base.dev));
	}

	return 0;
}
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}

	drm_gem_object_unreference_unlocked(&obj->base);
	return 0;
}
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
		if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
			return -ENODEV;

		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto rpm_put;

	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
rpm_put:
	intel_runtime_pm_put(dev_priv);

	return ret;
}
/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view)
{
	u32 old_read_domains, old_write_domain;
	int ret;

	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	obj->pin_display++;

	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
	if (ret)
		goto err_unpin_display;

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers.
	 */
	ret = i915_gem_object_ggtt_pin(obj, view, alignment,
				       view->type == I915_GGTT_VIEW_NORMAL ?
				       PIN_MAPPABLE : 0);
	if (ret)
		goto err_unpin_display;

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;

err_unpin_display:
	obj->pin_display--;
	return ret;
}

void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
					 const struct i915_ggtt_view *view)
{
	if (WARN_ON(obj->pin_display == 0))
		return;

	i915_gem_object_ggtt_unpin_view(obj, view);

	obj->pin_display--;
}
/**
 * Moves a single object to the CPU read, and possibly write domain.
 * @obj: object to act on
 * @write: requesting write or read-only access
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		/*
		 * Note that the request might not have been submitted yet.
		 * In which case emitted_jiffies will be zero.
		 */
		if (!request->emitted_jiffies)
			continue;

		target = request;
	}
	if (target)
		i915_gem_request_reference(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = __i915_wait_request(target, true, NULL, NULL);
	i915_gem_request_unreference(target);

	return ret;
}
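
/* Check whether an already-bound VMA satisfies the placement constraints
 * (alignment, mappability, offset bias/fixed offset) requested by the
 * current pin.
 */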
static bool
i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
{
	struct drm_i915_gem_object *obj = vma->obj;

	if (alignment &&
	    vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	bool mappable, fenceable;
	u32 fence_size, fence_alignment;

	fence_size = i915_gem_get_gtt_size(obj->base.dev,
					   obj->base.size,
					   obj->tiling_mode);
	fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
						     obj->base.size,
						     obj->tiling_mode,
						     true);

	fenceable = (vma->node.size == fence_size &&
		     (vma->node.start & (fence_alignment - 1)) == 0);

	mappable = (vma->node.start + fence_size <=
		    to_i915(obj->base.dev)->ggtt.mappable_end);

	obj->map_and_fenceable = mappable && fenceable;
}
static int
i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
		       struct i915_address_space *vm,
		       const struct i915_ggtt_view *ggtt_view,
		       uint32_t alignment,
		       uint64_t flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_vma *vma;
	unsigned bound;
	int ret;

	if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
		return -ENODEV;

	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
		return -EINVAL;

	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
		return -EINVAL;

	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
		return -EINVAL;

	vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
			  i915_gem_obj_to_vma(obj, vm);

	if (vma) {
		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
			return -EBUSY;

		if (i915_vma_misplaced(vma, alignment, flags)) {
			WARN(vma->pin_count,
			     "bo is already pinned in %s with incorrect alignment:"
			     " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     ggtt_view ? "ggtt" : "ppgtt",
			     upper_32_bits(vma->node.start),
			     lower_32_bits(vma->node.start),
			     alignment,
			     !!(flags & PIN_MAPPABLE),
			     obj->map_and_fenceable);
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;

			vma = NULL;
		}
	}

	bound = vma ? vma->bound : 0;
	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
						 flags);
		if (IS_ERR(vma))
			return PTR_ERR(vma);
	} else {
		ret = i915_vma_bind(vma, obj->cache_level, flags);
		if (ret)
			return ret;
	}

	if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
	    (bound ^ vma->bound) & GLOBAL_BIND) {
		__i915_vma_set_map_and_fenceable(vma);
		WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
	}

	vma->pin_count++;
	return 0;
}
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
		    uint64_t flags)
{
	return i915_gem_object_do_pin(obj, vm,
				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
				      alignment, flags);
}
int
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 uint32_t alignment,
			 uint64_t flags)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	BUG_ON(!view);

	return i915_gem_object_do_pin(obj, &ggtt->base, view,
				      alignment, flags | PIN_GLOBAL);
}
void
i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
				const struct i915_ggtt_view *view)
{
	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);

	WARN_ON(vma->pin_count == 0);
	WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));

	--vma->pin_count;
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	ret = i915_gem_object_flush_active(obj);
	if (ret)
		goto unref;

	args->busy = 0;
	if (obj->active) {
		int i;

		for (i = 0; i < I915_NUM_ENGINES; i++) {
			struct drm_i915_gem_request *req;

			req = obj->last_read_req[i];
			if (req)
				args->busy |= 1 << (16 + req->engine->exec_id);
		}
		if (obj->last_write_req)
			args->busy |= obj->last_write_req->engine->exec_id;
	}

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
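/*
 * Example (illustrative userspace sketch, not part of this file): how a
 * client might poll the busy ioctl. Reader engines are reported in the bits
 * above 16 and the writer's exec id in the low bits, as encoded above. The
 * helper name and drm_fd are assumptions for the sake of the sketch.
 */
#if 0
#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Returns 1 while the buffer is still busy on the GPU, 0 when idle. */
static int bo_is_busy(int drm_fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy = { .handle = handle };

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
		return -1;

	return busy.busy != 0;
}
#endif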
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (i915_gem_obj_is_pinned(obj)) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->pages &&
	    obj->tiling_mode != I915_TILING_NONE &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->madv == I915_MADV_WILLNEED)
			i915_gem_object_unpin_pages(obj);
		if (args->madv == I915_MADV_WILLNEED)
			i915_gem_object_pin_pages(obj);
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
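/*
 * Example (illustrative userspace sketch, not part of this file): marking a
 * cached buffer purgeable with the madvise ioctl; retained reports whether
 * the backing pages were still present. The helper name and drm_fd are
 * assumptions for the sake of the sketch.
 */
#if 0
#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int mark_purgeable(int drm_fd, uint32_t handle)
{
	struct drm_i915_gem_madvise madv = {
		.handle = handle,
		.madv = I915_MADV_DONTNEED,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -1;

	/* 0 means the kernel had already purged the backing storage. */
	return madv.retained;
}
#endif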
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	int i;

	INIT_LIST_HEAD(&obj->global_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&obj->engine_list[i]);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	obj->fence_reg = I915_FENCE_REG_NONE;
	obj->madv = I915_MADV_WILLNEED;

	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};
struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
						   size_t size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;
	int ret;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = file_inode(obj->base.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);

	return ERR_PTR(ret);
}
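/*
 * Example (illustrative in-kernel sketch, not part of this file): typical
 * internal usage of the allocator above, creating a shmemfs-backed object
 * and pinning it into the global GTT. The helper name alloc_scratch_bo is
 * an assumption for the sake of the sketch.
 */
#if 0
static struct drm_i915_gem_object *alloc_scratch_bo(struct drm_device *dev)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_create(dev, PAGE_SIZE);
	if (IS_ERR(obj))
		return obj;

	/* Bind through the normal GGTT view; PIN_GLOBAL is added for us. */
	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
				       0, PIN_MAPPABLE);
	if (ret) {
		drm_gem_object_unreference(&obj->base);
		return ERR_PTR(ret);
	}

	return obj;
}
#endif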
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_vma *vma, *next;

	intel_runtime_pm_get(dev_priv);

	trace_i915_gem_object_destroy(obj);

	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		int ret;

		vma->pin_count = 0;
		ret = i915_vma_unbind(vma);
		if (WARN_ON(ret == -ERESTARTSYS)) {
			bool was_interruptible;

			was_interruptible = dev_priv->mm.interruptible;
			dev_priv->mm.interruptible = false;

			WARN_ON(i915_vma_unbind(vma));

			dev_priv->mm.interruptible = was_interruptible;
		}
	}

	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	WARN_ON(obj->frontbuffer_bits);

	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
	    obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (discard_backing_storage(obj))
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
		    vma->vm == vm)
			return vma;
	}

	return NULL;
}
struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
					   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma;

	return NULL;
}
void i915_gem_vma_destroy(struct i915_vma *vma)
{
	WARN_ON(vma->node.allocated);

	/* Keep the vma as a placeholder in the execbuffer reservation lists */
	if (!list_empty(&vma->exec_list))
		return;

	if (!vma->is_ggtt)
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	list_del(&vma->obj_link);

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}
static void
i915_gem_stop_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.stop_engine(engine);
}
int
i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_wait_for_idle(dev_priv);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev_priv);

	i915_gem_stop_engines(dev);
	i915_gem_context_lost(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
	flush_delayed_work(&dev_priv->gt.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->gt.awake);

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
void i915_gem_init_swizzling(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
}
static void init_unused_ring(struct drm_device *dev, u32 base)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_device *dev)
{
	if (IS_I830(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
		init_unused_ring(dev, SRB2_BASE);
		init_unused_ring(dev, SRB3_BASE);
	} else if (IS_GEN2(dev)) {
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
	} else if (IS_GEN3(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, PRB2_BASE);
	}
}
int i915_gem_init_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = intel_init_bsd2_ring_buffer(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	return 0;

cleanup_vebox_ring:
	intel_cleanup_engine(&dev_priv->engine[VECS]);
cleanup_blt_ring:
	intel_cleanup_engine(&dev_priv->engine[BCS]);
cleanup_bsd_ring:
	intel_cleanup_engine(&dev_priv->engine[VCS]);
cleanup_render_ring:
	intel_cleanup_engine(&dev_priv->engine[RCS]);

	return ret;
}
int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	int ret;

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev)) {
		if (IS_IVYBRIDGE(dev)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (i.e. head != tail) after resume which
	 * will prevent C3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev);

	BUG_ON(!dev_priv->kernel_context);

	ret = i915_ppgtt_init_hw(dev);
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
	for_each_engine(engine, dev_priv) {
		ret = engine->init_hw(engine);
		if (ret)
			goto out;
	}

	intel_mocs_init_l3cc_table(dev);

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_guc_setup(dev);
	if (ret)
		goto out;

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}
int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (!i915.enable_execlists) {
		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
		dev_priv->gt.init_engines = i915_gem_init_engines;
		dev_priv->gt.cleanup_engine = intel_cleanup_engine;
		dev_priv->gt.stop_engine = intel_stop_engine;
	} else {
		dev_priv->gt.execbuf_submit = intel_execlists_submission;
		dev_priv->gt.init_engines = intel_logical_rings_init;
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
		dev_priv->gt.stop_engine = intel_logical_ring_stop;
	}

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	i915_gem_init_userptr(dev_priv);
	i915_gem_init_ggtt(dev);

	ret = i915_gem_context_init(dev);
	if (ret)
		goto out_unlock;

	ret = dev_priv->gt.init_engines(dev);
	if (ret)
		goto out_unlock;

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow ring initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		ret = 0;
	}

out_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
void
i915_gem_cleanup_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.cleanup_engine(engine);
}

static void
init_engine_lists(struct intel_engine_cs *engine)
{
	INIT_LIST_HEAD(&engine->active_list);
	INIT_LIST_HEAD(&engine->request_list);
}
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
}
void
i915_gem_load_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	dev_priv->objects =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->vmas =
		kmem_cache_create("i915_gem_vma",
				  sizeof(struct i915_vma), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->requests =
		kmem_cache_create("i915_gem_request",
				  sizeof(struct drm_i915_gem_request), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->vm_list);
	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_engine_lists(&dev_priv->engine[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	INIT_LIST_HEAD(&dev_priv->mm.fence_list);

	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	mutex_init(&dev_priv->fb_tracking.lock);
}
void i915_gem_load_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
}
int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 */

	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	return 0;
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = to_i915(dev);
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_ring = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}
/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}

	if (new) {
		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}
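/*
 * Example (illustrative in-kernel sketch, not part of this file): a display
 * update path would move a plane's frontbuffer bits from the outgoing
 * scanout object to the incoming one while holding struct_mutex. The helper
 * name and its caller are assumptions for the sake of the sketch.
 */
#if 0
static void example_switch_scanout(struct drm_i915_gem_object *old_obj,
				   struct drm_i915_gem_object *new_obj,
				   unsigned frontbuffer_bits)
{
	/* Either object may be NULL, e.g. when enabling or disabling a plane. */
	i915_gem_track_fb(old_obj, new_obj, frontbuffer_bits);
}
#endif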
/* All the new VM stuff */
u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ? "global" : "ppgtt");
	return -1;
}
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma->node.start;

	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
	return -1;
}
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;
	}

	return false;
}
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->is_ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
		    drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	GEM_BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
			return vma->node.size;
	}

	return 0;
}
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (vma->pin_count > 0)
			return true;

	return false;
}
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only default objects have per-page dirty tracking */
	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return NULL;

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);

	return page;
}
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
				 const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;		/* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference(&obj->base);
	return ERR_PTR(ret);
}
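/*
 * Example (illustrative in-kernel sketch, not part of this file): wrapping a
 * firmware-style blob in a GEM object with the helper above, in the spirit
 * of how the GuC firmware loader consumes it. The helper name is an
 * assumption for the sake of the sketch.
 */
#if 0
static struct drm_i915_gem_object *
example_wrap_blob(struct drm_device *dev, const void *blob, size_t len)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_from_data(dev, blob, len);
	if (IS_ERR(obj))
		DRM_DEBUG_DRIVER("blob upload failed: %ld\n", PTR_ERR(obj));

	return obj;
}
#endif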