/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}
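/*
 * Note: on LLC platforms the CPU shares its last-level cache with the GPU,
 * so every cache level is coherent with GPU access; without an LLC, only
 * snooped (non-I915_CACHE_NONE) objects are.
 */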
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}
static int
insert_mappable_node(struct drm_i915_private *i915,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
						   size, 0, -1, 0,
						   i915->ggtt.mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}
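/*
 * insert_mappable_node() reserves a small scratch area in the mappable
 * half of the global GTT; the pread/pwrite GTT paths below use it to map
 * one page of an object at a time when pinning the whole object into the
 * aperture is not possible.
 */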
static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       10 * HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 * @obj: i915 gem object
 * @readonly: waiting for just read access or read-write access
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct reservation_object *resv;
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!readonly) {
		active = obj->last_read;
		active_mask = i915_gem_object_get_active(obj);
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait(&active[idx],
					   &obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv) {
		long err;

		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
							  MAX_SCHEDULE_TIMEOUT);
		if (err < 0)
			return err;
	}

	return 0;
}
/* A nonblocking variant of the above wait. Must be called prior to
 * acquiring the mutex for the object, as the object state may change
 * during this call. A reference must be held by the caller for the object.
 */
static __must_check int
__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
			struct intel_rps_client *rps,
			bool readonly)
{
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	active_mask = __I915_BO_ACTIVE(obj);
	if (!active_mask)
		return 0;

	if (!readonly) {
		active = obj->last_read;
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait_unlocked(&active[idx],
						    true, NULL, rps);
		if (ret)
			return ret;
	}

	return 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = i915_gem_object_unbind(obj);
	if (ret)
		return ret;

	ret = i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}
void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}
static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
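/*
 * The gpu_offset ^ 64 above implements bit-17 swizzling: on affected
 * hardware the two 64-byte halves of each 128-byte span are swapped, so
 * the copy proceeds at most one 64-byte cacheline at a time, reading from
 * the opposite half.
 */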
static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
/**
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait_rendering(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}
int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;

	/* Same trick applies to invalidate partially written cachelines read
	 * before writing.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
							 obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
		obj->cache_dirty = true;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->dirty = 1;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}
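/*
 * On return, *needs_clflush is a mask: CLFLUSH_BEFORE asks the caller to
 * flush stale cachelines before copying (the "|=" of the coherency check)
 * and CLFLUSH_AFTER to flush the written cachelines afterwards (the
 * "<< 1" of cpu_write_needs_clflush() above).
 */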
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
static inline unsigned long
slow_user_access(struct io_mapping *mapping,
		 uint64_t page_base, int page_offset,
		 char __user *user_data,
		 unsigned long length, bool pwrite)
{
	void __iomem *ioaddr;
	void *vaddr;
	uint64_t unwritten;

	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)ioaddr + page_offset;
	if (pwrite)
		unwritten = __copy_from_user(vaddr, user_data, length);
	else
		unwritten = __copy_to_user(user_data, vaddr, length);

	io_mapping_unmap(ioaddr);
	return unwritten;
}
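/*
 * Unlike the atomic fast paths, slow_user_access() uses plain copy
 * routines that may fault and sleep, which is why every caller drops
 * struct_mutex around it.
 */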
static int
i915_gem_gtt_pread(struct drm_device *dev,
		   struct drm_i915_gem_object *obj, uint64_t size,
		   uint64_t data_offset, uint64_t data_ptr)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_vma *vma;
	struct drm_mm_node node;
	char __user *user_data;
	uint64_t remain;
	uint64_t offset;
	int ret;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_gem_object_put_fence(obj);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	user_data = u64_to_user_ptr(data_ptr);
	remain = size;
	offset = data_offset;

	mutex_unlock(&dev->struct_mutex);
	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_writeable(user_data, remain);
		if (ret) {
			mutex_lock(&dev->struct_mutex);
			goto out_unpin;
		}
	}

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start,
					       I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* This is a slow read/write as it tries to read from
		 * and write to user memory which may result into page
		 * faults, and so we cannot perform this under struct_mutex.
		 */
		if (slow_user_access(ggtt->mappable, page_base,
				     page_offset, user_data,
				     page_length, false)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&dev->struct_mutex);
	if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
		/* The user has modified the object whilst we tried
		 * reading from it, and we now have no idea what domain
		 * the pages should be in. As we have just been touching
		 * them directly, flush everything back to the GTT
		 * domain.
		 */
		ret = i915_gem_object_set_to_gtt_domain(obj, false);
	}

out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out:
	return ret;
}
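/*
 * When the object cannot be pinned into the aperture as a whole, the loop
 * above rebinds the scratch node one page at a time (insert_page) and
 * copies through the GTT, trading speed for not needing a contiguous
 * mappable reservation.
 */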
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_obj_finish_shmem_access(obj);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
	if (ret)
		goto err;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err;

	ret = i915_gem_shmem_pread(dev, obj, args, file);

	/* pread for non shmem backed objects */
	if (ret == -EFAULT || ret == -ENODEV) {
		intel_runtime_pm_get(to_i915(dev));
		ret = i915_gem_gtt_pread(dev, obj, args->size,
					 args->offset, args->data_ptr);
		intel_runtime_pm_put(to_i915(dev));
	}

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;

err:
	i915_gem_object_put_unlocked(obj);
	return ret;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}
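/*
 * fast_user_write() runs under an atomic kmap with a non-caching copy, so
 * it must not fault; a non-zero return simply tells the caller to fall
 * back to slow_user_access() outside the mutex.
 */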
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @i915: i915 device private data
 * @obj: i915 gem object
 * @args: pwrite arguments structure
 * @file: drm file pointer
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_device *dev = obj->base.dev;
	struct i915_vma *vma;
	struct drm_mm_node node;
	uint64_t remain, offset;
	char __user *user_data;
	int ret;
	bool hit_slow_path = false;

	if (i915_gem_object_is_tiled(obj))
		return -EFAULT;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_gem_object_put_fence(obj);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->dirty = true;

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (fast_user_write(ggtt->mappable, page_base,
				    page_offset, user_data, page_length)) {
			hit_slow_path = true;
			mutex_unlock(&dev->struct_mutex);
			if (slow_user_access(ggtt->mappable,
					     page_base,
					     page_offset, user_data,
					     page_length, true)) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto out_flush;
			}

			mutex_lock(&dev->struct_mutex);
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_flush:
	if (hit_slow_path) {
		if (ret == 0 &&
		    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
			/* The user has modified the object whilst we tried
			 * reading from it, and we now have no idea what domain
			 * the pages should be in. As we have just been touching
			 * them directly, flush everything back to the GTT
			 * domain.
			 */
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
		}
	}

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out:
	return ret;
}
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	unsigned int needs_clflush;
	struct sg_page_iter sg_iter;

	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	if (ret)
		return ret;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush & CLFLUSH_AFTER);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush & CLFLUSH_AFTER);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_obj_finish_shmem_access(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!(needs_clflush & CLFLUSH_AFTER) &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				needs_clflush |= CLFLUSH_AFTER;
		}
	}

	if (needs_clflush & CLFLUSH_AFTER)
		i915_gem_chipset_flush(to_i915(dev));

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}
/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
	}

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);
	intel_runtime_pm_put(dev_priv);

	return ret;

err_rpm:
	intel_runtime_pm_put(dev_priv);
err:
	i915_gem_object_put_unlocked(obj);
	return ret;
}
static inline enum fb_op_origin
write_origin(struct drm_i915_gem_object *obj, unsigned domain)
{
	return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
	       ORIGIN_GTT : ORIGIN_CPU;
}
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
	if (ret)
		goto err;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err;

	if (read_domains & I915_GEM_DOMAIN_GTT)
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
	else
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;

err:
	i915_gem_object_put_unlocked(obj);
	return ret;
}
/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Pinned buffers may be scanout, so flush the cache */
	if (READ_ONCE(obj->pin_display)) {
		err = i915_mutex_lock_interruptible(dev);
		if (!err) {
			i915_gem_object_flush_cpu_write_domain(obj);
			mutex_unlock(&dev->struct_mutex);
		}
	}

	i915_gem_object_put_unlocked(obj);
	return err;
}
/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		i915_gem_object_put_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put_unlocked(obj);
			return -EINTR;
		}
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->has_wc_mmap, true);
	}
	i915_gem_object_put_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
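/*
 * Userspace sketch (hypothetical, using libdrm's drmIoctl wrapper) of how
 * this ioctl is typically driven:
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,	// GEM handle from an earlier create
 *		.size = size,
 *		.flags = 0,		// or I915_MMAP_WC for a WC mapping
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 */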
/**
 * i915_gem_fault - fault a page into the GTT
 * @area: CPU VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_ggtt_view view = i915_ggtt_view_normal;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
		PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon acquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = __unsafe_wait_rendering(obj, NULL, !write);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EFAULT;
		goto err_unlock;
	}

	/* Use a partial view if the object is bigger than the aperture. */
	if (obj->base.size >= ggtt->mappable_end &&
	    !i915_gem_object_is_tiled(obj)) {
		static const unsigned int chunk_size = 256; // 1 MiB

		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
			min_t(unsigned int, chunk_size,
			      (area->vm_end - area->vm_start) / PAGE_SIZE -
			      view.params.partial.offset);
	}

	/* Now pin it into the GTT if needed */
	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unlock;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	pfn = ggtt->mappable_base + i915_ggtt_offset(vma);
	pfn >>= PAGE_SHIFT;

	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
		/* Overriding existing pages in partial view does not cause
		 * us any trouble as TLBs are still valid because the fault
		 * is due to userspace losing part of the mapping or never
		 * having accessed it before (at this partials' range).
		 */
		unsigned long base = area->vm_start +
				     (view.params.partial.offset << PAGE_SHIFT);
		unsigned int i;

		for (i = 0; i < view.params.partial.size; i++) {
			ret = vm_insert_pfn(area,
					    base + i * PAGE_SIZE,
					    pfn + i);
			if (ret)
				break;
		}

		obj->fault_mappable = true;
	} else {
		if (!obj->fault_mappable) {
			unsigned long size =
				min_t(unsigned long,
				      area->vm_end - area->vm_start,
				      obj->base.size) >> PAGE_SHIFT;
			unsigned long base = area->vm_start;
			int i;

			for (i = 0; i < size; i++) {
				ret = vm_insert_pfn(area,
						    base + i * PAGE_SIZE,
						    pfn + i);
				if (ret)
					break;
			}

			obj->fault_mappable = true;
		} else
			ret = vm_insert_pfn(area,
					    (unsigned long)vmf->virtual_address,
					    pfn + page_offset);
	}
err_unpin:
	__i915_vma_unpin(vma);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(dev_priv);
err:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}
	return ret;
}
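/*
 * The partial view above maps the object in chunk_size (256 pages, i.e.
 * 1 MiB) windows, so objects larger than the mappable aperture can still
 * be faulted in piecewise through the GTT.
 */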
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 */
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

	obj->fault_mappable = false;
}
void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		i915_gem_release_mmap(obj);
}
/**
 * i915_gem_get_ggtt_size - return required global GTT size for an object
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 *
 * Return the required global GTT size for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
			   u64 size, int tiling_mode)
{
	u64 ggtt_size;

	GEM_BUG_ON(size == 0);

	if (INTEL_GEN(dev_priv) >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN3(dev_priv))
		ggtt_size = 1024*1024;
	else
		ggtt_size = 512*1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}
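/*
 * Example: a 3 MiB tiled object on gen3 starts from the 1 MiB minimum fence
 * region and doubles until it covers the object, yielding a 4 MiB GGTT size.
 */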
/**
 * i915_gem_get_ggtt_alignment - return required global GTT alignment
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 * @fenced: is fenced alignment required or not
 *
 * Return the required global GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
				int tiling_mode, bool fenced)
{
	GEM_BUG_ON(size == 0);

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
}
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int err;

	err = drm_gem_create_mmap_offset(&obj->base);
	if (!err)
		return 0;

	/* We can idle the GPU locklessly to flush stale objects, but in order
	 * to claim that space for ourselves, we need to take the big
	 * struct_mutex to free the requests+objects and allocate our slot.
	 */
	err = i915_gem_wait_for_idle(dev_priv, true);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (!err) {
		i915_gem_retire_requests(dev_priv);
		err = drm_gem_create_mmap_offset(&obj->base);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return err;
}
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}
int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret == 0)
		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

	i915_gem_object_put_unlocked(obj);
	return ret;
}
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->madv = __I915_MADV_PURGED;
}
/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = obj->base.filp->f_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	i915_gem_gtt_finish_object(obj);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sgt_page(page, sgt_iter, obj->pages) {
		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		put_page(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	GEM_BUG_ON(obj->bind_count);

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->global_list);

	if (obj->mapping) {
		void *ptr;

		ptr = ptr_mask_bits(obj->mapping);
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		obj->mapping = NULL;
	}

	ops->put_pages(obj);
	obj->pages = NULL;

	i915_gem_object_invalidate(obj);

	return 0;
}
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	int ret;
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = obj->base.filp->f_mapping;
	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
	gfp |= __GFP_NORETRY | __GFP_NOWARN;
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_shrink(dev_priv,
					page_count,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err_pages;
			}
		}
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg = sg_next(sg);
			continue;
		}
#endif
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
#endif
		sg_mark_end(sg);
	obj->pages = st;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
		goto err_pages;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		i915_gem_object_pin_pages(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}
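/*
 * Note that the allocation loop above coalesces physically contiguous
 * pages into a single scatterlist entry (sg->length += PAGE_SIZE) unless
 * SWIOTLB is active, in which case every page gets its own entry.
 */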
/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;

	return 0;
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	pgprot_t pgprot;
	void *addr;

	/* A single page can always be kmapped */
	if (n_pages == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
		if (!pages)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	switch (type) {
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);

	if (pages != stack_pages)
		drm_free_large(pages);

	return addr;
}
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	i915_gem_object_pin_pages(obj);
	pinned = obj->pages_pin_count > 1;

	ptr = ptr_unpack_bits(obj->mapping, has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ret = -EBUSY;
			goto err;
		}

		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		ptr = obj->mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			ret = -ENOMEM;
			goto err;
		}

		obj->mapping = ptr_pack_bits(ptr, type);
	}

	return ptr;

err:
	i915_gem_object_unpin_pages(obj);
	return ERR_PTR(ret);
}
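/*
 * obj->mapping stores the map type (WB vs WC) in the low bits of the
 * pointer via ptr_pack_bits(), which is why pin_map must tear down and
 * rebuild the vmap when a caller asks for a different type.
 */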
static void
i915_gem_object_retire__write(struct i915_gem_active *active,
			      struct drm_i915_gem_request *request)
{
	struct drm_i915_gem_object *obj =
		container_of(active, struct drm_i915_gem_object, last_write);

	intel_fb_obj_flush(obj, true, ORIGIN_CS);
}

static void
i915_gem_object_retire__read(struct i915_gem_active *active,
			     struct drm_i915_gem_request *request)
{
	int idx = request->engine->id;
	struct drm_i915_gem_object *obj =
		container_of(active, struct drm_i915_gem_object, last_read[idx]);

	GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));

	i915_gem_object_clear_active(obj, idx);
	if (i915_gem_object_is_active(obj))
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_list,
			       &request->i915->mm.bound_list);

	i915_gem_object_put(obj);
}
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
{
	unsigned long elapsed;

	if (ctx->hang_stats.banned)
		return true;

	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
	if (ctx->hang_stats.ban_period_seconds &&
	    elapsed <= ctx->hang_stats.ban_period_seconds) {
		DRM_DEBUG("context hanging too fast, banning!\n");
		return true;
	}

	return false;
}

static void i915_set_reset_status(struct i915_gem_context *ctx,
				  const bool guilty)
{
	struct i915_ctx_hang_stats *hs = &ctx->hang_stats;

	if (guilty) {
		hs->banned = i915_context_is_banned(ctx);
		hs->batch_active++;
		hs->guilty_ts = get_seconds();
	} else {
		hs->batch_pending++;
	}
}
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;

	/* We are called by the error capture and reset at a random
	 * point in time. In particular, note that neither is crucially
	 * ordered with an interrupt. After a hang, the GPU is dead and we
	 * assume that no more writes can happen (we waited long enough for
	 * all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 */
	list_for_each_entry(request, &engine->request_list, link) {
		if (i915_gem_request_completed(request))
			continue;

		return request;
	}

	return NULL;
}
static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;
	bool ring_hung;

	request = i915_gem_find_active_request(engine);
	if (request == NULL)
		return;

	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;

	i915_set_reset_status(request->ctx, ring_hung);
	list_for_each_entry_continue(request, &engine->request_list, link)
		i915_set_reset_status(request->ctx, false);
}
static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;
	struct intel_ring *ring;

	/* Mark all pending requests as complete so that any concurrent
	 * (lockless) lookup doesn't try and wait upon the request as we
	 * reset it.
	 */
	intel_engine_init_seqno(engine, engine->last_submitted_seqno);

	/*
	 * Clear the execlists queue up before freeing the requests, as those
	 * are the ones that keep the context and ringbuffer backing objects
	 * pinned in place.
	 */

	if (i915.enable_execlists) {
		/* Ensure irq handler finishes or is cancelled. */
		tasklet_kill(&engine->irq_tasklet);

		intel_execlists_cancel_requests(engine);
	}

	/*
	 * We must free the requests after all the corresponding objects have
	 * been moved off active lists. Which is the same order as the normal
	 * retire_requests function does. This is important if object hold
	 * implicit references on things like e.g. ppgtt address spaces through
	 * the request.
	 */
	request = i915_gem_active_raw(&engine->last_request,
				      &engine->i915->drm.struct_mutex);
	if (request)
		i915_gem_request_retire_upto(request);
	GEM_BUG_ON(intel_engine_is_active(engine));

	/* Having flushed all requests from all queues, we know that all
	 * ringbuffers must now be empty. However, since we do not reclaim
	 * all space when retiring the request (to prevent HEADs colliding
	 * with rapid ringbuffer wraparound) the amount of available space
	 * upon reset is less than when we start. Do one more pass over
	 * all the ringbuffers to reset last_retired_head.
	 */
	list_for_each_entry(ring, &engine->buffers, link) {
		ring->last_retired_head = ring->tail;
		intel_ring_update_space(ring);
	}

	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
}
void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;

	/*
	 * Before we free the objects from the requests, we need to inspect
	 * them for finding the guilty party. As the requests only borrow
	 * their reference to the objects, the inspection must be done first.
	 */
	for_each_engine(engine, dev_priv)
		i915_gem_reset_engine_status(engine);

	for_each_engine(engine, dev_priv)
		i915_gem_reset_engine_cleanup(engine);
	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);

	i915_gem_context_reset(dev);

	i915_gem_restore_fences(dev);
}
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.retire_work.work);
	struct drm_device *dev = &dev_priv->drm;

	/* Come back later if the device is busy... */
	if (mutex_trylock(&dev->struct_mutex)) {
		i915_gem_retire_requests(dev_priv);
		mutex_unlock(&dev->struct_mutex);
	}

	/* Keep the retire handler running until we are finally idle.
	 * We do not need to do this test under locking as in the worst-case
	 * we queue the retire worker once too often.
	 */
	if (READ_ONCE(dev_priv->gt.awake)) {
		i915_queue_hangcheck(dev_priv);
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.retire_work,
				   round_jiffies_up_relative(HZ));
	}
}
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.idle_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	bool rearm_hangcheck;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

	if (READ_ONCE(dev_priv->gt.active_engines))
		return;

	rearm_hangcheck =
		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	if (!mutex_trylock(&dev->struct_mutex)) {
		/* Currently busy, come back later */
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->gt.idle_work,
				 msecs_to_jiffies(50));
		goto out_rearm;
	}

	if (dev_priv->gt.active_engines)
		goto out_unlock;

	for_each_engine(engine, dev_priv)
		i915_gem_batch_pool_fini(&engine->batch_pool);

	GEM_BUG_ON(!dev_priv->gt.awake);
	dev_priv->gt.awake = false;
	rearm_hangcheck = false;

	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_idle(dev_priv);
	intel_runtime_pm_put(dev_priv);
out_unlock:
	mutex_unlock(&dev->struct_mutex);

out_rearm:
	if (rearm_hangcheck) {
		GEM_BUG_ON(!dev_priv->gt.awake);
		i915_queue_hangcheck(dev_priv);
	}
}
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_vma *vma, *vn;

	mutex_lock(&obj->base.dev->struct_mutex);
	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
		if (vma->vm->file == fpriv)
			i915_vma_close(vma);
	mutex_unlock(&obj->base.dev->struct_mutex);
}
/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct intel_rps_client *rps = to_rps_client(file);
	struct drm_i915_gem_object *obj;
	unsigned long active;
	int idx, ret = 0;

	if (args->flags != 0)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->bo_handle);
	if (!obj)
		return -ENOENT;

	active = __I915_BO_ACTIVE(obj);
	for_each_active(active, idx) {
		s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
		ret = i915_gem_active_wait_unlocked(&obj->last_read[idx], true,
						    timeout, rps);
		if (ret)
			break;
	}

	i915_gem_object_put_unlocked(obj);
	return ret;
}
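/*
 * Userspace sketch (editor's addition): polling busyness via the wait ioctl
 * with a zero timeout, as the comment above describes. Uses only the uapi
 * from drm/i915_drm.h; error handling trimmed for brevity.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.flags = 0,
 *		.timeout_ns = 0,	// poll: fails with ETIME while busy
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *		... object is idle ...
 *	else if (errno == ETIME)
 *		... object is still busy ...
 */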
static int
__i915_gem_object_sync(struct drm_i915_gem_request *to,
		       struct drm_i915_gem_request *from)
{
	int ret;

	if (to->engine == from->engine)
		return 0;

	if (!i915.semaphores) {
		ret = i915_wait_request(from,
					from->i915->mm.interruptible,
					NULL,
					NO_WAITBOOST);
		if (ret)
			return ret;
	} else {
		int idx = intel_engine_sync_index(from->engine, to->engine);
		if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
			return 0;

		trace_i915_gem_ring_sync_to(to, from);
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;

		from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
	}

	return 0;
}
/**
 * i915_gem_object_sync - sync an object to a ring.
 * @obj: object which may be in use on another ring.
 * @to: request we are wishing to use
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_request *to)
{
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	active_mask = i915_gem_object_get_active(obj);
	if (!active_mask)
		return 0;

	if (obj->base.pending_write_domain) {
		active = obj->last_read;
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		struct drm_i915_gem_request *request;
		int ret;

		request = i915_gem_active_peek(&active[idx],
					       &obj->base.dev->struct_mutex);
		if (!request)
			continue;

		ret = __i915_gem_object_sync(to, request);
		if (ret)
			return ret;
	}

	return 0;
}
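/*
 * Worked example (editor's addition): if engine A holds the last write to an
 * object while engines B and C hold reads, a new read-only request need only
 * wait for A (the writer), whereas a new write request must wait for A, B
 * and C - exactly the two branches above that select between obj->last_write
 * and the whole obj->last_read[] array.
 */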
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
	u32 old_write_domain, old_read_domains;

	/* Force a pagefault for domain tracking on next user access */
	i915_gem_release_mmap(obj);

	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		return;

	old_read_domains = obj->base.read_domains;
	old_write_domain = obj->base.write_domain;

	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);
}
static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}
int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->dev->struct_mutex);
			if (ret)
				break;
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!obj->pages);

	if (i915_vma_is_map_and_fenceable(vma)) {
		i915_gem_object_finish_gtt(obj);

		/* release the fence reg _after_ flushing */
		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	if (vma->pages != obj->pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist. */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_list,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
			   bool interruptible)
{
	struct intel_engine_cs *engine;
	int ret;

	for_each_engine(engine, dev_priv) {
		if (engine->last_context == NULL)
			continue;

		ret = intel_engine_idle(engine, interruptible);
		if (ret)
			return ret;
	}

	return 0;
}
static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
				     unsigned long cache_level)
{
	struct drm_mm_node *gtt_space = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	if (!drm_mm_node_allocated(gtt_space))
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}
/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	u64 min_alignment;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	if (flags & PIN_MAPPABLE)
		size = i915_gem_get_ggtt_size(dev_priv, size,
					      i915_gem_object_get_tiling(obj));

	min_alignment =
		i915_gem_get_ggtt_alignment(dev_priv, size,
					    i915_gem_object_get_tiling(obj),
					    flags & PIN_MAPPABLE);
	if (alignment == 0)
		alignment = min_alignment;
	if (alignment & (min_alignment - 1)) {
		DRM_DEBUG("Invalid object alignment requested %llu, minimum %llu\n",
			  alignment, min_alignment);
		return -EINVAL;
	}

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -E2BIG;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (offset & (alignment - 1) || offset > end - size) {
			ret = -EINVAL;
			goto err_unpin;
		}

		vma->node.start = offset;
		vma->node.size = size;
		vma->node.color = obj->cache_level;
		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
		if (ret) {
			ret = i915_gem_evict_for_vma(vma);
			if (ret == 0)
				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
			if (ret)
				goto err_unpin;
		}
	} else {
		u32 search_flag, alloc_flag;

		if (flags & PIN_HIGH) {
			search_flag = DRM_MM_SEARCH_BELOW;
			alloc_flag = DRM_MM_CREATE_TOP;
		} else {
			search_flag = DRM_MM_SEARCH_DEFAULT;
			alloc_flag = DRM_MM_CREATE_DEFAULT;
		}

		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
		 * so we know that we always have a minimum alignment of 4096.
		 * The drm_mm range manager is optimised to return results
		 * with zero alignment, so where possible use the optimal
		 * path.
		 */
		if (alignment <= 4096)
			alignment = 0;

search_free:
		ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
							  &vma->node,
							  size, alignment,
							  obj->cache_level,
							  start, end,
							  search_flag,
							  alloc_flag);
		if (ret) {
			ret = i915_gem_evict_something(vma->vm, size, alignment,
						       obj->cache_level,
						       start, end,
						       flags);
			if (ret == 0)
				goto search_free;

			goto err_unpin;
		}
	}
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}
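/*
 * Illustrative sketch (editor's addition): how the PIN_* flags above combine.
 * A caller wanting a CPU-mappable GGTT slot placed above a bias offset would
 * pass, e.g.:
 *
 *	u64 flags = PIN_MAPPABLE | PIN_OFFSET_BIAS | (bias & PIN_OFFSET_MASK);
 *
 *	ret = i915_vma_insert(vma, size, 0, flags);
 *
 * PIN_OFFSET_FIXED instead carries the exact offset in the PIN_OFFSET_MASK
 * bits and reserves that node directly, evicting any overlap as above.
 */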
bool
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			bool force)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return false;

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
	if (obj->stolen || obj->phys_handle)
		return false;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
		obj->cache_dirty = true;
		return false;
	}

	trace_i915_gem_object_clflush(obj);
	drm_clflush_sg(obj->pages);
	obj->cache_dirty = false;

	return true;
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain. Writes
	 * to it "immediately" go to main memory as far as we know, so there's
	 * no chipset flush. It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour).
	 */
	wmb();
	if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
		POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));

	intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));

	obj->base.write_domain = 0;
	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    I915_GEM_DOMAIN_GTT);
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	if (i915_gem_clflush_object(obj, obj->pin_display))
		i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);

	obj->base.write_domain = 0;
	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    I915_GEM_DOMAIN_CPU);
}
/**
 * Moves a single object to the GTT read, and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	struct i915_vma *vma;
	int ret;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	/* And bump the LRU for this access */
	vma = i915_gem_object_to_ggtt(obj, NULL);
	if (vma &&
	    drm_mm_node_allocated(&vma->node) &&
	    !i915_vma_is_active(vma))
		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

	return 0;
}
/**
 * Changes the cache-level of an object across all VMA.
 * @obj: object to act on
 * @cache_level: new cache level to set for the object
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct i915_vma *vma;
	int ret = 0;

	if (obj->cache_level == cache_level)
		goto out;

	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
restart:
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_pinned(vma)) {
			DRM_DEBUG("can not change the cache level of pinned objects\n");
			return -EBUSY;
		}

		if (i915_gem_valid_gtt_space(vma, cache_level))
			continue;

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;

		/* As unbinding may affect other elements in the
		 * obj->vma_list (due to side-effects from retiring
		 * an active vma), play safe and restart the iterator.
		 */
		goto restart;
	}

	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as
	 * well rewrite the PTE in the belief that doing so tramples upon
	 * less state and so involves less work.
	 */
	if (obj->bind_count) {
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmapping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
			ret = i915_gem_object_put_fence(obj);
			if (ret)
				return ret;
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
		}

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
	}

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

out:
	/* Flush the dirty CPU caches to the backing storage so that the
	 * object is now coherent at its new cache level (with respect
	 * to the access domain).
	 */
	if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
		if (i915_gem_clflush_object(obj, true))
			i915_gem_chipset_flush(to_i915(obj->base.dev));
	}

	return 0;
}
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}

	i915_gem_object_put_unlocked(obj);
	return 0;
}
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
		if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
			return -ENODEV;

		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto rpm_put;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);
	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
rpm_put:
	intel_runtime_pm_put(dev_priv);

	return ret;
}
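/*
 * Userspace sketch (editor's addition): toggling object caching through the
 * uapi pair implemented above; the values come from drm/i915_drm.h.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 *	arg.caching = 0;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg);
 *	// arg.caching now holds I915_CACHING_NONE/CACHED/DISPLAY
 */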
/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	u32 old_read_domains, old_write_domain;
	int ret;

	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	obj->pin_display++;

	/* The display engine is not coherent with the LLC cache on gen6. As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(obj->base.dev) ?
					      I915_CACHE_WT : I915_CACHE_NONE);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err_unpin_display;
	}

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers.
	 */
	vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
				       view->type == I915_GGTT_VIEW_NORMAL ?
				       PIN_MAPPABLE : 0);
	if (IS_ERR(vma))
		goto err_unpin_display;

	WARN_ON(obj->pin_display > i915_vma_pin_count(vma));

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return vma;

err_unpin_display:
	obj->pin_display--;
	return vma;
}

void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
	if (WARN_ON(vma->obj->pin_display == 0))
		return;

	vma->obj->pin_display--;

	i915_vma_unpin(vma);
	WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
}
/**
 * Moves a single object to the CPU read, and possibly write domain.
 * @obj: object to act on
 * @write: requesting write or read-only access
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		/*
		 * Note that the request might not have been submitted yet.
		 * In which case emitted_jiffies will be zero.
		 */
		if (!request->emitted_jiffies)
			continue;

		target = request;
	}
	if (target)
		i915_gem_request_get(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = i915_wait_request(target, true, NULL, NULL);
	i915_gem_request_put(target);

	return ret;
}
static bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	if (alignment && vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	bool mappable, fenceable;
	u32 fence_size, fence_alignment;

	fence_size = i915_gem_get_ggtt_size(dev_priv,
					    vma->size,
					    i915_gem_object_get_tiling(obj));
	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
						      vma->size,
						      i915_gem_object_get_tiling(obj),
						      true);

	fenceable = (vma->node.size == fence_size &&
		     (vma->node.start & (fence_alignment - 1)) == 0);

	mappable = (vma->node.start + fence_size <=
		    dev_priv->ggtt.mappable_end);

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}
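/*
 * Worked example (editor's addition, numbers hypothetical): for a tiled
 * object whose fence_size and fence_alignment are both 1 MiB, a node at
 * start 0x300000 of size 0x100000 is fenceable (size matches, start is
 * aligned); if it also ends at or below ggtt.mappable_end it is mappable,
 * and only then is I915_VMA_CAN_FENCE set above.
 */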
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	unsigned int bound = vma->flags;
	int ret;

	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err:
	__i915_vma_unpin(vma);
	return ret;
}
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
	struct i915_vma *vma;
	int ret;

	vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
			return ERR_PTR(-ENOSPC);

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}
static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
	/* Note that we could alias engines in the execbuf API, but
	 * that would be very unwise as it prevents userspace from
	 * fine control over engine selection. Ahem.
	 *
	 * This should be something like EXEC_MAX_ENGINE instead of
	 * I915_NUM_ENGINES.
	 */
	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
	return 0x10000 << id;
}

static __always_inline unsigned int __busy_write_id(unsigned int id)
{
	/* The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we can not guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	return id | __busy_read_flag(id);
}

static __always_inline unsigned int
__busy_set_if_active(const struct i915_gem_active *active,
		     unsigned int (*flag)(unsigned int id))
{
	struct drm_i915_gem_request *request;

	request = rcu_dereference(active->request);
	if (!request || i915_gem_request_completed(request))
		return 0;

	/* This is racy. See __i915_gem_active_get_rcu() for an in detail
	 * discussion of how to handle the race correctly, but for reporting
	 * the busy state we err on the side of potentially reporting the
	 * wrong engine as being busy (but we guarantee that the result
	 * is at least self-consistent).
	 *
	 * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
	 * whilst we are inspecting it, even under the RCU read lock as we are.
	 * This means that there is a small window for the engine and/or the
	 * seqno to have been overwritten. The seqno will always be in the
	 * future compared to the intended, and so we know that if that
	 * seqno is idle (on whatever engine) our request is idle and the
	 * return 0 above is correct.
	 *
	 * The issue is that if the engine is switched, it is just as likely
	 * to report that it is busy (but since the switch happened, we know
	 * the request should be idle). So there is a small chance that a busy
	 * result is actually the wrong engine.
	 *
	 * So why don't we care?
	 *
	 * For starters, the busy ioctl is a heuristic that is by definition
	 * racy. Even with perfect serialisation in the driver, the hardware
	 * state is constantly advancing - the state we report to the user
	 * is stale by the time it is reported.
	 *
	 * The critical information for the busy-ioctl is whether the object
	 * is idle as userspace relies on that to detect whether its next
	 * access will stall, or if it has missed submitting commands to
	 * the hardware allowing the GPU to stall. We never generate a
	 * false-positive for idleness, thus busy-ioctl is reliable at the
	 * most fundamental level, and we maintain the guarantee that a
	 * busy object left to itself will eventually become idle (and stay
	 * idle!).
	 *
	 * We allow ourselves the leeway of potentially misreporting the busy
	 * state because that is an optimisation heuristic that is constantly
	 * in flux. Being quickly able to detect the busy/idle state is much
	 * more important than accurate logging of exactly which engines were
	 * busy.
	 *
	 * For accuracy in reporting the engine, we could use
	 *
	 *	request = __i915_gem_active_get_rcu(active);
	 *	if (request) {
	 *		if (!i915_gem_request_completed(request))
	 *			result = flag(request->engine->exec_id);
	 *		i915_gem_request_put(request);
	 *	}
	 *
	 * but that still remains susceptible to both hardware and userspace
	 * races. So we accept making the result of that race slightly worse,
	 * given the rarity of the race and its low impact on the result.
	 */
	return flag(READ_ONCE(request->engine->exec_id));
}

static __always_inline unsigned int
busy_check_reader(const struct i915_gem_active *active)
{
	return __busy_set_if_active(active, __busy_read_flag);
}

static __always_inline unsigned int
busy_check_writer(const struct i915_gem_active *active)
{
	return __busy_set_if_active(active, __busy_write_id);
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long active;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	args->busy = 0;
	active = __I915_BO_ACTIVE(obj);
	if (active) {
		int idx;

		/* Yes, the lookups are intentionally racy.
		 *
		 * First, we cannot simply rely on __I915_BO_ACTIVE. We have
		 * to regard the value as stale and as our ABI guarantees
		 * forward progress, we confirm the status of each active
		 * request with the hardware.
		 *
		 * Even though we guard the pointer lookup by RCU, that only
		 * guarantees that the pointer and its contents remain
		 * dereferencable and does *not* mean that the request we
		 * have is the same as the one being tracked by the object.
		 *
		 * Consider that we lookup the request just as it is being
		 * retired and freed. We take a local copy of the pointer,
		 * but before we add its engine into the busy set, the other
		 * thread reallocates it and assigns it to a task on another
		 * engine with a fresh and incomplete seqno. Guarding against
		 * that requires careful serialisation and reference counting,
		 * i.e. using __i915_gem_active_get_request_rcu(). We don't,
		 * instead we expect that if the result is busy, which engines
		 * are busy is not completely reliable - we only guarantee
		 * that the object was busy.
		 */
		rcu_read_lock();

		for_each_active(active, idx)
			args->busy |= busy_check_reader(&obj->last_read[idx]);

		/* For ABI sanity, we only care that the write engine is in
		 * the set of read engines. This should be ensured by the
		 * ordering of setting last_read/last_write in
		 * i915_vma_move_to_active(), and then in reverse in retire.
		 * However, for good measure, we always report the last_write
		 * request as a busy read as well as being a busy write.
		 *
		 * We don't care that the set of active read/write engines
		 * may change during construction of the result, as it is
		 * equally liable to change before userspace can inspect
		 * the result.
		 */
		args->busy |= busy_check_writer(&obj->last_write);

		rcu_read_unlock();
	}

	i915_gem_object_put_unlocked(obj);
	return 0;
}
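/*
 * Userspace sketch (editor's addition): decoding args->busy as constructed
 * above - the writer's exec_id sits in the low 16 bits, readers appear as
 * the 0x10000 << id flags in the high bits.
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	if (busy.busy) {
 *		unsigned int write_engine = busy.busy & 0xffff;
 *		unsigned int read_engines = busy.busy >> 16;
 *		...
 *	}
 */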
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pages &&
	    i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->madv == I915_MADV_WILLNEED)
			i915_gem_object_unpin_pages(obj);
		if (args->madv == I915_MADV_WILLNEED)
			i915_gem_object_pin_pages(obj);
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
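/*
 * Userspace sketch (editor's addition): a buffer cache marks idle buffers
 * purgeable and revives them on reuse, checking "retained" to see whether
 * the kernel reaped the pages in between.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		... backing store was discarded; reupload contents ...
 */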
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	int i;

	INIT_LIST_HEAD(&obj->global_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_request_active(&obj->last_read[i],
				    i915_gem_object_retire__read);
	init_request_active(&obj->last_write,
			    i915_gem_object_retire__write);
	init_request_active(&obj->last_fence, NULL);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	obj->fence_reg = I915_FENCE_REG_NONE;
	obj->madv = I915_MADV_WILLNEED;

	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};
struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
						   u64 size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;
	int ret;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);

	return ERR_PTR(ret);
}
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_vma *vma, *next;

	intel_runtime_pm_get(dev_priv);

	trace_i915_gem_object_destroy(obj);

	/* All file-owned VMA should have been released by this point through
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * unbound now.
	 */
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		GEM_BUG_ON(!i915_vma_is_ggtt(vma));
		GEM_BUG_ON(i915_vma_is_active(vma));
		vma->flags &= ~I915_VMA_PIN_MASK;
		i915_vma_close(vma);
	}
	GEM_BUG_ON(obj->bind_count);

	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	WARN_ON(atomic_read(&obj->frontbuffer_bits));

	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
	    i915_gem_object_is_tiled(obj))
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (discard_backing_storage(obj))
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}
int i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	intel_suspend_gt_powersave(dev_priv);

	mutex_lock(&dev->struct_mutex);

	/* We have to flush all the executing contexts to main memory so
	 * that they can saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
	 * leaves the dev_priv->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		goto err;

	ret = i915_gem_wait_for_idle(dev_priv, true);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev_priv);

	i915_gem_context_lost(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
	flush_delayed_work(&dev_priv->gt.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->gt.awake);

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
void i915_gem_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);

	/* As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	if (i915.enable_execlists)
		intel_lr_context_reset(dev_priv, dev_priv->kernel_context);

	mutex_unlock(&dev->struct_mutex);
}
void i915_gem_init_swizzling(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}
static void init_unused_ring(struct drm_device *dev, u32 base)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_device *dev)
{
	if (IS_I830(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
		init_unused_ring(dev, SRB2_BASE);
		init_unused_ring(dev, SRB3_BASE);
	} else if (IS_GEN2(dev)) {
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
	} else if (IS_GEN3(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, PRB2_BASE);
	}
}
int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	int ret;

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev)) {
		if (IS_IVYBRIDGE(dev)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev);

	BUG_ON(!dev_priv->kernel_context);

	ret = i915_ppgtt_init_hw(dev);
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
	for_each_engine(engine, dev_priv) {
		ret = engine->init_hw(engine);
		if (ret)
			goto out;
	}

	intel_mocs_init_l3cc_table(dev);

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_guc_setup(dev);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}
bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
{
	if (INTEL_INFO(dev_priv)->gen < 6)
		return false;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	if (value >= 0)
		return value;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (!i915.enable_execlists) {
		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
	} else {
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
	}

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	i915_gem_init_userptr(dev_priv);

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret)
		goto out_unlock;

	ret = i915_gem_context_init(dev);
	if (ret)
		goto out_unlock;

	ret = intel_engines_init(dev);
	if (ret)
		goto out_unlock;

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		ret = 0;
	}

out_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
void
i915_gem_cleanup_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.cleanup_engine(engine);
}

static void
init_engine_lists(struct intel_engine_cs *engine)
{
	INIT_LIST_HEAD(&engine->request_list);
}
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
}
void
i915_gem_load_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	dev_priv->objects =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->vmas =
		kmem_cache_create("i915_gem_vma",
				  sizeof(struct i915_vma), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->requests =
		kmem_cache_create("i915_gem_request",
				  sizeof(struct drm_i915_gem_request), 0,
				  SLAB_HWCACHE_ALIGN |
				  SLAB_RECLAIM_ACCOUNT |
				  SLAB_DESTROY_BY_RCU,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_engine_lists(&dev_priv->engine[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].link);
	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	spin_lock_init(&dev_priv->fb_tracking.lock);
}

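/*
 * Undo i915_gem_load_init(), destroying the slab caches in the reverse
 * order of their creation.
 */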
void i915_gem_load_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();
}

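/*
 * Prepare GEM state for hibernation. This is expected to be called from
 * the driver's PM freeze_late hook, just before the hibernation image is
 * written out.
 */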
int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 */

	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	return 0;
}

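/*
 * Per-file teardown, called when a client closes its DRM fd. Requests
 * still in flight are orphaned rather than waited upon: clearing
 * request->file_priv below is what keeps later retirement from chasing a
 * dangling pointer.
 */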
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}

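/*
 * Per-file setup, the counterpart to i915_gem_release(). bsd_engine is
 * initialised to -1 to mark that no BSD ring has yet been picked for this
 * client (it is chosen lazily on first use).
 */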
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = to_i915(dev);
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask are guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     sizeof(atomic_t) * BITS_PER_BYTE);

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}

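/*
 * A minimal sketch of a caller, assuming a plane update that swaps the
 * primary plane of @pipe from @old_obj to @new_obj (names illustrative):
 *
 *	i915_gem_track_fb(old_obj, new_obj,
 *			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 *
 * so that subsequent frontbuffer invalidation/flushing follows the new
 * buffer.
 */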
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only default objects have per-page dirty tracking */
	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return NULL;

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);
	return page;
}

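/*
 * One known user of the helper below is the GuC firmware loader, which
 * wraps the firmware blob (fw->data, fw->size) in a GEM object so that it
 * can be handed to the hardware.
 */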
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
				 const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;		/* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}