/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <linux/reservation.h>

#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>

#include <drm/i915_drm.h>
struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};
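
/*
 * A minimal backend sketch (illustrative only; the my_* names are
 * hypothetical and not part of the driver). A backend allocates an
 * sg_table describing its pages in get_pages() and tears it down in
 * put_pages():
 *
 *	static struct sg_table *my_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		// allocate + map an sg_table covering obj->base.size bytes
 *	}
 *
 *	static void my_put_pages(struct drm_i915_gem_object *obj,
 *				 struct sg_table *pages)
 *	{
 *		// unmap and free the pages
 *	}
 *
 *	static const struct drm_i915_gem_object_ops my_ops = {
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 *			 I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = my_get_pages,
 *		.put_pages = my_put_pages,
 *	};
 */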

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;
	struct rb_root vma_tree;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_link;
	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	struct list_head userfault_link;

	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

	struct list_head batch_pool_link;

	unsigned long flags;
	/**
	 * Have we taken a reference for the object for incomplete GPU
	 * activity?
	 */
#define I915_BO_ACTIVE_REF 0

	/*
	 * Is the object to be mapped as read-only to the GPU
	 * Only honoured if hardware has relevant pte bit
	 */
	unsigned long gt_ro:1;
	unsigned int cache_level:3;
	unsigned int cache_dirty:1;

	atomic_t frontbuffer_bits;
	unsigned int frontbuffer_ggtt_origin; /* write once */
	struct i915_gem_active frontbuffer_write;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
#define STRIDE_MASK (~TILING_MASK)
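	/*
	 * Example: tiled strides are multiples of FENCE_MINIMUM_STRIDE, so
	 * the low bits are free to hold the tiling mode; a Y-tiled object
	 * with a 4096 byte stride is stored as 4096 | I915_TILING_Y and
	 * unpacked with TILING_MASK / STRIDE_MASK (see
	 * i915_gem_object_get_tiling() and i915_gem_object_get_stride()).
	 */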

	/** Count of VMA actually bound by this object */
	unsigned int bind_count;
	unsigned int active_count;
	unsigned int pin_display;

	struct {
		struct mutex lock; /* protects the pages and their use */
		atomic_t pages_pin_count;

		struct sg_table *pages;
		void *mapping;

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;

	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
	 * If there is a writer that necessarily implies that all other
	 * read requests are complete - but we may only be lazily clearing
	 * the read requests. A read request is naturally the most recent
	 * request on a ring, so we may have two different write and read
	 * requests on one ring where the write request is older than the
	 * read request. This allows for the CPU to read from an active
	 * buffer by only waiting for the write to complete.
	 */
	struct reservation_object *resv;

	/** References from framebuffers, locks out tiling changes. */
	unsigned long framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	struct i915_gem_userptr {
		uintptr_t ptr;
		unsigned read_only:1;

		struct i915_mm_struct *mm;
		struct i915_mmu_object *mmu_object;
		struct work_struct *work;
	} userptr;

	/** for phys allocated objects */
	struct drm_dma_handle *phys_handle;

	struct reservation_object __builtin_resv;
};
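
/**
 * to_intel_bo - upcast a base &drm_gem_object to the containing i915 object
 * @gem: the base GEM object
 *
 * Because @base is the first member, to_intel_bo(NULL) == NULL holds, as
 * asserted below.
 */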
static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @filp, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}
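
/*
 * Illustrative usage (hypothetical call site): the reference returned by
 * i915_gem_object_lookup() must be dropped with i915_gem_object_put() once
 * the caller is done:
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *	... use obj ...
 *	i915_gem_object_put(obj);
 */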

__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_reference(&obj->base);
	return obj;
}

__deprecated
extern void drm_gem_object_reference(struct drm_gem_object *);

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_unreference(&obj->base);
}

__deprecated
extern void drm_gem_object_unreference(struct drm_gem_object *);

__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);

static inline bool
i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->base.refcount.refcount) == 0;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
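
/*
 * Illustrative sketch of the active-reference dance (not a verbatim call
 * site): if the object is still busy on the GPU when its last reference
 * would be dropped, the final put is deferred to request retirement by
 * flagging the reference with I915_BO_ACTIVE_REF instead of releasing it:
 *
 *	if (!i915_gem_object_is_active(obj))
 *		i915_gem_object_put(obj);
 *	else
 *		i915_gem_object_set_active_reference(obj);
 */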

static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
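
/*
 * Worked example: an X-tiled object has a tile height of 8 rows, so with
 * a 512 byte stride one tile row spans 512 * 8 = 4096 bytes; the same
 * stride on a Y-tiled object (32 rows) spans 512 * 32 = 16384 bytes.
 */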

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

#endif