/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <linux/reservation.h>

#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include <drm/i915_drm.h>

#include "i915_gem_request.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(1)

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated
	 * set of pages before binding them into the GTT, and put_pages()
	 * is called after we no longer need them. As we expect there to be
	 * an associated cost with migrating pages between the backing
	 * storage and making them available for the GPU (e.g. clflush),
	 * we may hold onto the pages after they are no longer referenced
	 * by the GPU in case they may be used again shortly (for example
	 * migrating the pages to a different memory domain within the
	 * GTT). put_pages() will therefore most likely be called when the
	 * object itself is being released, or under memory pressure (where
	 * we attempt to reap pages for the shrinker).
	 */
	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*pwrite)(struct drm_i915_gem_object *,
		      const struct drm_i915_gem_pwrite *);

	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};
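
/*
 * Illustrative sketch only (not taken from the driver): a shmem-like
 * backend would populate the ops table roughly as below. my_get_pages()
 * and my_put_pages() are hypothetical helpers matching the vfunc
 * signatures above; the flags advertise that the backing store is made
 * of struct pages and that the shrinker may reap them.
 *
 *	static const struct drm_i915_gem_object_ops my_object_ops = {
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 *			 I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = my_get_pages,
 *		.put_pages = my_put_pages,
 *	};
 */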

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/**
	 * @vma_list: List of VMAs backed by this object
	 *
	 * The VMAs on this list are ordered by type: all GGTT VMAs are
	 * placed at the head and all ppGTT VMAs at the tail. The different
	 * types of GGTT VMA are unordered between themselves; use the
	 * @vma_tree (which has a defined order between all VMAs) to find
	 * an exact match.
	 */
	struct list_head vma_list;
	/**
	 * @vma_tree: Ordered tree of VMAs backed by this object
	 *
	 * All VMAs created for this object are placed in the @vma_tree for
	 * fast retrieval via a binary search in i915_vma_instance().
	 * They are also added to @vma_list for easy iteration.
	 */
	struct rb_root vma_tree;
	struct i915_vma *vma_hashed;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_link;
	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	struct list_head userfault_link;

	struct list_head batch_pool_link;
	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;

	/**
	 * Have we taken a reference for the object for incomplete GPU
	 * activity?
	 */
#define I915_BO_ACTIVE_REF 0

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant pte bit.
	 */
	unsigned long gt_ro:1;
	unsigned int cache_level:3;
	unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_dirty:1;

	atomic_t frontbuffer_bits;
	unsigned int frontbuffer_ggtt_origin; /* write once */
	struct i915_gem_active frontbuffer_write;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
#define STRIDE_MASK (~TILING_MASK)
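	/*
	 * Example (illustrative): a Y-tiled object with a 4096 byte stride
	 * packs tiling_and_stride = 4096 | I915_TILING_Y; the stride is
	 * recovered with "& STRIDE_MASK" and the tiling mode with
	 * "& TILING_MASK". The packing works because a fenceable stride is
	 * a multiple of FENCE_MINIMUM_STRIDE, leaving the low bits free.
	 */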

	/** Count of VMA actually bound by this object */
	unsigned int bind_count;
	unsigned int active_count;
	unsigned int pin_display;

	struct {
		struct mutex lock; /* protects the pages and their use */
		atomic_t pages_pin_count;

		struct sg_table *pages;
		void *mapping;

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;

	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
	 * If there is a writer, that necessarily implies that all other
	 * read requests are complete - but we may only be lazily clearing
	 * the read requests. A read request is naturally the most recent
	 * request on a ring, so we may have two different write and read
	 * requests on one ring, where the write request is older than the
	 * read request. This allows the CPU to read from an active
	 * buffer by only waiting for the write to complete.
	 */
	struct reservation_object *resv;

	/** References from framebuffers, locks out tiling changes. */
	unsigned int framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned read_only :1;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;

		unsigned long scratch;
	};

	/** for phys allocated objects */
	struct drm_dma_handle *phys_handle;

	struct reservation_object __builtin_resv;
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}
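
/*
 * Typical lookup pattern (sketch, error handling elided): translate a
 * userspace handle into a full reference, use the object, then drop the
 * reference with i915_gem_object_put() declared below:
 *
 *	obj = i915_gem_object_lookup(file, handle);
 *	if (!obj)
 *		return -ENOENT;
 *	...
 *	i915_gem_object_put(obj);
 */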

__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_reference(&obj->base);
	return obj;
}

__deprecated
extern void drm_gem_object_reference(struct drm_gem_object *);

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_unreference(&obj->base);
}

__deprecated
extern void drm_gem_object_unreference(struct drm_gem_object *);

__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	reservation_object_lock(obj->resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	reservation_object_unlock(obj->resv);
}
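
/*
 * Sketch of the locking pattern: the reservation object guards the fences
 * attached to the buffer, so updates to them are bracketed by the pair
 * above, e.g.
 *
 *	i915_gem_object_lock(obj);
 *	... add or inspect fences on obj->resv ...
 *	i915_gem_object_unlock(obj);
 */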

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
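
/*
 * Sketch of the active-reference dance (under struct_mutex): rather than
 * dropping the last reference while the GPU may still be using the object,
 * mark it so the free is deferred until the object idles. This is roughly
 * what __i915_gem_object_release_unless_active() does:
 *
 *	if (i915_gem_object_is_active(obj) &&
 *	    !i915_gem_object_has_active_reference(obj))
 *		i915_gem_object_set_active_reference(obj);
 *	else
 *		i915_gem_object_put(obj);
 */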

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->framebuffer_references);
}

static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
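
/*
 * Worked example (illustrative): a Y-tiled object (tile height 32 rows)
 * with a 4096 byte stride has a tile row size of 4096 * 32 = 128KiB,
 * while an X-tiled object (tile height 8) with the same stride covers
 * 4096 * 8 = 32KiB per tile row.
 */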

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

#endif