/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <linux/reservation.h>

#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include <drm/i915_drm.h>

#include "i915_selftest.h"

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};
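
/*
 * Example: a minimal backing-storage implementation wired up through
 * drm_i915_gem_object_ops. This is an illustrative sketch only (the
 * example_* names are hypothetical, not part of i915); it shows the
 * contract described above: get_pages() returns a populated sg_table
 * describing the backing pages, and put_pages() releases it.
 *
 *	static struct sg_table *example_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		... allocate an sg_table and populate it with the pages ...
 *	}
 *
 *	static void example_put_pages(struct drm_i915_gem_object *obj,
 *				      struct sg_table *pages)
 *	{
 *		... release the pages and free the sg_table ...
 *	}
 *
 *	static const struct drm_i915_gem_object_ops example_ops = {
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 *			 I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = example_get_pages,
 *		.put_pages = example_put_pages,
 *	};
 */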

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;
	struct rb_root vma_tree;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_link;
	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	struct list_head userfault_link;

	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

	struct list_head batch_pool_link;
	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;

	/**
	 * Have we taken a reference on the object for incomplete GPU
	 * activity?
	 */
#define I915_BO_ACTIVE_REF 0

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant pte bit.
	 */
	unsigned long gt_ro:1;
	unsigned int cache_level:3;
	unsigned int cache_dirty:1;

	atomic_t frontbuffer_bits;
	unsigned int frontbuffer_ggtt_origin; /* write once */
	struct i915_gem_active frontbuffer_write;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
#define STRIDE_MASK (~TILING_MASK)
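
	/*
	 * A fenceable stride must be a multiple of FENCE_MINIMUM_STRIDE
	 * (128 bytes), so the low 7 bits of the stride are always zero
	 * and are reused to hold the I915_TILING_* value. An illustrative
	 * sketch of the packing (the actual assignment is done by the
	 * tiling code, not here):
	 *
	 *	tiling_and_stride = stride | tiling;
	 *	tiling = tiling_and_stride & TILING_MASK;
	 *	stride = tiling_and_stride & STRIDE_MASK;
	 */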

	/** Count of VMA actually bound by this object */
	unsigned int bind_count;
	unsigned int active_count;
	unsigned int pin_display;

	struct {
		struct mutex lock; /* protects the pages and their use */
		atomic_t pages_pin_count;

		struct sg_table *pages;
		void *mapping;

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;

	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
	 * If there is a writer, that necessarily implies that all other
	 * read requests are complete - but we may only be lazily clearing
	 * the read requests. A read request is naturally the most recent
	 * request on a ring, so we may have two different write and read
	 * requests on one ring, where the write request is older than the
	 * read request. This allows the CPU to read from an active
	 * buffer by waiting only for the write to complete.
	 */
	struct reservation_object *resv;

	/** References from framebuffers, locks out tiling changes. */
	unsigned int framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned read_only :1;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;

		unsigned long scratch;
	};

	/** for phys allocated objects */
	struct drm_dma_handle *phys_handle;

	struct reservation_object __builtin_resv;
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}
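
/*
 * Example usage (a sketch; the ioctl context and @args are hypothetical):
 * resolve a userspace handle into a full reference, use the object, then
 * drop the reference with i915_gem_object_put().
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... use obj; the reference taken by the lookup keeps it alive ...
 *
 *	i915_gem_object_put(obj);
 */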

__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_reference(&obj->base);
	return obj;
}

__deprecated
extern void drm_gem_object_reference(struct drm_gem_object *);

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_unreference(&obj->base);
}

__deprecated
extern void drm_gem_object_unreference(struct drm_gem_object *);

__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	reservation_object_lock(obj->resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	reservation_object_unlock(obj->resv);
}
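
/*
 * Example usage (a sketch): the lock helpers take the ww_mutex embedded
 * in the object's reservation_object, serialising fence updates against
 * other threads.
 *
 *	i915_gem_object_lock(obj);
 *	... inspect or update the object's reservation/fence state ...
 *	i915_gem_object_unlock(obj);
 */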

static inline bool
i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->base.refcount.refcount) == 0;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
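
/*
 * Sketch of the active-reference pattern the helpers above support
 * (illustrative of what a release-unless-active path does): if the
 * object is still busy on the GPU, keep the reference and mark it to
 * be dropped on retirement; otherwise release it immediately.
 *
 *	lockdep_assert_held(&obj->base.dev->struct_mutex);
 *
 *	if (i915_gem_object_is_active(obj) &&
 *	    !i915_gem_object_has_active_reference(obj))
 *		i915_gem_object_set_active_reference(obj);
 *	else
 *		i915_gem_object_put(obj);
 */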

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->framebuffer_references);
}

static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
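
/*
 * Worked example: per i915_gem_tile_height() an X tile is 8 rows high
 * and a Y tile 32 rows, so for an object with a 4096 byte stride a
 * single tile row covers 4096 * 8 = 32768 bytes when X-tiled and
 * 4096 * 32 = 131072 bytes when Y-tiled.
 */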

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}
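
/*
 * This helper reflects the reservation-object semantics described on
 * the resv member: only the exclusive (write) fence is consulted, as
 * readers need not wait on other readers. A sketch of use:
 *
 *	struct intel_engine_cs *engine;
 *
 *	engine = i915_gem_object_last_write_engine(obj);
 *	if (engine)
 *		... a write to obj is still in flight on that engine ...
 */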

void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

#endif