/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <linux/reservation.h>

#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include <drm/i915_drm.h>

#include "i915_gem_request.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct list_head ctx_link;
	struct i915_gem_context *ctx;
	u32 handle;
};
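
/*
 * Illustrative sketch (not part of the original header): each lut entry
 * sits on two lists at once, so the cached (ctx, handle) -> vma mapping can
 * be torn down from either direction. Roughly, when the object is closed we
 * walk obj->lut_list and purge the matching context entries:
 *
 *	struct i915_lut_handle *lut, *ln;
 *
 *	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
 *		radix_tree_delete(&lut->ctx->handles_vma, lut->handle);
 *		list_del(&lut->ctx_link);
 *	}
 *
 * Closing a context walks ctx_link instead.
 */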

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * an associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*pwrite)(struct drm_i915_gem_object *,
		      const struct drm_i915_gem_pwrite *);

	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};
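
/*
 * Example (a hedged sketch, not taken from this header): a backing-storage
 * provider fills in an ops table and associates it with the object when the
 * object is created. The example_* names below are placeholders for whatever
 * the provider implements.
 *
 *	static const struct drm_i915_gem_object_ops example_ops = {
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 *			 I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = example_get_pages,
 *		.put_pages = example_put_pages,
 *	};
 */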

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/**
	 * @vma_list: List of VMAs backed by this object
	 *
	 * The VMAs on this list are ordered by type: all GGTT vma are placed
	 * at the head and all ppGTT vma are placed at the tail. The different
	 * types of GGTT vma are unordered between themselves; use the
	 * @vma_tree (which has a defined order between all VMA) to find an
	 * exact match.
	 */
	struct list_head vma_list;
	/**
	 * @vma_tree: Ordered tree of VMAs backed by this object
	 *
	 * All VMA created for this object are placed in the @vma_tree for
	 * fast retrieval via a binary search in i915_vma_instance().
	 * They are also added to @vma_list for easy iteration.
	 */
	struct rb_root vma_tree;

	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMA from
	 * the fast lookup index in associated contexts; @lut_list provides
	 * this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	unsigned int userfault_count;
	struct list_head userfault_link;

	struct list_head batch_pool_link;
	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;

	/**
	 * Have we taken a reference for the object for incomplete GPU
	 * activity?
	 */
#define I915_BO_ACTIVE_REF 0

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant PTE bit.
	 */
	unsigned long gt_ro:1;
	unsigned int cache_level:3;
	unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_dirty:1;

	atomic_t frontbuffer_bits;
	unsigned int frontbuffer_ggtt_origin; /* write once */
	struct i915_gem_active frontbuffer_write;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
#define STRIDE_MASK (~TILING_MASK)
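
	/*
	 * Worked example: a fenceable stride is a multiple of 128 bytes
	 * (FENCE_MINIMUM_STRIDE), so the low 7 bits of tiling_and_stride
	 * are free to hold the tiling mode, and both values share one word:
	 *
	 *	tiling_and_stride = stride | tiling;	// stride & TILING_MASK == 0
	 *	tiling = tiling_and_stride & TILING_MASK; // I915_TILING_{NONE,X,Y}
	 *	stride = tiling_and_stride & STRIDE_MASK;
	 */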

	/** Count of VMA actually bound by this object */
	unsigned int bind_count;
	unsigned int active_count;
	/** Count of how many global VMA are currently pinned for use by HW */
	unsigned int pin_global;

	struct {
		struct mutex lock; /* protects the pages and their use */
		atomic_t pages_pin_count;

		struct sg_table *pages;
		void *mapping;

		/* TODO: whack some of this into the error state */
		struct i915_page_sizes {
			/**
			 * The sg mask of the pages sg_table. i.e. the mask
			 * of the lengths for each sg entry.
			 */
			unsigned int phys;

			/**
			 * The gtt page sizes we are allowed to use given the
			 * sg mask and the supported page sizes. This will
			 * express the smallest unit we can use for the whole
			 * object, as well as the larger sizes we may be able
			 * to use opportunistically.
			 */
			unsigned int sg;

			/**
			 * The actual gtt page size usage. Since we can have
			 * multiple vma associated with this object we need to
			 * prevent any trampling of state, hence a copy of this
			 * struct also lives in each vma, therefore the gtt
			 * value here should only be read/written through the
			 * vma.
			 */
			unsigned int gtt;
		} page_sizes;
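
		/*
		 * Illustrative example (assuming hardware that supports 4K,
		 * 64K and 2M GTT pages): if the backing store happens to be
		 * allocated as one 2M chunk plus one 64K chunk, then
		 * page_sizes.phys == SZ_2M | SZ_64K, and page_sizes.sg would
		 * allow any supported size no larger than the chunks present,
		 * e.g. SZ_2M | SZ_64K | SZ_4K. Which size a binding actually
		 * uses is then recorded per-vma via page_sizes.gtt.
		 */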

		I915_SELFTEST_DECLARE(unsigned int page_mask);

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;

		/**
		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
		 * locked by i915->mm.obj_lock.
		 */
		struct list_head link;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;

	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
	 * If there is a writer, that necessarily implies that all other
	 * read requests are complete - but we may only be lazily clearing
	 * the read requests. A read request is naturally the most recent
	 * request on a ring, so we may have two different write and read
	 * requests on one ring where the write request is older than the
	 * read request. This allows for the CPU to read from an active
	 * buffer by only waiting for the write to complete.
	 */
	struct reservation_object *resv;

	/** References from framebuffers, locks out tiling changes. */
	unsigned int framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned read_only :1;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;

		unsigned long scratch;
	};

	/** for phys allocated objects */
	struct drm_dma_handle *phys_handle;

	struct reservation_object __builtin_resv;
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}

__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_reference(&obj->base);
	return obj;
}

__deprecated
extern void drm_gem_object_reference(struct drm_gem_object *);

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_unreference(&obj->base);
}

__deprecated
extern void drm_gem_object_unreference(struct drm_gem_object *);

__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	reservation_object_lock(obj->resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	reservation_object_unlock(obj->resv);
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->framebuffer_references);
}

static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

#endif