]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/gpu/drm/i915/i915_gem_context.h
b651c5f427b94f49bf5443856740c4490609bd61
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / i915 / i915_gem_context.h
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25 #ifndef __I915_GEM_CONTEXT_H__
26 #define __I915_GEM_CONTEXT_H__
27
28 #include <linux/bitops.h>
29 #include <linux/list.h>
30 #include <linux/radix-tree.h>
31
32 struct pid;
33
34 struct drm_device;
35 struct drm_file;
36
37 struct drm_i915_private;
38 struct drm_i915_file_private;
39 struct i915_hw_ppgtt;
40 struct i915_vma;
41 struct intel_ring;
42
43 #define DEFAULT_CONTEXT_HANDLE 0
44
/**
 * struct i915_gem_context - client state
 *
 * The struct i915_gem_context represents the combined view of the driver and
 * logical hardware state for a particular client.
 */
struct i915_gem_context {
	/** i915: i915 device backpointer */
	struct drm_i915_private *i915;

	/** file_priv: owning file descriptor */
	struct drm_i915_file_private *file_priv;

	/**
	 * @ppgtt: unique address space (GTT)
	 *
	 * In full-ppgtt mode, each context has its own address space ensuring
	 * complete separation of one client from all others.
	 *
	 * In other modes, this is a NULL pointer with the expectation that
	 * the caller uses the shared global GTT.
	 */
	struct i915_hw_ppgtt *ppgtt;

	/**
	 * @pid: process id of creator
	 *
	 * Note that who created the context may not be the principal user,
	 * as the context may be shared across a local socket. However,
	 * that should only affect the default context, all contexts created
	 * explicitly by the client are expected to be isolated.
	 */
	struct pid *pid;

	/**
	 * @name: arbitrary name
	 *
	 * A name is constructed for the context from the creator's process
	 * name, pid and user handle in order to uniquely identify the
	 * context in messages.
	 */
	const char *name;

	/** link: place within &drm_i915_private.context_list */
	struct list_head link;
	/**
	 * @free_link: llist node, presumably used to batch contexts for
	 * deferred freeing — confirm against i915_gem_context.c.
	 */
	struct llist_node free_link;

	/**
	 * @ref: reference count
	 *
	 * A reference to a context is held by both the client who created it
	 * and on each request submitted to the hardware using the request
	 * (to ensure the hardware has access to the state until it has
	 * finished all pending writes). See i915_gem_context_get() and
	 * i915_gem_context_put() for access.
	 */
	struct kref ref;

	/**
	 * @rcu: rcu_head for deferred freeing.
	 */
	struct rcu_head rcu;

	/**
	 * @flags: small set of booleans
	 *
	 * NOTE(review): CONTEXT_NO_ZEROMAP is defined as a bit *mask*
	 * (BIT(0)), whereas the remaining CONTEXT_* values below are bit
	 * *numbers* for use with test_bit()/__set_bit() on @flags (see the
	 * accessors later in this header). Do not mix the two conventions.
	 */
	unsigned long flags;
#define CONTEXT_NO_ZEROMAP BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE 1
#define CONTEXT_CLOSED 2
#define CONTEXT_BANNABLE 3
#define CONTEXT_BANNED 4
#define CONTEXT_FORCE_SINGLE_SUBMISSION 5

	/**
	 * @hw_id: - unique identifier for the context
	 *
	 * The hardware needs to uniquely identify the context for a few
	 * functions like fault reporting, PASID, scheduling. The
	 * &drm_i915_private.context_hw_ida is used to assign a unique
	 * id for the lifetime of the context.
	 */
	unsigned int hw_id;

	/**
	 * @user_handle: userspace identifier
	 *
	 * A unique per-file identifier is generated from
	 * &drm_i915_file_private.contexts.
	 */
	u32 user_handle;

	/**
	 * @priority: execution and service priority
	 *
	 * All clients are equal, but some are more equal than others!
	 *
	 * Requests from a context with a greater (more positive) value of
	 * @priority will be executed before those with a lower @priority
	 * value, forming a simple QoS.
	 *
	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
	 */
	int priority;

	/** ggtt_offset_bias: placement restriction for context objects */
	u32 ggtt_offset_bias;

	/** engine: per-engine logical HW state */
	struct intel_context {
		struct i915_vma *state;
		struct intel_ring *ring;
		u32 *lrc_reg_state;
		u64 lrc_desc;
		int pin_count;
		bool initialised;
	} engine[I915_NUM_ENGINES];

	/** ring_size: size for allocating the per-engine ring buffer */
	u32 ring_size;
	/** desc_template: invariant fields for the HW context descriptor */
	u32 desc_template;

	/** guilty_count: How many times this context has caused a GPU hang. */
	atomic_t guilty_count;
	/**
	 * @active_count: How many times this context was active during a GPU
	 * hang, but did not cause it.
	 */
	atomic_t active_count;

#define CONTEXT_SCORE_GUILTY 10
#define CONTEXT_SCORE_BAN_THRESHOLD 40
	/** ban_score: Accumulated score of all hangs caused by this context. */
	atomic_t ban_score;

	/** remap_slice: Bitmask of cache lines that need remapping */
	u8 remap_slice;

	/** jump_whitelist: Bit array for tracking cmds during cmdparsing */
	unsigned long *jump_whitelist;

	/** jump_whitelist_cmds: Number of cmd slots available */
	u32 jump_whitelist_cmds;

	/** handles_vma: radix tree to look up our context specific obj/vma for
	 * the user handle. (user handles are per fd, but the binding is
	 * per vm, which may be one per context or shared with the global GTT)
	 */
	struct radix_tree_root handles_vma;

	/** handles_list: reverse list of all the radix tree entries in use for
	 * this context, which allows us to free all the allocations on
	 * context close.
	 */
	struct list_head handles_list;
};
202
203 static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
204 {
205 return test_bit(CONTEXT_CLOSED, &ctx->flags);
206 }
207
208 static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
209 {
210 GEM_BUG_ON(i915_gem_context_is_closed(ctx));
211 __set_bit(CONTEXT_CLOSED, &ctx->flags);
212 }
213
214 static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
215 {
216 return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
217 }
218
219 static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
220 {
221 __set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
222 }
223
224 static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
225 {
226 __clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
227 }
228
229 static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
230 {
231 return test_bit(CONTEXT_BANNABLE, &ctx->flags);
232 }
233
234 static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
235 {
236 __set_bit(CONTEXT_BANNABLE, &ctx->flags);
237 }
238
239 static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
240 {
241 __clear_bit(CONTEXT_BANNABLE, &ctx->flags);
242 }
243
244 static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
245 {
246 return test_bit(CONTEXT_BANNED, &ctx->flags);
247 }
248
249 static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
250 {
251 __set_bit(CONTEXT_BANNED, &ctx->flags);
252 }
253
254 static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
255 {
256 return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
257 }
258
259 static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
260 {
261 __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
262 }
263
264 static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
265 {
266 return c->user_handle == DEFAULT_CONTEXT_HANDLE;
267 }
268
269 static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
270 {
271 return !ctx->file_priv;
272 }
273
/* i915_gem_context.c */

/* Driver-wide context bookkeeping around device init/teardown. */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);

/* Per-client (per drm_file) open/close hooks. */
int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

/* Context switching on request submission. */
int i915_switch_context(struct drm_i915_gem_request *req);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);

/* Final release; invoked when the last kref is dropped (see
 * i915_gem_context_put() below). */
void i915_gem_context_release(struct kref *ctx_ref);
/* NOTE(review): GVT-g helper — presumably creates a context for the
 * mediated device; confirm against i915_gem_context.c before relying on it. */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);

/* DRM ioctl entry points for userspace context management. */
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);
300
301 static inline struct i915_gem_context *
302 i915_gem_context_get(struct i915_gem_context *ctx)
303 {
304 kref_get(&ctx->ref);
305 return ctx;
306 }
307
308 static inline void i915_gem_context_put(struct i915_gem_context *ctx)
309 {
310 kref_put(&ctx->ref, i915_gem_context_release);
311 }
312
313 #endif /* !__I915_GEM_CONTEXT_H__ */