]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/gpu/drm/i915/i915_gem_context.h
Merge tag 'drm-intel-next-2017-07-17' of git://anongit.freedesktop.org/git/drm-intel...
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / i915 / i915_gem_context.h
CommitLineData
6095868a
CW
1/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#ifndef __I915_GEM_CONTEXT_H__
26#define __I915_GEM_CONTEXT_H__
27
28#include <linux/bitops.h>
29#include <linux/list.h>
30
31struct pid;
32
33struct drm_device;
34struct drm_file;
35
36struct drm_i915_private;
37struct drm_i915_file_private;
38struct i915_hw_ppgtt;
39struct i915_vma;
40struct intel_ring;
41
/*
 * Userspace handle reserved for each client's default context
 * (see i915_gem_context_is_default()).
 */
#define DEFAULT_CONTEXT_HANDLE 0
43
/**
 * struct i915_gem_context - client state
 *
 * The struct i915_gem_context represents the combined view of the driver and
 * logical hardware state for a particular client.
 */
struct i915_gem_context {
	/** @i915: i915 device backpointer */
	struct drm_i915_private *i915;

	/** @file_priv: owning file descriptor (NULL for a kernel context,
	 * see i915_gem_context_is_kernel())
	 */
	struct drm_i915_file_private *file_priv;

	/**
	 * @ppgtt: unique address space (GTT)
	 *
	 * In full-ppgtt mode, each context has its own address space ensuring
	 * complete separation of one client from all others.
	 *
	 * In other modes, this is a NULL pointer with the expectation that
	 * the caller uses the shared global GTT.
	 */
	struct i915_hw_ppgtt *ppgtt;

	/**
	 * @pid: process id of creator
	 *
	 * Note that who created the context may not be the principal user,
	 * as the context may be shared across a local socket. However,
	 * that should only affect the default context, all contexts created
	 * explicitly by the client are expected to be isolated.
	 */
	struct pid *pid;

	/**
	 * @name: arbitrary name
	 *
	 * A name is constructed for the context from the creator's process
	 * name, pid and user handle in order to uniquely identify the
	 * context in messages.
	 */
	const char *name;

	/** @link: node on &drm_i915_private.context_list */
	struct list_head link;

	/**
	 * @free_link: lock-free list node for deferred freeing
	 * (presumably consumed by the context-release path via
	 * i915_gem_context_release() — confirm against i915_gem_context.c).
	 */
	struct llist_node free_link;

	/**
	 * @ref: reference count
	 *
	 * A reference to a context is held by both the client who created it
	 * and on each request submitted to the hardware using the request
	 * (to ensure the hardware has access to the state until it has
	 * finished all pending writes). See i915_gem_context_get() and
	 * i915_gem_context_put() for access.
	 */
	struct kref ref;

	/**
	 * @rcu: rcu_head for deferred freeing.
	 */
	struct rcu_head rcu;

	/**
	 * @flags: small set of booleans
	 *
	 * NOTE(review): CONTEXT_NO_ZEROMAP is a bitmask (BIT(0)), whereas
	 * the remaining CONTEXT_* values are bit *numbers* intended for
	 * test_bit()/__set_bit() (see the accessors below). The two
	 * conventions must not be mixed; changing either side requires
	 * auditing all users of @flags.
	 */
	unsigned long flags;
#define CONTEXT_NO_ZEROMAP BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE 1
#define CONTEXT_CLOSED 2
#define CONTEXT_BANNABLE 3
#define CONTEXT_BANNED 4
#define CONTEXT_FORCE_SINGLE_SUBMISSION 5

	/**
	 * @hw_id: - unique identifier for the context
	 *
	 * The hardware needs to uniquely identify the context for a few
	 * functions like fault reporting, PASID, scheduling. The
	 * &drm_i915_private.context_hw_ida is used to assign a unique
	 * id for the lifetime of the context.
	 */
	unsigned int hw_id;

	/**
	 * @user_handle: userspace identifier
	 *
	 * A unique per-file identifier is generated from
	 * &drm_i915_file_private.contexts.
	 */
	u32 user_handle;

	/**
	 * @priority: execution and service priority
	 *
	 * All clients are equal, but some are more equal than others!
	 *
	 * Requests from a context with a greater (more positive) value of
	 * @priority will be executed before those with a lower @priority
	 * value, forming a simple QoS.
	 *
	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
	 */
	int priority;

	/** @ggtt_offset_bias: placement restriction for context objects */
	u32 ggtt_offset_bias;

	/** @vma_lut: simple per-context hashtable of handle -> vma lookups */
	struct i915_gem_context_vma_lut {
		/** @ht_size: last request size to allocate the hashtable for. */
		unsigned int ht_size;
#define I915_CTX_RESIZE_IN_PROGRESS BIT(0)
		/** @ht_bits: real log2(size) of hashtable. */
		unsigned int ht_bits;
		/** @ht_count: current number of entries inside the hashtable */
		unsigned int ht_count;

		/** @ht: the array of buckets comprising the simple hashtable */
		struct hlist_head *ht;

		/**
		 * @resize: After an execbuf completes, we check the load factor
		 * of the hashtable. If the hashtable is too full, or too empty,
		 * we schedule a task to resize the hashtable. During the
		 * resize, the entries are moved between different buckets and
		 * so we cannot simultaneously read the hashtable as it is
		 * being resized (unlike rhashtable). Therefore we treat the
		 * active work as a strong barrier, pausing a subsequent
		 * execbuf to wait for the resize worker to complete, if
		 * required.
		 */
		struct work_struct resize;
	} vma_lut;

	/** @engine: per-engine logical HW state */
	struct intel_context {
		struct i915_vma *state;
		struct intel_ring *ring;
		u32 *lrc_reg_state;
		u64 lrc_desc;
		int pin_count;
		bool initialised;
	} engine[I915_NUM_ENGINES];

	/** @ring_size: size for allocating the per-engine ring buffer */
	u32 ring_size;
	/** @desc_template: invariant fields for the HW context descriptor */
	u32 desc_template;

	/** @guilty_count: How many times this context has caused a GPU hang. */
	unsigned int guilty_count;
	/**
	 * @active_count: How many times this context was active during a GPU
	 * hang, but did not cause it.
	 */
	unsigned int active_count;

#define CONTEXT_SCORE_GUILTY 10
#define CONTEXT_SCORE_BAN_THRESHOLD 40
	/** @ban_score: Accumulated score of all hangs caused by this context. */
	int ban_score;

	/** @remap_slice: Bitmask of cache lines that need remapping */
	u8 remap_slice;
};
209
/* Returns true if CONTEXT_CLOSED has been set for @ctx. */
static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_CLOSED, &ctx->flags);
}
214
/*
 * Mark @ctx as closed. May only be called once per context:
 * the GEM_BUG_ON catches a double-close.
 */
static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
	__set_bit(CONTEXT_CLOSED, &ctx->flags);
}
220
/* Returns true if error-state capture is disabled for @ctx. */
static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}
225
/* Disable error-state capture for @ctx (non-atomic set, see __set_bit). */
static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}
230
/* Re-enable error-state capture for @ctx. */
static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
	__clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}
235
/* Returns true if @ctx may be banned (CONTEXT_BANNABLE is set). */
static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNABLE, &ctx->flags);
}
240
/* Allow @ctx to be banned. */
static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_BANNABLE, &ctx->flags);
}
245
/* Exempt @ctx from being banned. */
static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
	__clear_bit(CONTEXT_BANNABLE, &ctx->flags);
}
250
/* Returns true if @ctx has been banned (CONTEXT_BANNED is set). */
static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNED, &ctx->flags);
}
255
/* Ban @ctx. No clear_banned counterpart exists: a ban is permanent. */
static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_BANNED, &ctx->flags);
}
260
/* Returns true if CONTEXT_FORCE_SINGLE_SUBMISSION is set for @ctx. */
static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}
265
/* Request single submission for @ctx. Never cleared once set. */
static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}
270
/* Returns true if @c is the client's default context (handle 0). */
static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}
275
984ff29f
CW
276static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
277{
278 return !ctx->file_priv;
279}
280
/* i915_gem_context.c */

/* Device-wide context management (init/teardown over the whole device). */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);

/* Per-client (per drm_file) context bookkeeping. */
int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

/* Context switching on request submission. */
int i915_switch_context(struct drm_i915_gem_request *req);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);

/* kref release callback; used by i915_gem_context_put() below. */
void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);

/* ioctl entry points. */
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);
/*
 * Acquire a reference on @ctx and return it, for call-chaining.
 * Release with i915_gem_context_put().
 */
static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}
314
/*
 * Drop a reference on @ctx; i915_gem_context_release() runs when the
 * last reference is dropped.
 */
static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_release);
}
319
6095868a 320#endif /* !__I915_GEM_CONTEXT_H__ */