/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things into the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But what about the ringbuffer control registers (head, tail, etc.)?
 * Shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use them. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different context,
 * which will cause the hardware to continue executing the second request and queue
 * the new request (the GPU detects the condition of a context getting preempted
 * with the same context and optimizes the context switch flow by not doing
 * preemption, but just sampling the new tail pointer).
 *
 */
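
/*
 * Illustrative walk-through of the pairing rule above (not driver code;
 * the real logic lives in execlists_context_unqueue() further down):
 * suppose the queue holds requests A1, A2 and B1, where A1/A2 share
 * context A and B1 uses context B. Scanning from the head, A1 is dropped
 * in favour of A2 (same context, and A2's tail already covers A1's
 * commands), B1 becomes the second element, and the pair (A2, B1) is
 * written to the ELSP roughly as:
 *
 *	desc[0] = intel_lr_context_descriptor(A2->ctx, engine);
 *	desc[1] = intel_lr_context_descriptor(B1->ctx, engine);
 *	write desc[1], then desc[0], to RING_ELSP(engine);
 *
 * Had every queued request used context A, only one element would be
 * picked and the second ELSP descriptor would simply be written as zero.
 */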
#include <linux/interrupt.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_mocs.h"

#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define RING_EXECLIST_QFULL (1 << 0x2)
#define RING_EXECLIST1_VALID (1 << 0x3)
#define RING_EXECLIST0_VALID (1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
#define RING_EXECLIST1_ACTIVE (1 << 0x11)
#define RING_EXECLIST0_ACTIVE (1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)

#define CTX_LRI_HEADER_0 0x01
#define CTX_CONTEXT_CONTROL 0x02
#define CTX_RING_HEAD 0x04
#define CTX_RING_TAIL 0x06
#define CTX_RING_BUFFER_START 0x08
#define CTX_RING_BUFFER_CONTROL 0x0a
#define CTX_BB_HEAD_U 0x0c
#define CTX_BB_HEAD_L 0x0e
#define CTX_BB_STATE 0x10
#define CTX_SECOND_BB_HEAD_U 0x12
#define CTX_SECOND_BB_HEAD_L 0x14
#define CTX_SECOND_BB_STATE 0x16
#define CTX_BB_PER_CTX_PTR 0x18
#define CTX_RCS_INDIRECT_CTX 0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
#define CTX_LRI_HEADER_1 0x21
#define CTX_CTX_TIMESTAMP 0x22
#define CTX_PDP3_UDW 0x24
#define CTX_PDP3_LDW 0x26
#define CTX_PDP2_UDW 0x28
#define CTX_PDP2_LDW 0x2a
#define CTX_PDP1_UDW 0x2c
#define CTX_PDP1_LDW 0x2e
#define CTX_PDP0_UDW 0x30
#define CTX_PDP0_LDW 0x32
#define CTX_LRI_HEADER_2 0x41
#define CTX_R_PWR_CLK_STATE 0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44

#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)

#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
	(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
	(reg_state)[(pos)+1] = (val); \
} while (0)

#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
} while (0)

#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
} while (0)

enum {
	ADVANCED_CONTEXT = 0,
	LEGACY_32B_CONTEXT,
	ADVANCED_AD_CONTEXT,
	LEGACY_64B_CONTEXT
};
#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
#define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
		LEGACY_64B_CONTEXT :\
		LEGACY_32B_CONTEXT)
enum {
	FAULT_AND_HANG = 0,
	FAULT_AND_HALT, /* Debug only */
	FAULT_AND_STREAM,
	FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26

static int intel_lr_context_pin(struct intel_context *ctx,
				struct intel_engine_cs *engine);

/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev: DRM device.
 * @enable_execlists: value of i915.enable_execlists module parameter.
 *
 * Only certain platforms support Execlists (the prerequisites being
 * support for Logical Ring Contexts and Aliasing PPGTT or better).
 *
 * Return: 1 if Execlists is supported and has to be enabled.
 */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
	WARN_ON(i915.enable_ppgtt == -1);

	/* On platforms where execlists are available, vGPU will only
	 * support execlist mode, not ring-buffer mode.
	 */
	if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
		return 1;

	if (INTEL_INFO(dev)->gen >= 9)
		return 1;

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}
ede7d42b 265
ca82580c 266static void
0bc40be8 267logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
ca82580c 268{
0bc40be8 269 struct drm_device *dev = engine->dev;
ca82580c 270
c6a2ac71 271 if (IS_GEN8(dev) || IS_GEN9(dev))
0bc40be8 272 engine->idle_lite_restore_wa = ~0;
c6a2ac71 273
0bc40be8 274 engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
ca82580c 275 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
0bc40be8 276 (engine->id == VCS || engine->id == VCS2);
ca82580c 277
0bc40be8
TU
278 engine->ctx_desc_template = GEN8_CTX_VALID;
279 engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
ca82580c
TU
280 GEN8_CTX_ADDRESSING_MODE_SHIFT;
281 if (IS_GEN8(dev))
0bc40be8
TU
282 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
283 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
ca82580c
TU
284
285 /* TODO: WaDisableLiteRestore when we start using semaphore
286 * signalling between Command Streamers */
287 /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
288
289 /* WaEnableForceRestoreInCtxtDescForVCS:skl */
290 /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
0bc40be8
TU
291 if (engine->disable_lite_restore_wa)
292 engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
ca82580c
TU
293}
294
/**
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 * for a pinned context
 *
 * @ctx: Context to work on
 * @engine: Engine the descriptor will be used with
 *
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB:
 *    bits 0-11:  flags, GEN8_CTX_* (cached in ctx_desc_template)
 *    bits 12-31: LRCA, GTT address of (the HWSP of) this context
 *    bits 32-51: ctx ID, a globally unique tag (the LRCA again!)
 *    bits 52-63: reserved, may encode the engine ID (for GuC)
 *
 * A worked numeric example follows the function below.
 */
ca82580c
TU
313static void
314intel_lr_context_descriptor_update(struct intel_context *ctx,
0bc40be8 315 struct intel_engine_cs *engine)
84b790f8 316{
ca82580c 317 uint64_t lrca, desc;
84b790f8 318
0bc40be8 319 lrca = ctx->engine[engine->id].lrc_vma->node.start +
ca82580c 320 LRC_PPHWSP_PN * PAGE_SIZE;
84b790f8 321
0bc40be8 322 desc = engine->ctx_desc_template; /* bits 0-11 */
ca82580c
TU
323 desc |= lrca; /* bits 12-31 */
324 desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
5af05fef 325
0bc40be8 326 ctx->engine[engine->id].lrc_desc = desc;
5af05fef
MT
327}
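
/*
 * Worked example of the layout above (illustrative only, with a made-up
 * LRCA): if the pinned context's PPHWSP sits at GGTT address 0x00ab1000
 * and the template flags happen to be GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE
 * | (LEGACY_32B_CONTEXT << GEN8_CTX_ADDRESSING_MODE_SHIFT) == 0x109, then:
 *
 *	flags  (bits 0-11)  = 0x109
 *	LRCA   (bits 12-31) = 0x00ab1000
 *	ctx ID (bits 32-51) = 0x00ab1000 >> PAGE_SHIFT = 0xab1
 *
 *	desc = 0x109 | 0x00ab1000 | ((u64)0xab1 << GEN8_CTX_ID_SHIFT)
 *	     = 0x00000ab100ab1109
 */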
328
919f1f55 329uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
0bc40be8 330 struct intel_engine_cs *engine)
84b790f8 331{
0bc40be8 332 return ctx->engine[engine->id].lrc_desc;
ca82580c 333}
203a571b 334
/**
 * intel_execlists_ctx_id() - get the Execlists Context ID
 * @ctx: Context to get the ID for
 * @engine: Engine to get the ID for
 *
 * Do not confuse with ctx->id! Unfortunately we have a name overload
 * here: the old context ID we pass to userspace as a handle so that
 * they can refer to a context, and the new context ID we pass to the
 * ELSP so that the GPU can inform us of the context status via
 * interrupts.
 *
 * The context ID is a portion of the context descriptor, so we can
 * just extract the required part from the cached descriptor.
 *
 * Return: 20-bit globally unique context ID.
 */
351u32 intel_execlists_ctx_id(struct intel_context *ctx,
0bc40be8 352 struct intel_engine_cs *engine)
ca82580c 353{
0bc40be8 354 return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
84b790f8
BW
355}
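
/*
 * Continuing the illustrative descriptor example above (desc ==
 * 0x00000ab100ab1109), intel_execlists_ctx_id() returns desc >> 32 ==
 * 0xab1, which is the ID that the Context Status Buffer reports back in
 * context status events.
 */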
356
cc3c4253
MK
357static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
358 struct drm_i915_gem_request *rq1)
84b790f8 359{
cc3c4253 360
4a570db5 361 struct intel_engine_cs *engine = rq0->engine;
e2f80391 362 struct drm_device *dev = engine->dev;
6e7cc470 363 struct drm_i915_private *dev_priv = dev->dev_private;
1cff8cc3 364 uint64_t desc[2];
84b790f8 365
1cff8cc3 366 if (rq1) {
4a570db5 367 desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
1cff8cc3
MK
368 rq1->elsp_submitted++;
369 } else {
370 desc[1] = 0;
371 }
84b790f8 372
4a570db5 373 desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
1cff8cc3 374 rq0->elsp_submitted++;
84b790f8 375
1cff8cc3 376 /* You must always write both descriptors in the order below. */
e2f80391
TU
377 I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
378 I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
6daccb0b 379
e2f80391 380 I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
84b790f8 381 /* The context is automatically loaded after the following */
e2f80391 382 I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
84b790f8 383
	/* ELSP is a write-only register, use another nearby reg for posting */
e2f80391 385 POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
84b790f8
BW
386}
387
c6a2ac71
TU
388static void
389execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
390{
391 ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
392 ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
393 ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
394 ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
395}
396
397static void execlists_update_context(struct drm_i915_gem_request *rq)
ae1250b9 398{
4a570db5 399 struct intel_engine_cs *engine = rq->engine;
05d9824b 400 struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
e2f80391 401 uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
ae1250b9 402
05d9824b 403 reg_state[CTX_RING_TAIL+1] = rq->tail;
ae1250b9 404
c6a2ac71
TU
405 /* True 32b PPGTT with dynamic page allocation: update PDP
406 * registers and point the unallocated PDPs to scratch page.
407 * PML4 is allocated during ppgtt init, so this is not needed
408 * in 48-bit mode.
409 */
410 if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
411 execlists_update_context_pdps(ppgtt, reg_state);
ae1250b9
OM
412}
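
/*
 * For reference, ASSIGN_CTX_PDP(ppgtt, reg_state, 3) used above expands
 * (roughly) to:
 *
 *	const u64 _addr = i915_page_dir_dma_addr(ppgtt, 3);
 *	reg_state[CTX_PDP3_UDW + 1] = upper_32_bits(_addr);
 *	reg_state[CTX_PDP3_LDW + 1] = lower_32_bits(_addr);
 *
 * i.e. only the value slot (offset + 1) of each LRI pair is rewritten;
 * the register offsets at CTX_PDPn_UDW/LDW themselves were emitted when
 * the context image was first populated.
 */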
413
d8cb8875
MK
414static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
415 struct drm_i915_gem_request *rq1)
84b790f8 416{
26720ab9 417 struct drm_i915_private *dev_priv = rq0->i915;
3756685a 418 unsigned int fw_domains = rq0->engine->fw_domains;
26720ab9 419
05d9824b 420 execlists_update_context(rq0);
d8cb8875 421
cc3c4253 422 if (rq1)
05d9824b 423 execlists_update_context(rq1);
84b790f8 424
27af5eea 425 spin_lock_irq(&dev_priv->uncore.lock);
3756685a 426 intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
26720ab9 427
cc3c4253 428 execlists_elsp_write(rq0, rq1);
26720ab9 429
3756685a 430 intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
27af5eea 431 spin_unlock_irq(&dev_priv->uncore.lock);
84b790f8
BW
432}
433
26720ab9 434static void execlists_context_unqueue(struct intel_engine_cs *engine)
acdd884a 435{
6d3d8274 436 struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
c6a2ac71 437 struct drm_i915_gem_request *cursor, *tmp;
e981e7b1 438
0bc40be8 439 assert_spin_locked(&engine->execlist_lock);
acdd884a 440
	/*
	 * If irqs are not active, generate a warning: batches that finish
	 * without them may get lost and a GPU hang may occur.
	 */
	WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));

acdd884a 447 /* Try to read in pairs */
0bc40be8 448 list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
acdd884a
MT
449 execlist_link) {
450 if (!req0) {
451 req0 = cursor;
6d3d8274 452 } else if (req0->ctx == cursor->ctx) {
acdd884a
MT
453 /* Same ctx: ignore first request, as second request
454 * will update tail past first request's workload */
e1fee72c 455 cursor->elsp_submitted = req0->elsp_submitted;
7eb08a25 456 list_move_tail(&req0->execlist_link,
0bc40be8 457 &engine->execlist_retired_req_list);
acdd884a
MT
458 req0 = cursor;
459 } else {
460 req1 = cursor;
c6a2ac71 461 WARN_ON(req1->elsp_submitted);
acdd884a
MT
462 break;
463 }
464 }
465
c6a2ac71
TU
466 if (unlikely(!req0))
467 return;
468
0bc40be8 469 if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
53292cdb 470 /*
c6a2ac71
TU
471 * WaIdleLiteRestore: make sure we never cause a lite restore
472 * with HEAD==TAIL.
473 *
474 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
475 * resubmit the request. See gen8_emit_request() for where we
476 * prepare the padding after the end of the request.
53292cdb 477 */
c6a2ac71 478 struct intel_ringbuffer *ringbuf;
53292cdb 479
0bc40be8 480 ringbuf = req0->ctx->engine[engine->id].ringbuf;
c6a2ac71
TU
481 req0->tail += 8;
482 req0->tail &= ringbuf->size - 1;
53292cdb
MT
483 }
484
d8cb8875 485 execlists_submit_requests(req0, req1);
acdd884a
MT
486}
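
/*
 * Worked example of the WaIdleLiteRestore padding above (sizes assumed):
 * with ringbuf->size == 4096 and req0->tail == 0xffc, the two extra
 * MI_NOOPs emitted by gen8_emit_request() occupy 8 bytes, so req0->tail
 * becomes (0xffc + 8) & 0xfff == 0x004, i.e. the tail advances past the
 * padding and wraps cleanly to the start of the ring.
 */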
487
c6a2ac71 488static unsigned int
0bc40be8 489execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
e981e7b1 490{
6d3d8274 491 struct drm_i915_gem_request *head_req;
e981e7b1 492
0bc40be8 493 assert_spin_locked(&engine->execlist_lock);
e981e7b1 494
0bc40be8 495 head_req = list_first_entry_or_null(&engine->execlist_queue,
6d3d8274 496 struct drm_i915_gem_request,
e981e7b1
TD
497 execlist_link);
498
c6a2ac71
TU
499 if (!head_req)
500 return 0;
e1fee72c 501
0bc40be8 502 if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
c6a2ac71
TU
503 return 0;
504
505 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
506
507 if (--head_req->elsp_submitted > 0)
508 return 0;
509
510 list_move_tail(&head_req->execlist_link,
0bc40be8 511 &engine->execlist_retired_req_list);
e981e7b1 512
c6a2ac71 513 return 1;
e981e7b1
TD
514}
515
c6a2ac71 516static u32
0bc40be8 517get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
c6a2ac71 518 u32 *context_id)
91a41032 519{
0bc40be8 520 struct drm_i915_private *dev_priv = engine->dev->dev_private;
c6a2ac71 521 u32 status;
91a41032 522
c6a2ac71
TU
523 read_pointer %= GEN8_CSB_ENTRIES;
524
0bc40be8 525 status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
c6a2ac71
TU
526
527 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
528 return 0;
91a41032 529
0bc40be8 530 *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
c6a2ac71
TU
531 read_pointer));
532
533 return status;
91a41032
BW
534}
535
/**
 * intel_lrc_irq_handler() - handle Context Switch interrupts
 * @data: tasklet data, cast to the Engine Command Streamer to handle.
 *
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */
27af5eea 543static void intel_lrc_irq_handler(unsigned long data)
e981e7b1 544{
27af5eea 545 struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
0bc40be8 546 struct drm_i915_private *dev_priv = engine->dev->dev_private;
e981e7b1 547 u32 status_pointer;
c6a2ac71 548 unsigned int read_pointer, write_pointer;
26720ab9
TU
549 u32 csb[GEN8_CSB_ENTRIES][2];
550 unsigned int csb_read = 0, i;
c6a2ac71
TU
551 unsigned int submit_contexts = 0;
552
3756685a 553 intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
c6a2ac71 554
0bc40be8 555 status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
e981e7b1 556
0bc40be8 557 read_pointer = engine->next_context_status_buffer;
5590a5f0 558 write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
e981e7b1 559 if (read_pointer > write_pointer)
dfc53c5e 560 write_pointer += GEN8_CSB_ENTRIES;
e981e7b1 561
e981e7b1 562 while (read_pointer < write_pointer) {
26720ab9
TU
563 if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
564 break;
565 csb[csb_read][0] = get_context_status(engine, ++read_pointer,
566 &csb[csb_read][1]);
567 csb_read++;
568 }
91a41032 569
26720ab9
TU
570 engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
571
572 /* Update the read pointer to the old write pointer. Manual ringbuffer
573 * management ftw </sarcasm> */
574 I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
575 _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
576 engine->next_context_status_buffer << 8));
577
3756685a 578 intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
26720ab9
TU
579
580 spin_lock(&engine->execlist_lock);
581
582 for (i = 0; i < csb_read; i++) {
583 if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
584 if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
585 if (execlists_check_remove_request(engine, csb[i][1]))
e1fee72c
OM
586 WARN(1, "Lite Restored request removed from queue\n");
587 } else
588 WARN(1, "Preemption without Lite Restore\n");
589 }
590
26720ab9 591 if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
c6a2ac71
TU
592 GEN8_CTX_STATUS_ELEMENT_SWITCH))
593 submit_contexts +=
26720ab9 594 execlists_check_remove_request(engine, csb[i][1]);
e981e7b1
TD
595 }
596
c6a2ac71 597 if (submit_contexts) {
0bc40be8 598 if (!engine->disable_lite_restore_wa ||
26720ab9
TU
599 (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
600 execlists_context_unqueue(engine);
5af05fef 601 }
e981e7b1 602
0bc40be8 603 spin_unlock(&engine->execlist_lock);
c6a2ac71
TU
604
605 if (unlikely(submit_contexts > 2))
606 DRM_ERROR("More than two context complete events?\n");
e981e7b1
TD
607}
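
/*
 * Worked example of the CSB pointer arithmetic above (assuming
 * GEN8_CSB_ENTRIES == 6, as defined in intel_lrc.h): if the driver last
 * stopped at read_pointer == 4 and the hardware write pointer reads back
 * as 1, the wrap test bumps write_pointer to 7 and the loop consumes the
 * entries at (4+1) % 6 == 5, 6 % 6 == 0 and 7 % 6 == 1, i.e. three status
 * events; next_context_status_buffer then ends up as 7 % 6 == 1.
 */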
608
c6a2ac71 609static void execlists_context_queue(struct drm_i915_gem_request *request)
acdd884a 610{
4a570db5 611 struct intel_engine_cs *engine = request->engine;
6d3d8274 612 struct drm_i915_gem_request *cursor;
f1ad5a1f 613 int num_elements = 0;
acdd884a 614
ed54c1a1 615 if (request->ctx != request->i915->kernel_context)
e2f80391 616 intel_lr_context_pin(request->ctx, engine);
af3302b9 617
9bb1af44
JH
618 i915_gem_request_reference(request);
619
27af5eea 620 spin_lock_bh(&engine->execlist_lock);
acdd884a 621
e2f80391 622 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
f1ad5a1f
OM
623 if (++num_elements > 2)
624 break;
625
626 if (num_elements > 2) {
6d3d8274 627 struct drm_i915_gem_request *tail_req;
f1ad5a1f 628
e2f80391 629 tail_req = list_last_entry(&engine->execlist_queue,
6d3d8274 630 struct drm_i915_gem_request,
f1ad5a1f
OM
631 execlist_link);
632
ae70797d 633 if (request->ctx == tail_req->ctx) {
f1ad5a1f 634 WARN(tail_req->elsp_submitted != 0,
7ba717cf 635 "More than 2 already-submitted reqs queued\n");
7eb08a25 636 list_move_tail(&tail_req->execlist_link,
e2f80391 637 &engine->execlist_retired_req_list);
f1ad5a1f
OM
638 }
639 }
640
e2f80391 641 list_add_tail(&request->execlist_link, &engine->execlist_queue);
f1ad5a1f 642 if (num_elements == 0)
e2f80391 643 execlists_context_unqueue(engine);
acdd884a 644
27af5eea 645 spin_unlock_bh(&engine->execlist_lock);
acdd884a
MT
646}
647
2f20055d 648static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
ba8b7ccb 649{
4a570db5 650 struct intel_engine_cs *engine = req->engine;
ba8b7ccb
OM
651 uint32_t flush_domains;
652 int ret;
653
654 flush_domains = 0;
e2f80391 655 if (engine->gpu_caches_dirty)
ba8b7ccb
OM
656 flush_domains = I915_GEM_GPU_DOMAINS;
657
e2f80391 658 ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
ba8b7ccb
OM
659 if (ret)
660 return ret;
661
e2f80391 662 engine->gpu_caches_dirty = false;
ba8b7ccb
OM
663 return 0;
664}
665
535fbe82 666static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
ba8b7ccb
OM
667 struct list_head *vmas)
668{
666796da 669 const unsigned other_rings = ~intel_engine_flag(req->engine);
ba8b7ccb
OM
670 struct i915_vma *vma;
671 uint32_t flush_domains = 0;
672 bool flush_chipset = false;
673 int ret;
674
675 list_for_each_entry(vma, vmas, exec_list) {
676 struct drm_i915_gem_object *obj = vma->obj;
677
03ade511 678 if (obj->active & other_rings) {
4a570db5 679 ret = i915_gem_object_sync(obj, req->engine, &req);
03ade511
CW
680 if (ret)
681 return ret;
682 }
ba8b7ccb
OM
683
684 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
685 flush_chipset |= i915_gem_clflush_object(obj, false);
686
687 flush_domains |= obj->base.write_domain;
688 }
689
690 if (flush_domains & I915_GEM_DOMAIN_GTT)
691 wmb();
692
693 /* Unconditionally invalidate gpu caches and ensure that we do flush
694 * any residual writes from the previous batch.
695 */
2f20055d 696 return logical_ring_invalidate_all_caches(req);
ba8b7ccb
OM
697}
698
40e895ce 699int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
bc0dce3f 700{
e28e404c 701 int ret = 0;
bc0dce3f 702
4a570db5 703 request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
f3cc01f0 704
a7e02199
AD
705 if (i915.enable_guc_submission) {
706 /*
707 * Check that the GuC has space for the request before
708 * going any further, as the i915_add_request() call
709 * later on mustn't fail ...
710 */
711 struct intel_guc *guc = &request->i915->guc;
712
713 ret = i915_guc_wq_check_space(guc->execbuf_client);
714 if (ret)
715 return ret;
716 }
717
e28e404c 718 if (request->ctx != request->i915->kernel_context)
4a570db5 719 ret = intel_lr_context_pin(request->ctx, request->engine);
e28e404c
DG
720
721 return ret;
bc0dce3f
JH
722}
723
ae70797d 724static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
595e1eeb 725 int bytes)
bc0dce3f 726{
ae70797d 727 struct intel_ringbuffer *ringbuf = req->ringbuf;
4a570db5 728 struct intel_engine_cs *engine = req->engine;
ae70797d 729 struct drm_i915_gem_request *target;
b4716185
CW
730 unsigned space;
731 int ret;
bc0dce3f
JH
732
733 if (intel_ring_space(ringbuf) >= bytes)
734 return 0;
735
79bbcc29
JH
736 /* The whole point of reserving space is to not wait! */
737 WARN_ON(ringbuf->reserved_in_use);
738
e2f80391 739 list_for_each_entry(target, &engine->request_list, list) {
bc0dce3f
JH
740 /*
741 * The request queue is per-engine, so can contain requests
742 * from multiple ringbuffers. Here, we must ignore any that
743 * aren't from the ringbuffer we're considering.
744 */
ae70797d 745 if (target->ringbuf != ringbuf)
bc0dce3f
JH
746 continue;
747
748 /* Would completion of this request free enough space? */
ae70797d 749 space = __intel_ring_space(target->postfix, ringbuf->tail,
b4716185
CW
750 ringbuf->size);
751 if (space >= bytes)
bc0dce3f 752 break;
bc0dce3f
JH
753 }
754
e2f80391 755 if (WARN_ON(&target->list == &engine->request_list))
bc0dce3f
JH
756 return -ENOSPC;
757
ae70797d 758 ret = i915_wait_request(target);
bc0dce3f
JH
759 if (ret)
760 return ret;
761
b4716185
CW
762 ringbuf->space = space;
763 return 0;
bc0dce3f
JH
764}
765
/*
 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
 * @request: Request to advance the logical ringbuffer of.
 *
 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
 * really happens during submission is that the context and current tail will be placed
 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
 * point, the tail *inside* the context is updated and the ELSP written to.
 */
7c17d377 775static int
ae70797d 776intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
bc0dce3f 777{
7c17d377 778 struct intel_ringbuffer *ringbuf = request->ringbuf;
d1675198 779 struct drm_i915_private *dev_priv = request->i915;
4a570db5 780 struct intel_engine_cs *engine = request->engine;
bc0dce3f 781
7c17d377
CW
782 intel_logical_ring_advance(ringbuf);
783 request->tail = ringbuf->tail;
bc0dce3f 784
7c17d377
CW
785 /*
786 * Here we add two extra NOOPs as padding to avoid
787 * lite restore of a context with HEAD==TAIL.
788 *
789 * Caller must reserve WA_TAIL_DWORDS for us!
790 */
791 intel_logical_ring_emit(ringbuf, MI_NOOP);
792 intel_logical_ring_emit(ringbuf, MI_NOOP);
793 intel_logical_ring_advance(ringbuf);
d1675198 794
117897f4 795 if (intel_engine_stopped(engine))
7c17d377 796 return 0;
bc0dce3f 797
f4e2dece
TU
798 if (engine->last_context != request->ctx) {
799 if (engine->last_context)
800 intel_lr_context_unpin(engine->last_context, engine);
801 if (request->ctx != request->i915->kernel_context) {
802 intel_lr_context_pin(request->ctx, engine);
803 engine->last_context = request->ctx;
804 } else {
805 engine->last_context = NULL;
806 }
807 }
808
d1675198
AD
809 if (dev_priv->guc.execbuf_client)
810 i915_guc_submit(dev_priv->guc.execbuf_client, request);
811 else
812 execlists_context_queue(request);
7c17d377
CW
813
814 return 0;
bc0dce3f
JH
815}
816
79bbcc29 817static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
bc0dce3f
JH
818{
819 uint32_t __iomem *virt;
820 int rem = ringbuf->size - ringbuf->tail;
821
bc0dce3f
JH
822 virt = ringbuf->virtual_start + ringbuf->tail;
823 rem /= 4;
824 while (rem--)
825 iowrite32(MI_NOOP, virt++);
826
827 ringbuf->tail = 0;
828 intel_ring_update_space(ringbuf);
bc0dce3f
JH
829}
830
ae70797d 831static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
bc0dce3f 832{
ae70797d 833 struct intel_ringbuffer *ringbuf = req->ringbuf;
79bbcc29
JH
834 int remain_usable = ringbuf->effective_size - ringbuf->tail;
835 int remain_actual = ringbuf->size - ringbuf->tail;
836 int ret, total_bytes, wait_bytes = 0;
837 bool need_wrap = false;
29b1b415 838
79bbcc29
JH
839 if (ringbuf->reserved_in_use)
840 total_bytes = bytes;
841 else
842 total_bytes = bytes + ringbuf->reserved_size;
29b1b415 843
79bbcc29
JH
844 if (unlikely(bytes > remain_usable)) {
845 /*
846 * Not enough space for the basic request. So need to flush
847 * out the remainder and then wait for base + reserved.
848 */
849 wait_bytes = remain_actual + total_bytes;
850 need_wrap = true;
851 } else {
852 if (unlikely(total_bytes > remain_usable)) {
853 /*
854 * The base request will fit but the reserved space
782f6bc0
AG
855 * falls off the end. So don't need an immediate wrap
856 * and only need to effectively wait for the reserved
857 * size space from the start of ringbuffer.
79bbcc29
JH
858 */
859 wait_bytes = remain_actual + ringbuf->reserved_size;
79bbcc29
JH
860 } else if (total_bytes > ringbuf->space) {
861 /* No wrapping required, just waiting. */
862 wait_bytes = total_bytes;
29b1b415 863 }
bc0dce3f
JH
864 }
865
79bbcc29
JH
866 if (wait_bytes) {
867 ret = logical_ring_wait_for_space(req, wait_bytes);
bc0dce3f
JH
868 if (unlikely(ret))
869 return ret;
79bbcc29
JH
870
871 if (need_wrap)
872 __wrap_ring_buffer(ringbuf);
bc0dce3f
JH
873 }
874
875 return 0;
876}
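
/*
 * Worked example for the wait_bytes computation above (all sizes assumed):
 * with ringbuf->size == 16384, effective_size == 16256, tail == 16000,
 * reserved_size == 160 and a request of bytes == 512: remain_usable == 256,
 * remain_actual == 384 and total_bytes == 672. Since bytes (512) >
 * remain_usable (256) we must wrap, so wait_bytes == remain_actual +
 * total_bytes == 1056, i.e. we wait until the NOOP-filled tail segment plus
 * the whole request (including its reservation) fits from the start of the
 * ring.
 */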
877
/**
 * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
 *
 * @req: The request to start some new work for
 * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
 *
 * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
 * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
 * and also preallocates a request (every workload submission is still mediated through
 * requests, same as it did with legacy ringbuffer submission).
 *
 * Return: non-zero if the ringbuffer is not ready to be written to.
 */
3bbaba0c 891int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
bc0dce3f 892{
bc0dce3f
JH
893 int ret;
894
4d616a29 895 WARN_ON(req == NULL);
bc0dce3f 896
ae70797d 897 ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
bc0dce3f
JH
898 if (ret)
899 return ret;
900
4d616a29 901 req->ringbuf->space -= num_dwords * sizeof(uint32_t);
bc0dce3f
JH
902 return 0;
903}
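
/*
 * Typical usage of the begin/emit/advance pattern (illustrative sketch only;
 * the identifiers req, ringbuf, instp_mask and instp_mode come from the
 * caller's scope, mirroring what intel_execlists_submission() does further
 * down when updating INSTPM):
 */
#if 0
	ret = intel_logical_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
	intel_logical_ring_emit_reg(ringbuf, INSTPM);
	intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
	intel_logical_ring_advance(ringbuf);
#endif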
904
ccd98fe4
JH
905int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
906{
907 /*
908 * The first call merely notes the reserve request and is common for
909 * all back ends. The subsequent localised _begin() call actually
910 * ensures that the reservation is available. Without the begin, if
911 * the request creator immediately submitted the request without
912 * adding any commands to it then there might not actually be
913 * sufficient room for the submission commands.
914 */
915 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
916
917 return intel_logical_ring_begin(request, 0);
918}
919
/**
 * intel_execlists_submission() - submit a batchbuffer for execution, Execlists style
 * @params: execbuffer call parameters (DRM device and file, Engine Command
 *          Streamer to submit to, context, batch object, dispatch flags, ...).
 * @args: execbuffer call arguments.
 * @vmas: list of vmas.
 *
 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
 * away the submission details of the execbuffer ioctl call.
 *
 * Return: non-zero if the submission fails.
 */
5f19e2bf 937int intel_execlists_submission(struct i915_execbuffer_params *params,
454afebd 938 struct drm_i915_gem_execbuffer2 *args,
5f19e2bf 939 struct list_head *vmas)
454afebd 940{
5f19e2bf 941 struct drm_device *dev = params->dev;
4a570db5 942 struct intel_engine_cs *engine = params->engine;
ba8b7ccb 943 struct drm_i915_private *dev_priv = dev->dev_private;
e2f80391 944 struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
5f19e2bf 945 u64 exec_start;
ba8b7ccb
OM
946 int instp_mode;
947 u32 instp_mask;
948 int ret;
949
950 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
951 instp_mask = I915_EXEC_CONSTANTS_MASK;
952 switch (instp_mode) {
953 case I915_EXEC_CONSTANTS_REL_GENERAL:
954 case I915_EXEC_CONSTANTS_ABSOLUTE:
955 case I915_EXEC_CONSTANTS_REL_SURFACE:
4a570db5 956 if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
ba8b7ccb
OM
957 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
958 return -EINVAL;
959 }
960
961 if (instp_mode != dev_priv->relative_constants_mode) {
962 if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
963 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
964 return -EINVAL;
965 }
966
967 /* The HW changed the meaning on this bit on gen6 */
968 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
969 }
970 break;
971 default:
972 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
973 return -EINVAL;
974 }
975
ba8b7ccb
OM
976 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
977 DRM_DEBUG("sol reset is gen7 only\n");
978 return -EINVAL;
979 }
980
535fbe82 981 ret = execlists_move_to_gpu(params->request, vmas);
ba8b7ccb
OM
982 if (ret)
983 return ret;
984
4a570db5 985 if (engine == &dev_priv->engine[RCS] &&
ba8b7ccb 986 instp_mode != dev_priv->relative_constants_mode) {
4d616a29 987 ret = intel_logical_ring_begin(params->request, 4);
ba8b7ccb
OM
988 if (ret)
989 return ret;
990
991 intel_logical_ring_emit(ringbuf, MI_NOOP);
992 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
f92a9162 993 intel_logical_ring_emit_reg(ringbuf, INSTPM);
ba8b7ccb
OM
994 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
995 intel_logical_ring_advance(ringbuf);
996
997 dev_priv->relative_constants_mode = instp_mode;
998 }
999
5f19e2bf
JH
1000 exec_start = params->batch_obj_vm_offset +
1001 args->batch_start_offset;
1002
e2f80391 1003 ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
ba8b7ccb
OM
1004 if (ret)
1005 return ret;
1006
95c24161 1007 trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
5e4be7bd 1008
8a8edb59 1009 i915_gem_execbuffer_move_to_active(vmas, params->request);
ba8b7ccb 1010
454afebd
OM
1011 return 0;
1012}
1013
0bc40be8 1014void intel_execlists_retire_requests(struct intel_engine_cs *engine)
c86ee3a9 1015{
6d3d8274 1016 struct drm_i915_gem_request *req, *tmp;
c86ee3a9
TD
1017 struct list_head retired_list;
1018
0bc40be8
TU
1019 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
1020 if (list_empty(&engine->execlist_retired_req_list))
c86ee3a9
TD
1021 return;
1022
1023 INIT_LIST_HEAD(&retired_list);
27af5eea 1024 spin_lock_bh(&engine->execlist_lock);
0bc40be8 1025 list_replace_init(&engine->execlist_retired_req_list, &retired_list);
27af5eea 1026 spin_unlock_bh(&engine->execlist_lock);
c86ee3a9
TD
1027
1028 list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
af3302b9
DV
1029 struct intel_context *ctx = req->ctx;
1030 struct drm_i915_gem_object *ctx_obj =
0bc40be8 1031 ctx->engine[engine->id].state;
af3302b9 1032
ed54c1a1 1033 if (ctx_obj && (ctx != req->i915->kernel_context))
0bc40be8 1034 intel_lr_context_unpin(ctx, engine);
e5292823 1035
c86ee3a9 1036 list_del(&req->execlist_link);
f8210795 1037 i915_gem_request_unreference(req);
c86ee3a9
TD
1038 }
1039}
1040
0bc40be8 1041void intel_logical_ring_stop(struct intel_engine_cs *engine)
454afebd 1042{
0bc40be8 1043 struct drm_i915_private *dev_priv = engine->dev->dev_private;
9832b9da
OM
1044 int ret;
1045
117897f4 1046 if (!intel_engine_initialized(engine))
9832b9da
OM
1047 return;
1048
666796da 1049 ret = intel_engine_idle(engine);
f4457ae7 1050 if (ret)
9832b9da 1051 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
0bc40be8 1052 engine->name, ret);
9832b9da
OM
1053
1054 /* TODO: Is this correct with Execlists enabled? */
0bc40be8
TU
1055 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
1056 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
1057 DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
9832b9da
OM
1058 return;
1059 }
0bc40be8 1060 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
454afebd
OM
1061}
1062
4866d729 1063int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
48e29f55 1064{
4a570db5 1065 struct intel_engine_cs *engine = req->engine;
48e29f55
OM
1066 int ret;
1067
e2f80391 1068 if (!engine->gpu_caches_dirty)
48e29f55
OM
1069 return 0;
1070
e2f80391 1071 ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
48e29f55
OM
1072 if (ret)
1073 return ret;
1074
e2f80391 1075 engine->gpu_caches_dirty = false;
48e29f55
OM
1076 return 0;
1077}
1078
e5292823 1079static int intel_lr_context_do_pin(struct intel_context *ctx,
0bc40be8 1080 struct intel_engine_cs *engine)
dcb4c12a 1081{
0bc40be8 1082 struct drm_device *dev = engine->dev;
e84fe803 1083 struct drm_i915_private *dev_priv = dev->dev_private;
0bc40be8
TU
1084 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
1085 struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
7d774cac
TU
1086 void *vaddr;
1087 u32 *lrc_reg_state;
ca82580c 1088 int ret;
dcb4c12a 1089
0bc40be8 1090 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
ca82580c 1091
e84fe803
NH
1092 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
1093 PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
1094 if (ret)
1095 return ret;
7ba717cf 1096
7d774cac
TU
1097 vaddr = i915_gem_object_pin_map(ctx_obj);
1098 if (IS_ERR(vaddr)) {
1099 ret = PTR_ERR(vaddr);
82352e90
TU
1100 goto unpin_ctx_obj;
1101 }
1102
7d774cac
TU
1103 lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
1104
0bc40be8 1105 ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
e84fe803 1106 if (ret)
7d774cac 1107 goto unpin_map;
d1675198 1108
0bc40be8
TU
1109 ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
1110 intel_lr_context_descriptor_update(ctx, engine);
77b04a04 1111 lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
0bc40be8 1112 ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
e84fe803 1113 ctx_obj->dirty = true;
e93c28f3 1114
e84fe803
NH
1115 /* Invalidate GuC TLB. */
1116 if (i915.enable_guc_submission)
1117 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
dcb4c12a 1118
7ba717cf
TD
1119 return ret;
1120
7d774cac
TU
1121unpin_map:
1122 i915_gem_object_unpin_map(ctx_obj);
7ba717cf
TD
1123unpin_ctx_obj:
1124 i915_gem_object_ggtt_unpin(ctx_obj);
e84fe803
NH
1125
1126 return ret;
1127}
1128
e5292823
TU
1129static int intel_lr_context_pin(struct intel_context *ctx,
1130 struct intel_engine_cs *engine)
e84fe803
NH
1131{
1132 int ret = 0;
e84fe803 1133
e5292823
TU
1134 if (ctx->engine[engine->id].pin_count++ == 0) {
1135 ret = intel_lr_context_do_pin(ctx, engine);
e84fe803
NH
1136 if (ret)
1137 goto reset_pin_count;
321fe304
TU
1138
1139 i915_gem_context_reference(ctx);
e84fe803
NH
1140 }
1141 return ret;
1142
a7cbedec 1143reset_pin_count:
e5292823 1144 ctx->engine[engine->id].pin_count = 0;
dcb4c12a
OM
1145 return ret;
1146}
1147
e5292823
TU
1148void intel_lr_context_unpin(struct intel_context *ctx,
1149 struct intel_engine_cs *engine)
dcb4c12a 1150{
e5292823 1151 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
af3302b9 1152
f4e2dece 1153 WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
e5292823 1154 if (--ctx->engine[engine->id].pin_count == 0) {
7d774cac 1155 i915_gem_object_unpin_map(ctx_obj);
e5292823 1156 intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
82352e90 1157 i915_gem_object_ggtt_unpin(ctx_obj);
e5292823
TU
1158 ctx->engine[engine->id].lrc_vma = NULL;
1159 ctx->engine[engine->id].lrc_desc = 0;
1160 ctx->engine[engine->id].lrc_reg_state = NULL;
321fe304
TU
1161
1162 i915_gem_context_unreference(ctx);
dcb4c12a
OM
1163 }
1164}
1165
e2be4faf 1166static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
771b9a53
MT
1167{
1168 int ret, i;
4a570db5 1169 struct intel_engine_cs *engine = req->engine;
e2be4faf 1170 struct intel_ringbuffer *ringbuf = req->ringbuf;
e2f80391 1171 struct drm_device *dev = engine->dev;
771b9a53
MT
1172 struct drm_i915_private *dev_priv = dev->dev_private;
1173 struct i915_workarounds *w = &dev_priv->workarounds;
1174
cd7feaaa 1175 if (w->count == 0)
771b9a53
MT
1176 return 0;
1177
e2f80391 1178 engine->gpu_caches_dirty = true;
4866d729 1179 ret = logical_ring_flush_all_caches(req);
771b9a53
MT
1180 if (ret)
1181 return ret;
1182
4d616a29 1183 ret = intel_logical_ring_begin(req, w->count * 2 + 2);
771b9a53
MT
1184 if (ret)
1185 return ret;
1186
1187 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
1188 for (i = 0; i < w->count; i++) {
f92a9162 1189 intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
771b9a53
MT
1190 intel_logical_ring_emit(ringbuf, w->reg[i].value);
1191 }
1192 intel_logical_ring_emit(ringbuf, MI_NOOP);
1193
1194 intel_logical_ring_advance(ringbuf);
1195
e2f80391 1196 engine->gpu_caches_dirty = true;
4866d729 1197 ret = logical_ring_flush_all_caches(req);
771b9a53
MT
1198 if (ret)
1199 return ret;
1200
1201 return 0;
1202}
1203
83b8a982 1204#define wa_ctx_emit(batch, index, cmd) \
17ee950d 1205 do { \
83b8a982
AS
1206 int __index = (index)++; \
1207 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
17ee950d
AS
1208 return -ENOSPC; \
1209 } \
83b8a982 1210 batch[__index] = (cmd); \
17ee950d
AS
1211 } while (0)
1212
8f40db77 1213#define wa_ctx_emit_reg(batch, index, reg) \
f0f59a00 1214 wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))

/*
 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
 * PIPE_CONTROL instruction. This is required for the flush to happen
 * correctly, but there is a slight complication as this is applied in a WA
 * batch where the values are only initialized once, so we cannot take the
 * register value at the beginning and reuse it further; hence we save its
 * value to memory, upload a constant value with bit 21 set and then restore
 * it back with the saved value. To simplify the WA, a constant value is
 * formed by using the default value of this register. This shouldn't be a
 * problem because we are only modifying it for a short period and this batch
 * is non-preemptible. We could of course use additional instructions that
 * read the actual value of the register at that time and set our bit of
 * interest, but it makes the WA complicated.
 *
 * This WA is also required for Gen9 so extracting it as a function avoids
 * code duplication.
 */
0bc40be8 1232static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
9e000847
AS
1233 uint32_t *const batch,
1234 uint32_t index)
1235{
1236 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
1237
a4106a78
AS
1238 /*
1239 * WaDisableLSQCROPERFforOCL:skl
1240 * This WA is implemented in skl_init_clock_gating() but since
1241 * this batch updates GEN8_L3SQCREG4 with default value we need to
1242 * set this bit here to retain the WA during flush.
1243 */
0bc40be8 1244 if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
a4106a78
AS
1245 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1246
f1afe24f 1247 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
83b8a982 1248 MI_SRM_LRM_GLOBAL_GTT));
8f40db77 1249 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
0bc40be8 1250 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
83b8a982
AS
1251 wa_ctx_emit(batch, index, 0);
1252
1253 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
8f40db77 1254 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
83b8a982
AS
1255 wa_ctx_emit(batch, index, l3sqc4_flush);
1256
1257 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1258 wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
1259 PIPE_CONTROL_DC_FLUSH_ENABLE));
1260 wa_ctx_emit(batch, index, 0);
1261 wa_ctx_emit(batch, index, 0);
1262 wa_ctx_emit(batch, index, 0);
1263 wa_ctx_emit(batch, index, 0);
1264
f1afe24f 1265 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
83b8a982 1266 MI_SRM_LRM_GLOBAL_GTT));
8f40db77 1267 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
0bc40be8 1268 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
83b8a982 1269 wa_ctx_emit(batch, index, 0);
9e000847
AS
1270
1271 return index;
1272}
1273
17ee950d
AS
1274static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
1275 uint32_t offset,
1276 uint32_t start_alignment)
1277{
1278 return wa_ctx->offset = ALIGN(offset, start_alignment);
1279}
1280
1281static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
1282 uint32_t offset,
1283 uint32_t size_alignment)
1284{
1285 wa_ctx->size = offset - wa_ctx->offset;
1286
1287 WARN(wa_ctx->size % size_alignment,
1288 "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
1289 wa_ctx->size, size_alignment);
1290 return 0;
1291}
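
/*
 * Small usage sketch (illustrative): a workaround batch builder calls
 * wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS) to record and return the
 * cacheline-aligned start, emits its dwords via wa_ctx_emit(), and finally
 * calls wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS), which records
 * the size and warns if it is not a whole number of cachelines
 * (CACHELINE_DWORDS is 16, since CACHELINE_BYTES is 64).
 */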
1292
/**
 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
 *
 * @ring: only applicable for RCS
 * @wa_ctx: structure representing wa_ctx
 * @batch: page in which WA are loaded
 * @offset: This field specifies the start of the batch, it should be
 *  cache-aligned otherwise it is adjusted accordingly.
 *  Typically we only have one indirect_ctx and per_ctx batch buffer which are
 *  initialized at the beginning and shared across all contexts but this field
 *  helps us to have multiple batches at different offsets and select them based
 *  on a criteria. At the moment this batch always starts at the beginning of the
 *  page and at this point we don't have multiple wa_ctx batch buffers.
 *
 * The number of WA applied is not known at the beginning; we use this field
 * to return the number of DWORDS written (note that the batch size is counted
 * in DWORDS here, while the HW expects it in terms of cachelines).
 *
 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END,
 * so it adds NOOPs as padding to make it cacheline aligned.
 * MI_BATCH_BUFFER_END will be added to the perctx batch and both of them
 * together make a complete batch buffer.
 *
 * Return: non-zero if we exceed the PAGE_SIZE limit.
 */
1320
0bc40be8 1321static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
17ee950d
AS
1322 struct i915_wa_ctx_bb *wa_ctx,
1323 uint32_t *const batch,
1324 uint32_t *offset)
1325{
0160f055 1326 uint32_t scratch_addr;
17ee950d
AS
1327 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1328
7ad00d1a 1329 /* WaDisableCtxRestoreArbitration:bdw,chv */
83b8a982 1330 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
17ee950d 1331
c82435bb 1332 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
0bc40be8
TU
1333 if (IS_BROADWELL(engine->dev)) {
1334 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
604ef734
AH
1335 if (rc < 0)
1336 return rc;
1337 index = rc;
c82435bb
AS
1338 }
1339
0160f055
AS
1340 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1341 /* Actual scratch location is at 128 bytes offset */
0bc40be8 1342 scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
0160f055 1343
83b8a982
AS
1344 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1345 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1346 PIPE_CONTROL_GLOBAL_GTT_IVB |
1347 PIPE_CONTROL_CS_STALL |
1348 PIPE_CONTROL_QW_WRITE));
1349 wa_ctx_emit(batch, index, scratch_addr);
1350 wa_ctx_emit(batch, index, 0);
1351 wa_ctx_emit(batch, index, 0);
1352 wa_ctx_emit(batch, index, 0);
0160f055 1353
17ee950d
AS
1354 /* Pad to end of cacheline */
1355 while (index % CACHELINE_DWORDS)
83b8a982 1356 wa_ctx_emit(batch, index, MI_NOOP);
17ee950d
AS
1357
1358 /*
1359 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1360 * execution depends on the length specified in terms of cache lines
1361 * in the register CTX_RCS_INDIRECT_CTX
1362 */
1363
1364 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1365}
1366
/**
 * gen8_init_perctx_bb() - initialize per ctx batch with WA
 *
 * @ring: only applicable for RCS
 * @wa_ctx: structure representing wa_ctx
 * @batch: page in which WA are loaded
 * @offset: This field specifies the start of this batch.
 *  This batch is started immediately after the indirect_ctx batch. Since we
 *  ensure that indirect_ctx ends on a cacheline, this batch is aligned
 *  automatically.
 *
 * The number of DWORDS written is returned using this field.
 *
 * This batch is terminated with MI_BATCH_BUFFER_END, so we need not add
 * padding to align it with a cacheline, as padding after MI_BATCH_BUFFER_END
 * is redundant.
 */
0bc40be8 1384static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
17ee950d
AS
1385 struct i915_wa_ctx_bb *wa_ctx,
1386 uint32_t *const batch,
1387 uint32_t *offset)
1388{
1389 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1390
7ad00d1a 1391 /* WaDisableCtxRestoreArbitration:bdw,chv */
83b8a982 1392 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
7ad00d1a 1393
83b8a982 1394 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
17ee950d
AS
1395
1396 return wa_ctx_end(wa_ctx, *offset = index, 1);
1397}
1398
0bc40be8 1399static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
0504cffc
AS
1400 struct i915_wa_ctx_bb *wa_ctx,
1401 uint32_t *const batch,
1402 uint32_t *offset)
1403{
a4106a78 1404 int ret;
0bc40be8 1405 struct drm_device *dev = engine->dev;
0504cffc
AS
1406 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1407
0907c8f7 1408 /* WaDisableCtxRestoreArbitration:skl,bxt */
e87a005d 1409 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
cbdc12a9 1410 IS_BXT_REVID(dev, 0, BXT_REVID_A1))
0907c8f7 1411 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
0504cffc 1412
a4106a78 1413 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
0bc40be8 1414 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
a4106a78
AS
1415 if (ret < 0)
1416 return ret;
1417 index = ret;
1418
0504cffc
AS
1419 /* Pad to end of cacheline */
1420 while (index % CACHELINE_DWORDS)
1421 wa_ctx_emit(batch, index, MI_NOOP);
1422
1423 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1424}
1425
0bc40be8 1426static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
0504cffc
AS
1427 struct i915_wa_ctx_bb *wa_ctx,
1428 uint32_t *const batch,
1429 uint32_t *offset)
1430{
0bc40be8 1431 struct drm_device *dev = engine->dev;
0504cffc
AS
1432 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1433
9b01435d 1434 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
e87a005d 1435 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
cbdc12a9 1436 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
9b01435d 1437 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
8f40db77 1438 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
9b01435d
AS
1439 wa_ctx_emit(batch, index,
1440 _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
1441 wa_ctx_emit(batch, index, MI_NOOP);
1442 }
1443
b1e429fe
TG
1444 /* WaClearTdlStateAckDirtyBits:bxt */
1445 if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
1446 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
1447
1448 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
1449 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1450
1451 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
1452 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1453
1454 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
1455 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1456
1457 wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
1458 /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
1459 wa_ctx_emit(batch, index, 0x0);
1460 wa_ctx_emit(batch, index, MI_NOOP);
1461 }
1462
0907c8f7 1463 /* WaDisableCtxRestoreArbitration:skl,bxt */
e87a005d 1464 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
cbdc12a9 1465 IS_BXT_REVID(dev, 0, BXT_REVID_A1))
0907c8f7
AS
1466 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1467
0504cffc
AS
1468 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1469
1470 return wa_ctx_end(wa_ctx, *offset = index, 1);
1471}
1472
0bc40be8 1473static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
17ee950d
AS
1474{
1475 int ret;
1476
0bc40be8
TU
1477 engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
1478 PAGE_ALIGN(size));
1479 if (!engine->wa_ctx.obj) {
17ee950d
AS
1480 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
1481 return -ENOMEM;
1482 }
1483
0bc40be8 1484 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
17ee950d
AS
1485 if (ret) {
1486 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
1487 ret);
0bc40be8 1488 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
17ee950d
AS
1489 return ret;
1490 }
1491
1492 return 0;
1493}
1494
0bc40be8 1495static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
17ee950d 1496{
0bc40be8
TU
1497 if (engine->wa_ctx.obj) {
1498 i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
1499 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1500 engine->wa_ctx.obj = NULL;
17ee950d
AS
1501 }
1502}
1503
0bc40be8 1504static int intel_init_workaround_bb(struct intel_engine_cs *engine)
17ee950d
AS
1505{
1506 int ret;
1507 uint32_t *batch;
1508 uint32_t offset;
1509 struct page *page;
0bc40be8 1510 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
17ee950d 1511
0bc40be8 1512 WARN_ON(engine->id != RCS);
17ee950d 1513
5e60d790 1514 /* update this when WA for higher Gen are added */
0bc40be8 1515 if (INTEL_INFO(engine->dev)->gen > 9) {
0504cffc 1516 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
0bc40be8 1517 INTEL_INFO(engine->dev)->gen);
5e60d790 1518 return 0;
0504cffc 1519 }
5e60d790 1520
c4db7599 1521 /* some WA perform writes to scratch page, ensure it is valid */
0bc40be8
TU
1522 if (engine->scratch.obj == NULL) {
1523 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
c4db7599
AS
1524 return -EINVAL;
1525 }
1526
0bc40be8 1527 ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
17ee950d
AS
1528 if (ret) {
1529 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1530 return ret;
1531 }
1532
033908ae 1533 page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
17ee950d
AS
1534 batch = kmap_atomic(page);
1535 offset = 0;
1536
0bc40be8
TU
1537 if (INTEL_INFO(engine->dev)->gen == 8) {
1538 ret = gen8_init_indirectctx_bb(engine,
17ee950d
AS
1539 &wa_ctx->indirect_ctx,
1540 batch,
1541 &offset);
1542 if (ret)
1543 goto out;
1544
0bc40be8 1545 ret = gen8_init_perctx_bb(engine,
17ee950d
AS
1546 &wa_ctx->per_ctx,
1547 batch,
1548 &offset);
1549 if (ret)
1550 goto out;
0bc40be8
TU
1551 } else if (INTEL_INFO(engine->dev)->gen == 9) {
1552 ret = gen9_init_indirectctx_bb(engine,
0504cffc
AS
1553 &wa_ctx->indirect_ctx,
1554 batch,
1555 &offset);
1556 if (ret)
1557 goto out;
1558
0bc40be8 1559 ret = gen9_init_perctx_bb(engine,
0504cffc
AS
1560 &wa_ctx->per_ctx,
1561 batch,
1562 &offset);
1563 if (ret)
1564 goto out;
17ee950d
AS
1565 }
1566
1567out:
1568 kunmap_atomic(batch);
1569 if (ret)
0bc40be8 1570 lrc_destroy_wa_ctx_obj(engine);
17ee950d
AS
1571
1572 return ret;
1573}
1574
04794adb
TU
1575static void lrc_init_hws(struct intel_engine_cs *engine)
1576{
1577 struct drm_i915_private *dev_priv = engine->dev->dev_private;
1578
1579 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1580 (u32)engine->status_page.gfx_addr);
1581 POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1582}
1583
0bc40be8 1584static int gen8_init_common_ring(struct intel_engine_cs *engine)
9b1136d5 1585{
0bc40be8 1586 struct drm_device *dev = engine->dev;
9b1136d5 1587 struct drm_i915_private *dev_priv = dev->dev_private;
c6a2ac71 1588 unsigned int next_context_status_buffer_hw;
9b1136d5 1589
04794adb 1590 lrc_init_hws(engine);
e84fe803 1591
0bc40be8
TU
1592 I915_WRITE_IMR(engine,
1593 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1594 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
73d477f6 1595
0bc40be8 1596 I915_WRITE(RING_MODE_GEN7(engine),
9b1136d5
OM
1597 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1598 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
0bc40be8 1599 POSTING_READ(RING_MODE_GEN7(engine));
dfc53c5e
MT
1600
1601 /*
1602 * Instead of resetting the Context Status Buffer (CSB) read pointer to
1603 * zero, we need to read the write pointer from hardware and use its
1604 * value because "this register is power context save restored".
1605 * Effectively, these states have been observed:
1606 *
1607 * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
1608 * BDW | CSB regs not reset | CSB regs reset |
1609 * CHT | CSB regs not reset | CSB regs not reset |
5590a5f0
BW
1610 * SKL | ? | ? |
1611 * BXT | ? | ? |
dfc53c5e 1612 */
5590a5f0 1613 next_context_status_buffer_hw =
0bc40be8 1614 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
dfc53c5e
MT
1615
1616 /*
1617 * When the CSB registers are reset (also after power-up / gpu reset),
1618 * the CSB write pointer is set to all 1's, which is not valid; use '5' in
1619 * this special case so that the first element read is CSB[0].
1620 */
1621 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1622 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1623
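 /*
 * Illustrative note (not from the original source): GEN8_CSB_ENTRIES is 6,
 * so the pointer forced to 5 here wraps around to entry 0 on the next
 * advance, which is how "the first element read is CSB[0]" above comes
 * about.
 */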
0bc40be8
TU
1624 engine->next_context_status_buffer = next_context_status_buffer_hw;
1625 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
9b1136d5 1626
fc0768ce 1627 intel_engine_init_hangcheck(engine);
9b1136d5
OM
1628
1629 return 0;
1630}
1631
0bc40be8 1632static int gen8_init_render_ring(struct intel_engine_cs *engine)
9b1136d5 1633{
0bc40be8 1634 struct drm_device *dev = engine->dev;
9b1136d5
OM
1635 struct drm_i915_private *dev_priv = dev->dev_private;
1636 int ret;
1637
0bc40be8 1638 ret = gen8_init_common_ring(engine);
9b1136d5
OM
1639 if (ret)
1640 return ret;
1641
1642 /* We need to disable the AsyncFlip performance optimisations in order
1643 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1644 * programmed to '1' on all products.
1645 *
1646 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1647 */
1648 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1649
9b1136d5
OM
1650 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1651
0bc40be8 1652 return init_workarounds_ring(engine);
9b1136d5
OM
1653}
1654
0bc40be8 1655static int gen9_init_render_ring(struct intel_engine_cs *engine)
82ef822e
DL
1656{
1657 int ret;
1658
0bc40be8 1659 ret = gen8_init_common_ring(engine);
82ef822e
DL
1660 if (ret)
1661 return ret;
1662
0bc40be8 1663 return init_workarounds_ring(engine);
82ef822e
DL
1664}
1665
7a01a0a2
MT
1666static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1667{
1668 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
4a570db5 1669 struct intel_engine_cs *engine = req->engine;
7a01a0a2
MT
1670 struct intel_ringbuffer *ringbuf = req->ringbuf;
1671 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1672 int i, ret;
1673
1674 ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
1675 if (ret)
1676 return ret;
1677
1678 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
1679 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1680 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1681
e2f80391
TU
1682 intel_logical_ring_emit_reg(ringbuf,
1683 GEN8_RING_PDP_UDW(engine, i));
7a01a0a2 1684 intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
e2f80391
TU
1685 intel_logical_ring_emit_reg(ringbuf,
1686 GEN8_RING_PDP_LDW(engine, i));
7a01a0a2
MT
1687 intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
1688 }
1689
1690 intel_logical_ring_emit(ringbuf, MI_NOOP);
1691 intel_logical_ring_advance(ringbuf);
1692
1693 return 0;
1694}
1695
be795fc1 1696static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
8e004efc 1697 u64 offset, unsigned dispatch_flags)
15648585 1698{
be795fc1 1699 struct intel_ringbuffer *ringbuf = req->ringbuf;
8e004efc 1700 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
15648585
OM
1701 int ret;
1702
7a01a0a2
MT
1703 /* Don't rely on the hw updating PDPs, especially in lite-restore.
1704 * Ideally, we should set Force PD Restore in ctx descriptor,
1705 * but we can't. Force Restore would be a second option, but
1706 * it is unsafe in case of lite-restore (because the ctx is
2dba3239
MT
1707 * not idle). PML4 is allocated during ppgtt init so this is
1708 * not needed in 48-bit. */
7a01a0a2 1709 if (req->ctx->ppgtt &&
666796da 1710 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
331f38e7
ZL
1711 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
1712 !intel_vgpu_active(req->i915->dev)) {
2dba3239
MT
1713 ret = intel_logical_ring_emit_pdps(req);
1714 if (ret)
1715 return ret;
1716 }
7a01a0a2 1717
666796da 1718 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
7a01a0a2
MT
1719 }
1720
4d616a29 1721 ret = intel_logical_ring_begin(req, 4);
15648585
OM
1722 if (ret)
1723 return ret;
1724
1725 /* FIXME(BDW): Address space and security selectors. */
6922528a
AJ
1726 intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
1727 (ppgtt<<8) |
1728 (dispatch_flags & I915_DISPATCH_RS ?
1729 MI_BATCH_RESOURCE_STREAMER : 0));
15648585
OM
1730 intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
1731 intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
1732 intel_logical_ring_emit(ringbuf, MI_NOOP);
1733 intel_logical_ring_advance(ringbuf);
1734
1735 return 0;
1736}
1737
0bc40be8 1738static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
73d477f6 1739{
0bc40be8 1740 struct drm_device *dev = engine->dev;
73d477f6
OM
1741 struct drm_i915_private *dev_priv = dev->dev_private;
1742 unsigned long flags;
1743
7cd512f1 1744 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
73d477f6
OM
1745 return false;
1746
1747 spin_lock_irqsave(&dev_priv->irq_lock, flags);
0bc40be8
TU
1748 if (engine->irq_refcount++ == 0) {
1749 I915_WRITE_IMR(engine,
1750 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1751 POSTING_READ(RING_IMR(engine->mmio_base));
73d477f6
OM
1752 }
1753 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1754
1755 return true;
1756}
1757
0bc40be8 1758static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
73d477f6 1759{
0bc40be8 1760 struct drm_device *dev = engine->dev;
73d477f6
OM
1761 struct drm_i915_private *dev_priv = dev->dev_private;
1762 unsigned long flags;
1763
1764 spin_lock_irqsave(&dev_priv->irq_lock, flags);
0bc40be8
TU
1765 if (--engine->irq_refcount == 0) {
1766 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1767 POSTING_READ(RING_IMR(engine->mmio_base));
73d477f6
OM
1768 }
1769 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1770}
1771
7deb4d39 1772static int gen8_emit_flush(struct drm_i915_gem_request *request,
4712274c
OM
1773 u32 invalidate_domains,
1774 u32 unused)
1775{
7deb4d39 1776 struct intel_ringbuffer *ringbuf = request->ringbuf;
4a570db5 1777 struct intel_engine_cs *engine = ringbuf->engine;
e2f80391 1778 struct drm_device *dev = engine->dev;
4712274c
OM
1779 struct drm_i915_private *dev_priv = dev->dev_private;
1780 uint32_t cmd;
1781 int ret;
1782
4d616a29 1783 ret = intel_logical_ring_begin(request, 4);
4712274c
OM
1784 if (ret)
1785 return ret;
1786
1787 cmd = MI_FLUSH_DW + 1;
1788
f0a1fb10
CW
1789 /* We always require a command barrier so that subsequent
1790 * commands, such as breadcrumb interrupts, are strictly ordered
1791 * wrt the contents of the write cache being flushed to memory
1792 * (and thus being coherent from the CPU).
1793 */
1794 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1795
1796 if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
1797 cmd |= MI_INVALIDATE_TLB;
4a570db5 1798 if (engine == &dev_priv->engine[VCS])
f0a1fb10 1799 cmd |= MI_INVALIDATE_BSD;
4712274c
OM
1800 }
1801
1802 intel_logical_ring_emit(ringbuf, cmd);
1803 intel_logical_ring_emit(ringbuf,
1804 I915_GEM_HWS_SCRATCH_ADDR |
1805 MI_FLUSH_DW_USE_GTT);
1806 intel_logical_ring_emit(ringbuf, 0); /* upper addr */
1807 intel_logical_ring_emit(ringbuf, 0); /* value */
1808 intel_logical_ring_advance(ringbuf);
1809
1810 return 0;
1811}
1812
7deb4d39 1813static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
4712274c
OM
1814 u32 invalidate_domains,
1815 u32 flush_domains)
1816{
7deb4d39 1817 struct intel_ringbuffer *ringbuf = request->ringbuf;
4a570db5 1818 struct intel_engine_cs *engine = ringbuf->engine;
e2f80391 1819 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
1a5a9ce7 1820 bool vf_flush_wa = false;
4712274c
OM
1821 u32 flags = 0;
1822 int ret;
1823
1824 flags |= PIPE_CONTROL_CS_STALL;
1825
1826 if (flush_domains) {
1827 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1828 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
965fd602 1829 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
40a24488 1830 flags |= PIPE_CONTROL_FLUSH_ENABLE;
4712274c
OM
1831 }
1832
1833 if (invalidate_domains) {
1834 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1835 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1836 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1837 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1838 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1839 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1840 flags |= PIPE_CONTROL_QW_WRITE;
1841 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
4712274c 1842
1a5a9ce7
BW
1843 /*
1844 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1845 * pipe control.
1846 */
e2f80391 1847 if (IS_GEN9(engine->dev))
1a5a9ce7
BW
1848 vf_flush_wa = true;
1849 }
9647ff36 1850
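 /*
 * Dword budget, for illustration (derived from the emits below): the NULL
 * PIPE_CONTROL for the Gen9 VF-cache workaround takes 6 dwords and the real
 * PIPE_CONTROL another 6, hence the 12-vs-6 request here.
 */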
4d616a29 1851 ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
4712274c
OM
1852 if (ret)
1853 return ret;
1854
9647ff36
ID
1855 if (vf_flush_wa) {
1856 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1857 intel_logical_ring_emit(ringbuf, 0);
1858 intel_logical_ring_emit(ringbuf, 0);
1859 intel_logical_ring_emit(ringbuf, 0);
1860 intel_logical_ring_emit(ringbuf, 0);
1861 intel_logical_ring_emit(ringbuf, 0);
1862 }
1863
4712274c
OM
1864 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1865 intel_logical_ring_emit(ringbuf, flags);
1866 intel_logical_ring_emit(ringbuf, scratch_addr);
1867 intel_logical_ring_emit(ringbuf, 0);
1868 intel_logical_ring_emit(ringbuf, 0);
1869 intel_logical_ring_emit(ringbuf, 0);
1870 intel_logical_ring_advance(ringbuf);
1871
1872 return 0;
1873}
1874
c04e0f3b 1875static u32 gen8_get_seqno(struct intel_engine_cs *engine)
e94e37ad 1876{
0bc40be8 1877 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
e94e37ad
OM
1878}
1879
0bc40be8 1880static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
e94e37ad 1881{
0bc40be8 1882 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
e94e37ad
OM
1883}
1884
c04e0f3b 1885static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
319404df 1886{
319404df
ID
1887 /*
1888 * On BXT A steppings there is a HW coherency issue whereby the
1889 * MI_STORE_DATA_IMM storing the completed request's seqno
1890 * occasionally doesn't invalidate the CPU cache. Work around this by
1891 * clflushing the corresponding cacheline whenever the caller wants
1892 * the coherency to be guaranteed. Note that this cacheline is known
1893 * to be clean at this point, since we only write it in
1894 * bxt_a_set_seqno(), where we also do a clflush after the write. So
1895 * this clflush in practice becomes an invalidate operation.
1896 */
c04e0f3b 1897 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
319404df
ID
1898}
1899
0bc40be8 1900static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
319404df 1901{
0bc40be8 1902 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
319404df
ID
1903
1904 /* See bxt_a_seqno_barrier() for the explanation of the clflush. */
0bc40be8 1905 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
319404df
ID
1906}
1907
7c17d377
CW
1908/*
1909 * Reserve space for 2 NOOPs at the end of each request to be
1910 * used as a workaround for not being allowed to do lite
1911 * restore with HEAD==TAIL (WaIdleLiteRestore).
1912 */
1913#define WA_TAIL_DWORDS 2
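/*
 * For illustration (derived from the emit paths below, not new behaviour):
 * gen8_emit_request() asks for 6 + WA_TAIL_DWORDS dwords and
 * gen8_emit_request_render() for 8 + WA_TAIL_DWORDS, so the ring always has
 * room left for the two reserved padding MI_NOOPs when the request is
 * submitted.
 */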
1914
1915static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
1916{
1917 return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
1918}
1919
c4e76638 1920static int gen8_emit_request(struct drm_i915_gem_request *request)
4da46e1e 1921{
c4e76638 1922 struct intel_ringbuffer *ringbuf = request->ringbuf;
4da46e1e
OM
1923 int ret;
1924
7c17d377 1925 ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
4da46e1e
OM
1926 if (ret)
1927 return ret;
1928
7c17d377
CW
1929 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1930 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
4da46e1e 1931
4da46e1e 1932 intel_logical_ring_emit(ringbuf,
7c17d377
CW
1933 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1934 intel_logical_ring_emit(ringbuf,
4a570db5 1935 hws_seqno_address(request->engine) |
7c17d377 1936 MI_FLUSH_DW_USE_GTT);
4da46e1e 1937 intel_logical_ring_emit(ringbuf, 0);
c4e76638 1938 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
4da46e1e
OM
1939 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1940 intel_logical_ring_emit(ringbuf, MI_NOOP);
7c17d377
CW
1941 return intel_logical_ring_advance_and_submit(request);
1942}
4da46e1e 1943
7c17d377
CW
1944static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1945{
1946 struct intel_ringbuffer *ringbuf = request->ringbuf;
1947 int ret;
53292cdb 1948
ce81a65c 1949 ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
7c17d377
CW
1950 if (ret)
1951 return ret;
1952
ce81a65c
MW
1953 /* We're using qword write, seqno should be aligned to 8 bytes. */
1954 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1955
7c17d377
CW
1956 /* w/a: for post-sync ops following a GPGPU operation we
1957 * need a prior CS_STALL, which is emitted by the flush
1958 * following the batch.
1959 */
ce81a65c 1960 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
7c17d377
CW
1961 intel_logical_ring_emit(ringbuf,
1962 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1963 PIPE_CONTROL_CS_STALL |
1964 PIPE_CONTROL_QW_WRITE));
4a570db5 1965 intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
7c17d377
CW
1966 intel_logical_ring_emit(ringbuf, 0);
1967 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
ce81a65c
MW
1968 /* We're thrashing one dword of HWS. */
1969 intel_logical_ring_emit(ringbuf, 0);
7c17d377 1970 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
ce81a65c 1971 intel_logical_ring_emit(ringbuf, MI_NOOP);
7c17d377 1972 return intel_logical_ring_advance_and_submit(request);
4da46e1e
OM
1973}
1974
be01363f 1975static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
cef437ad 1976{
cef437ad 1977 struct render_state so;
cef437ad
DL
1978 int ret;
1979
4a570db5 1980 ret = i915_gem_render_state_prepare(req->engine, &so);
cef437ad
DL
1981 if (ret)
1982 return ret;
1983
1984 if (so.rodata == NULL)
1985 return 0;
1986
4a570db5 1987 ret = req->engine->emit_bb_start(req, so.ggtt_offset,
be01363f 1988 I915_DISPATCH_SECURE);
cef437ad
DL
1989 if (ret)
1990 goto out;
1991
4a570db5 1992 ret = req->engine->emit_bb_start(req,
84e81020
AS
1993 (so.ggtt_offset + so.aux_batch_offset),
1994 I915_DISPATCH_SECURE);
1995 if (ret)
1996 goto out;
1997
b2af0376 1998 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
cef437ad 1999
cef437ad
DL
2000out:
2001 i915_gem_render_state_fini(&so);
2002 return ret;
2003}
2004
8753181e 2005static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
e7778be1
TD
2006{
2007 int ret;
2008
e2be4faf 2009 ret = intel_logical_ring_workarounds_emit(req);
e7778be1
TD
2010 if (ret)
2011 return ret;
2012
3bbaba0c
PA
2013 ret = intel_rcs_context_init_mocs(req);
2014 /*
2015 * Failing to program the MOCS is non-fatal: the system will not
2016 * run at peak performance, so log an error and carry on.
2017 */
2018 if (ret)
2019 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
2020
be01363f 2021 return intel_lr_context_render_state_init(req);
e7778be1
TD
2022}
2023
73e4d07f
OM
2024/**
2025 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
2026 *
2027 * @engine: Engine Command Streamer.
2028 *
2029 */
0bc40be8 2030void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
454afebd 2031{
6402c330 2032 struct drm_i915_private *dev_priv;
9832b9da 2033
117897f4 2034 if (!intel_engine_initialized(engine))
48d82387
OM
2035 return;
2036
27af5eea
TU
2037 /*
2038 * The tasklet cannot be active at this point due to intel_mark_active/idle,
2039 * so this is just for documentation.
2040 */
2041 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
2042 tasklet_kill(&engine->irq_tasklet);
2043
0bc40be8 2044 dev_priv = engine->dev->dev_private;
6402c330 2045
0bc40be8
TU
2046 if (engine->buffer) {
2047 intel_logical_ring_stop(engine);
2048 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
b0366a54 2049 }
48d82387 2050
0bc40be8
TU
2051 if (engine->cleanup)
2052 engine->cleanup(engine);
48d82387 2053
0bc40be8
TU
2054 i915_cmd_parser_fini_ring(engine);
2055 i915_gem_batch_pool_fini(&engine->batch_pool);
48d82387 2056
0bc40be8 2057 if (engine->status_page.obj) {
7d774cac 2058 i915_gem_object_unpin_map(engine->status_page.obj);
0bc40be8 2059 engine->status_page.obj = NULL;
48d82387 2060 }
17ee950d 2061
0bc40be8
TU
2062 engine->idle_lite_restore_wa = 0;
2063 engine->disable_lite_restore_wa = false;
2064 engine->ctx_desc_template = 0;
ca82580c 2065
0bc40be8
TU
2066 lrc_destroy_wa_ctx_obj(engine);
2067 engine->dev = NULL;
454afebd
OM
2068}
2069
c9cacf93
TU
2070static void
2071logical_ring_default_vfuncs(struct drm_device *dev,
0bc40be8 2072 struct intel_engine_cs *engine)
c9cacf93
TU
2073{
2074 /* Default vfuncs which can be overridden by each engine. */
0bc40be8
TU
2075 engine->init_hw = gen8_init_common_ring;
2076 engine->emit_request = gen8_emit_request;
2077 engine->emit_flush = gen8_emit_flush;
2078 engine->irq_get = gen8_logical_ring_get_irq;
2079 engine->irq_put = gen8_logical_ring_put_irq;
2080 engine->emit_bb_start = gen8_emit_bb_start;
c04e0f3b
CW
2081 engine->get_seqno = gen8_get_seqno;
2082 engine->set_seqno = gen8_set_seqno;
c9cacf93 2083 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
c04e0f3b 2084 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
0bc40be8 2085 engine->set_seqno = bxt_a_set_seqno;
c9cacf93
TU
2086 }
2087}
2088
d9f3af96 2089static inline void
0bc40be8 2090logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
d9f3af96 2091{
0bc40be8
TU
2092 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
2093 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
d9f3af96
TU
2094}
2095
7d774cac 2096static int
04794adb
TU
2097lrc_setup_hws(struct intel_engine_cs *engine,
2098 struct drm_i915_gem_object *dctx_obj)
2099{
7d774cac 2100 void *hws;
04794adb
TU
2101
2102 /* The HWSP is part of the default context object in LRC mode. */
2103 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
2104 LRC_PPHWSP_PN * PAGE_SIZE;
7d774cac
TU
2105 hws = i915_gem_object_pin_map(dctx_obj);
2106 if (IS_ERR(hws))
2107 return PTR_ERR(hws);
2108 engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
04794adb 2109 engine->status_page.obj = dctx_obj;
7d774cac
TU
2110
2111 return 0;
04794adb
TU
2112}
2113
c9cacf93 2114static int
0bc40be8 2115logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
454afebd 2116{
3756685a
TU
2117 struct drm_i915_private *dev_priv = to_i915(dev);
2118 struct intel_context *dctx = dev_priv->kernel_context;
2119 enum forcewake_domains fw_domains;
48d82387 2120 int ret;
48d82387
OM
2121
2122 /* Intentionally left blank. */
0bc40be8 2123 engine->buffer = NULL;
48d82387 2124
0bc40be8
TU
2125 engine->dev = dev;
2126 INIT_LIST_HEAD(&engine->active_list);
2127 INIT_LIST_HEAD(&engine->request_list);
2128 i915_gem_batch_pool_init(dev, &engine->batch_pool);
2129 init_waitqueue_head(&engine->irq_queue);
48d82387 2130
0bc40be8
TU
2131 INIT_LIST_HEAD(&engine->buffers);
2132 INIT_LIST_HEAD(&engine->execlist_queue);
2133 INIT_LIST_HEAD(&engine->execlist_retired_req_list);
2134 spin_lock_init(&engine->execlist_lock);
acdd884a 2135
27af5eea
TU
2136 tasklet_init(&engine->irq_tasklet,
2137 intel_lrc_irq_handler, (unsigned long)engine);
2138
0bc40be8 2139 logical_ring_init_platform_invariants(engine);
ca82580c 2140
3756685a
TU
2141 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
2142 RING_ELSP(engine),
2143 FW_REG_WRITE);
2144
2145 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2146 RING_CONTEXT_STATUS_PTR(engine),
2147 FW_REG_READ | FW_REG_WRITE);
2148
2149 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2150 RING_CONTEXT_STATUS_BUF_BASE(engine),
2151 FW_REG_READ);
2152
2153 engine->fw_domains = fw_domains;
2154
0bc40be8 2155 ret = i915_cmd_parser_init_ring(engine);
48d82387 2156 if (ret)
b0366a54 2157 goto error;
48d82387 2158
0bc40be8 2159 ret = intel_lr_context_deferred_alloc(dctx, engine);
e84fe803 2160 if (ret)
b0366a54 2161 goto error;
e84fe803
NH
2162
2163 /* As this is the default context, always pin it */
0bc40be8 2164 ret = intel_lr_context_do_pin(dctx, engine);
e84fe803
NH
2165 if (ret) {
2166 DRM_ERROR(
2167 "Failed to pin and map ringbuffer %s: %d\n",
0bc40be8 2168 engine->name, ret);
b0366a54 2169 goto error;
e84fe803 2170 }
564ddb2f 2171
04794adb 2172 /* And setup the hardware status page. */
7d774cac
TU
2173 ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
2174 if (ret) {
2175 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
2176 goto error;
2177 }
04794adb 2178
b0366a54
DG
2179 return 0;
2180
2181error:
0bc40be8 2182 intel_logical_ring_cleanup(engine);
564ddb2f 2183 return ret;
454afebd
OM
2184}
2185
2186static int logical_render_ring_init(struct drm_device *dev)
2187{
2188 struct drm_i915_private *dev_priv = dev->dev_private;
4a570db5 2189 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
99be1dfe 2190 int ret;
454afebd 2191
e2f80391
TU
2192 engine->name = "render ring";
2193 engine->id = RCS;
2194 engine->exec_id = I915_EXEC_RENDER;
2195 engine->guc_id = GUC_RENDER_ENGINE;
2196 engine->mmio_base = RENDER_RING_BASE;
d9f3af96 2197
e2f80391 2198 logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
73d477f6 2199 if (HAS_L3_DPF(dev))
e2f80391 2200 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
454afebd 2201
e2f80391 2202 logical_ring_default_vfuncs(dev, engine);
c9cacf93
TU
2203
2204 /* Override some for render ring. */
82ef822e 2205 if (INTEL_INFO(dev)->gen >= 9)
e2f80391 2206 engine->init_hw = gen9_init_render_ring;
82ef822e 2207 else
e2f80391
TU
2208 engine->init_hw = gen8_init_render_ring;
2209 engine->init_context = gen8_init_rcs_context;
2210 engine->cleanup = intel_fini_pipe_control;
2211 engine->emit_flush = gen8_emit_flush_render;
2212 engine->emit_request = gen8_emit_request_render;
9b1136d5 2213
e2f80391 2214 engine->dev = dev;
c4db7599 2215
e2f80391 2216 ret = intel_init_pipe_control(engine);
99be1dfe
DV
2217 if (ret)
2218 return ret;
2219
e2f80391 2220 ret = intel_init_workaround_bb(engine);
17ee950d
AS
2221 if (ret) {
2222 /*
2223 * We continue even if we fail to initialize the WA batch
2224 * because we only expect rare glitches, nothing critical
2225 * that would prevent us from using the GPU.
2226 */
2227 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2228 ret);
2229 }
2230
e2f80391 2231 ret = logical_ring_init(dev, engine);
c4db7599 2232 if (ret) {
e2f80391 2233 lrc_destroy_wa_ctx_obj(engine);
c4db7599 2234 }
17ee950d
AS
2235
2236 return ret;
454afebd
OM
2237}
2238
2239static int logical_bsd_ring_init(struct drm_device *dev)
2240{
2241 struct drm_i915_private *dev_priv = dev->dev_private;
4a570db5 2242 struct intel_engine_cs *engine = &dev_priv->engine[VCS];
454afebd 2243
e2f80391
TU
2244 engine->name = "bsd ring";
2245 engine->id = VCS;
2246 engine->exec_id = I915_EXEC_BSD;
2247 engine->guc_id = GUC_VIDEO_ENGINE;
2248 engine->mmio_base = GEN6_BSD_RING_BASE;
454afebd 2249
e2f80391
TU
2250 logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
2251 logical_ring_default_vfuncs(dev, engine);
9b1136d5 2252
e2f80391 2253 return logical_ring_init(dev, engine);
454afebd
OM
2254}
2255
2256static int logical_bsd2_ring_init(struct drm_device *dev)
2257{
2258 struct drm_i915_private *dev_priv = dev->dev_private;
4a570db5 2259 struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
454afebd 2260
e2f80391
TU
2261 engine->name = "bsd2 ring";
2262 engine->id = VCS2;
2263 engine->exec_id = I915_EXEC_BSD;
2264 engine->guc_id = GUC_VIDEO_ENGINE2;
2265 engine->mmio_base = GEN8_BSD2_RING_BASE;
454afebd 2266
e2f80391
TU
2267 logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
2268 logical_ring_default_vfuncs(dev, engine);
9b1136d5 2269
e2f80391 2270 return logical_ring_init(dev, engine);
454afebd
OM
2271}
2272
2273static int logical_blt_ring_init(struct drm_device *dev)
2274{
2275 struct drm_i915_private *dev_priv = dev->dev_private;
4a570db5 2276 struct intel_engine_cs *engine = &dev_priv->engine[BCS];
454afebd 2277
e2f80391
TU
2278 engine->name = "blitter ring";
2279 engine->id = BCS;
2280 engine->exec_id = I915_EXEC_BLT;
2281 engine->guc_id = GUC_BLITTER_ENGINE;
2282 engine->mmio_base = BLT_RING_BASE;
454afebd 2283
e2f80391
TU
2284 logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
2285 logical_ring_default_vfuncs(dev, engine);
9b1136d5 2286
e2f80391 2287 return logical_ring_init(dev, engine);
454afebd
OM
2288}
2289
2290static int logical_vebox_ring_init(struct drm_device *dev)
2291{
2292 struct drm_i915_private *dev_priv = dev->dev_private;
4a570db5 2293 struct intel_engine_cs *engine = &dev_priv->engine[VECS];
454afebd 2294
e2f80391
TU
2295 engine->name = "video enhancement ring";
2296 engine->id = VECS;
2297 engine->exec_id = I915_EXEC_VEBOX;
2298 engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
2299 engine->mmio_base = VEBOX_RING_BASE;
454afebd 2300
e2f80391
TU
2301 logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
2302 logical_ring_default_vfuncs(dev, engine);
9b1136d5 2303
e2f80391 2304 return logical_ring_init(dev, engine);
454afebd
OM
2305}
2306
73e4d07f
OM
2307/**
2308 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
2309 * @dev: DRM device.
2310 *
2311 * This function inits the engines for Execlists-style submission (the equivalent in the
117897f4 2312 * legacy ringbuffer submission world would be i915_gem_init_engines). It initializes only
73e4d07f
OM
2313 * those engines that are present in the hardware.
2314 *
2315 * Return: non-zero if the initialization failed.
2316 */
454afebd
OM
2317int intel_logical_rings_init(struct drm_device *dev)
2318{
2319 struct drm_i915_private *dev_priv = dev->dev_private;
2320 int ret;
2321
2322 ret = logical_render_ring_init(dev);
2323 if (ret)
2324 return ret;
2325
2326 if (HAS_BSD(dev)) {
2327 ret = logical_bsd_ring_init(dev);
2328 if (ret)
2329 goto cleanup_render_ring;
2330 }
2331
2332 if (HAS_BLT(dev)) {
2333 ret = logical_blt_ring_init(dev);
2334 if (ret)
2335 goto cleanup_bsd_ring;
2336 }
2337
2338 if (HAS_VEBOX(dev)) {
2339 ret = logical_vebox_ring_init(dev);
2340 if (ret)
2341 goto cleanup_blt_ring;
2342 }
2343
2344 if (HAS_BSD2(dev)) {
2345 ret = logical_bsd2_ring_init(dev);
2346 if (ret)
2347 goto cleanup_vebox_ring;
2348 }
2349
454afebd
OM
2350 return 0;
2351
454afebd 2352cleanup_vebox_ring:
4a570db5 2353 intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
454afebd 2354cleanup_blt_ring:
4a570db5 2355 intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
454afebd 2356cleanup_bsd_ring:
4a570db5 2357 intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
454afebd 2358cleanup_render_ring:
4a570db5 2359 intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
454afebd
OM
2360
2361 return ret;
2362}
2363
0cea6502
JM
2364static u32
2365make_rpcs(struct drm_device *dev)
2366{
2367 u32 rpcs = 0;
2368
2369 /*
2370 * No explicit RPCS request is needed to ensure full
2371 * slice/subslice/EU enablement prior to Gen9.
2372 */
2373 if (INTEL_INFO(dev)->gen < 9)
2374 return 0;
2375
2376 /*
2377 * Starting in Gen9, render power gating can leave
2378 * slice/subslice/EU in a partially enabled state. We
2379 * must make an explicit request through RPCS for full
2380 * enablement.
2381 */
2382 if (INTEL_INFO(dev)->has_slice_pg) {
2383 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
2384 rpcs |= INTEL_INFO(dev)->slice_total <<
2385 GEN8_RPCS_S_CNT_SHIFT;
2386 rpcs |= GEN8_RPCS_ENABLE;
2387 }
2388
2389 if (INTEL_INFO(dev)->has_subslice_pg) {
2390 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
2391 rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
2392 GEN8_RPCS_SS_CNT_SHIFT;
2393 rpcs |= GEN8_RPCS_ENABLE;
2394 }
2395
2396 if (INTEL_INFO(dev)->has_eu_pg) {
2397 rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
2398 GEN8_RPCS_EU_MIN_SHIFT;
2399 rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
2400 GEN8_RPCS_EU_MAX_SHIFT;
2401 rpcs |= GEN8_RPCS_ENABLE;
2402 }
2403
2404 return rpcs;
2405}
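/*
 * Worked example (hypothetical values, not from the original source): on a
 * Gen9 part with has_slice_pg and has_subslice_pg set, slice_total == 1 and
 * subslice_per_slice == 3, make_rpcs() above would return
 *
 *   GEN8_RPCS_ENABLE |
 *   GEN8_RPCS_S_CNT_ENABLE  | (1 << GEN8_RPCS_S_CNT_SHIFT) |
 *   GEN8_RPCS_SS_CNT_ENABLE | (3 << GEN8_RPCS_SS_CNT_SHIFT)
 *
 * (plus the EU min/max fields when has_eu_pg is set); the value is written
 * to GEN8_R_PWR_CLK_STATE by populate_lr_context() below.
 */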
2406
0bc40be8 2407static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
71562919
MT
2408{
2409 u32 indirect_ctx_offset;
2410
0bc40be8 2411 switch (INTEL_INFO(engine->dev)->gen) {
71562919 2412 default:
0bc40be8 2413 MISSING_CASE(INTEL_INFO(engine->dev)->gen);
71562919
MT
2414 /* fall through */
2415 case 9:
2416 indirect_ctx_offset =
2417 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2418 break;
2419 case 8:
2420 indirect_ctx_offset =
2421 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2422 break;
2423 }
2424
2425 return indirect_ctx_offset;
2426}
2427
8670d6f9 2428static int
7d774cac
TU
2429populate_lr_context(struct intel_context *ctx,
2430 struct drm_i915_gem_object *ctx_obj,
0bc40be8
TU
2431 struct intel_engine_cs *engine,
2432 struct intel_ringbuffer *ringbuf)
8670d6f9 2433{
0bc40be8 2434 struct drm_device *dev = engine->dev;
2d965536 2435 struct drm_i915_private *dev_priv = dev->dev_private;
ae6c4806 2436 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
7d774cac
TU
2437 void *vaddr;
2438 u32 *reg_state;
8670d6f9
OM
2439 int ret;
2440
2d965536
TD
2441 if (!ppgtt)
2442 ppgtt = dev_priv->mm.aliasing_ppgtt;
2443
8670d6f9
OM
2444 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2445 if (ret) {
2446 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2447 return ret;
2448 }
2449
7d774cac
TU
2450 vaddr = i915_gem_object_pin_map(ctx_obj);
2451 if (IS_ERR(vaddr)) {
2452 ret = PTR_ERR(vaddr);
2453 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
8670d6f9
OM
2454 return ret;
2455 }
7d774cac 2456 ctx_obj->dirty = true;
8670d6f9
OM
2457
2458 /* The second page of the context object contains some fields which must
2459 * be set up prior to the first execution. */
7d774cac 2460 reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
8670d6f9
OM
2461
2462 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
2463 * commands followed by (reg, value) pairs. The values we are setting here are
2464 * only for the first context restore: on a subsequent save, the GPU will
2465 * recreate this batchbuffer with new values (including all the missing
2466 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
0d925ea0 2467 reg_state[CTX_LRI_HEADER_0] =
0bc40be8
TU
2468 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
2469 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
2470 RING_CONTEXT_CONTROL(engine),
0d925ea0
VS
2471 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2472 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
99cf8ea1
MT
2473 (HAS_RESOURCE_STREAMER(dev) ?
2474 CTX_CTRL_RS_CTX_ENABLE : 0)));
0bc40be8
TU
2475 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
2476 0);
2477 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
2478 0);
7ba717cf
TD
2479 /* Ring buffer start address is not known until the buffer is pinned.
2480 * It is written to the context image in execlists_update_context()
2481 */
0bc40be8
TU
2482 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
2483 RING_START(engine->mmio_base), 0);
2484 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
2485 RING_CTL(engine->mmio_base),
0d925ea0 2486 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
0bc40be8
TU
2487 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
2488 RING_BBADDR_UDW(engine->mmio_base), 0);
2489 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
2490 RING_BBADDR(engine->mmio_base), 0);
2491 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
2492 RING_BBSTATE(engine->mmio_base),
0d925ea0 2493 RING_BB_PPGTT);
0bc40be8
TU
2494 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
2495 RING_SBBADDR_UDW(engine->mmio_base), 0);
2496 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
2497 RING_SBBADDR(engine->mmio_base), 0);
2498 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
2499 RING_SBBSTATE(engine->mmio_base), 0);
2500 if (engine->id == RCS) {
2501 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
2502 RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
2503 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
2504 RING_INDIRECT_CTX(engine->mmio_base), 0);
2505 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
2506 RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
2507 if (engine->wa_ctx.obj) {
2508 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
17ee950d
AS
2509 uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
2510
2511 reg_state[CTX_RCS_INDIRECT_CTX+1] =
2512 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
2513 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
2514
2515 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
0bc40be8 2516 intel_lr_indirect_ctx_offset(engine) << 6;
17ee950d
AS
2517
2518 reg_state[CTX_BB_PER_CTX_PTR+1] =
2519 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
2520 0x01;
2521 }
8670d6f9 2522 }
0d925ea0 2523 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
0bc40be8
TU
2524 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
2525 RING_CTX_TIMESTAMP(engine->mmio_base), 0);
0d925ea0 2526 /* PDP values will be assigned later if needed */
0bc40be8
TU
2527 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
2528 0);
2529 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
2530 0);
2531 ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
2532 0);
2533 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
2534 0);
2535 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
2536 0);
2537 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
2538 0);
2539 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
2540 0);
2541 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
2542 0);
d7b2633d 2543
2dba3239
MT
2544 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
2545 /* 64b PPGTT (48bit canonical)
2546 * PDP0_DESCRIPTOR contains the base address to PML4 and
2547 * other PDP Descriptors are ignored.
2548 */
2549 ASSIGN_CTX_PML4(ppgtt, reg_state);
2550 } else {
2551 /* 32b PPGTT
2552 * PDP*_DESCRIPTOR contains the base address of space supported.
2553 * With dynamic page allocation, PDPs may not be allocated at
2554 * this point. Point the unallocated PDPs to the scratch page
2555 */
c6a2ac71 2556 execlists_update_context_pdps(ppgtt, reg_state);
2dba3239
MT
2557 }
2558
0bc40be8 2559 if (engine->id == RCS) {
8670d6f9 2560 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
0d925ea0
VS
2561 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2562 make_rpcs(dev));
8670d6f9
OM
2563 }
2564
7d774cac 2565 i915_gem_object_unpin_map(ctx_obj);
8670d6f9
OM
2566
2567 return 0;
2568}
2569
73e4d07f
OM
2570/**
2571 * intel_lr_context_free() - free the LRC specific bits of a context
2572 * @ctx: the LR context to free.
2573 *
2574 * The real context freeing is done in i915_gem_context_free: this only
2575 * takes care of the bits that are LRC related: the per-engine backing
2576 * objects and the logical ringbuffer.
2577 */
ede7d42b
OM
2578void intel_lr_context_free(struct intel_context *ctx)
2579{
8c857917
OM
2580 int i;
2581
666796da 2582 for (i = I915_NUM_ENGINES; --i >= 0; ) {
e28e404c 2583 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
8c857917 2584 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
84c2377f 2585
e28e404c
DG
2586 if (!ctx_obj)
2587 continue;
dcb4c12a 2588
e28e404c
DG
2589 if (ctx == ctx->i915->kernel_context) {
2590 intel_unpin_ringbuffer_obj(ringbuf);
2591 i915_gem_object_ggtt_unpin(ctx_obj);
7d774cac 2592 i915_gem_object_unpin_map(ctx_obj);
8c857917 2593 }
e28e404c
DG
2594
2595 WARN_ON(ctx->engine[i].pin_count);
2596 intel_ringbuffer_free(ringbuf);
2597 drm_gem_object_unreference(&ctx_obj->base);
8c857917
OM
2598 }
2599}
2600
c5d46ee2
DG
2601/**
2602 * intel_lr_context_size() - return the size of the context for an engine
2603 * @engine: which engine to find the context size for
2604 *
2605 * Each engine may require a different amount of space for a context image,
2606 * so when allocating (or copying) an image, this function can be used to
2607 * find the right size for the specific engine.
2608 *
2609 * Return: size (in bytes) of an engine-specific context image
2610 *
2611 * Note: this size includes the HWSP, which is part of the context image
2612 * in LRC mode, but does not include the "shared data page" used with
2613 * GuC submission. The caller should account for this if using the GuC.
2614 */
0bc40be8 2615uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
8c857917
OM
2616{
2617 int ret = 0;
2618
0bc40be8 2619 WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
8c857917 2620
0bc40be8 2621 switch (engine->id) {
8c857917 2622 case RCS:
0bc40be8 2623 if (INTEL_INFO(engine->dev)->gen >= 9)
468c6816
MN
2624 ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2625 else
2626 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
8c857917
OM
2627 break;
2628 case VCS:
2629 case BCS:
2630 case VECS:
2631 case VCS2:
2632 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
2633 break;
2634 }
2635
2636 return ret;
ede7d42b
OM
2637}
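/*
 * A minimal usage sketch (mirroring intel_lr_context_deferred_alloc() below,
 * not an additional API): callers round the reported size up to a page and
 * add the extra page shared with the GuC before allocating the backing
 * object:
 *
 *	context_size = round_up(intel_lr_context_size(engine), 4096);
 *	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 *	ctx_obj = i915_gem_alloc_object(dev, context_size);
 */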
2638
73e4d07f 2639/**
e84fe803 2640 * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
73e4d07f
OM
2641 * @ctx: LR context to create.
2642 * @engine: engine to be used with the context.
2643 *
2644 * This function can be called more than once, with different engines, if we plan
2645 * to use the context with them. The context backing objects and the ringbuffers
2646 * (especially the ringbuffer backing objects) take up a lot of memory, which is why
2647 * the creation is a deferred call: it's better to make sure first that we actually
2648 * need to use a given engine with the context.
2649 *
32197aab 2650 * Return: non-zero on error.
73e4d07f 2651 */
e84fe803
NH
2652
2653int intel_lr_context_deferred_alloc(struct intel_context *ctx,
0bc40be8 2654 struct intel_engine_cs *engine)
ede7d42b 2655{
0bc40be8 2656 struct drm_device *dev = engine->dev;
8c857917
OM
2657 struct drm_i915_gem_object *ctx_obj;
2658 uint32_t context_size;
84c2377f 2659 struct intel_ringbuffer *ringbuf;
8c857917
OM
2660 int ret;
2661
ede7d42b 2662 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
0bc40be8 2663 WARN_ON(ctx->engine[engine->id].state);
ede7d42b 2664
0bc40be8 2665 context_size = round_up(intel_lr_context_size(engine), 4096);
8c857917 2666
d1675198
AD
2667 /* One extra page for the data shared between the driver and the GuC */
2668 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2669
149c86e7 2670 ctx_obj = i915_gem_alloc_object(dev, context_size);
3126a660
DC
2671 if (!ctx_obj) {
2672 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2673 return -ENOMEM;
8c857917
OM
2674 }
2675
0bc40be8 2676 ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
01101fa7
CW
2677 if (IS_ERR(ringbuf)) {
2678 ret = PTR_ERR(ringbuf);
e84fe803 2679 goto error_deref_obj;
8670d6f9
OM
2680 }
2681
0bc40be8 2682 ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
8670d6f9
OM
2683 if (ret) {
2684 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
e84fe803 2685 goto error_ringbuf;
84c2377f
OM
2686 }
2687
0bc40be8
TU
2688 ctx->engine[engine->id].ringbuf = ringbuf;
2689 ctx->engine[engine->id].state = ctx_obj;
ede7d42b 2690
0bc40be8 2691 if (ctx != ctx->i915->kernel_context && engine->init_context) {
e84fe803 2692 struct drm_i915_gem_request *req;
76c39168 2693
0bc40be8 2694 req = i915_gem_request_alloc(engine, ctx);
26827088
DG
2695 if (IS_ERR(req)) {
2696 ret = PTR_ERR(req);
2697 DRM_ERROR("ring create req: %d\n", ret);
e84fe803 2698 goto error_ringbuf;
771b9a53
MT
2699 }
2700
0bc40be8 2701 ret = engine->init_context(req);
aa9b7810 2702 i915_add_request_no_flush(req);
e84fe803
NH
2703 if (ret) {
2704 DRM_ERROR("ring init context: %d\n",
2705 ret);
e84fe803
NH
2706 goto error_ringbuf;
2707 }
564ddb2f 2708 }
ede7d42b 2709 return 0;
8670d6f9 2710
01101fa7
CW
2711error_ringbuf:
2712 intel_ringbuffer_free(ringbuf);
e84fe803 2713error_deref_obj:
8670d6f9 2714 drm_gem_object_unreference(&ctx_obj->base);
0bc40be8
TU
2715 ctx->engine[engine->id].ringbuf = NULL;
2716 ctx->engine[engine->id].state = NULL;
8670d6f9 2717 return ret;
ede7d42b 2718}
3e5b6f05 2719
7d774cac
TU
2720void intel_lr_context_reset(struct drm_i915_private *dev_priv,
2721 struct intel_context *ctx)
3e5b6f05 2722{
e2f80391 2723 struct intel_engine_cs *engine;
3e5b6f05 2724
b4ac5afc 2725 for_each_engine(engine, dev_priv) {
3e5b6f05 2726 struct drm_i915_gem_object *ctx_obj =
e2f80391 2727 ctx->engine[engine->id].state;
3e5b6f05 2728 struct intel_ringbuffer *ringbuf =
e2f80391 2729 ctx->engine[engine->id].ringbuf;
7d774cac 2730 void *vaddr;
3e5b6f05 2731 uint32_t *reg_state;
3e5b6f05
TD
2732
2733 if (!ctx_obj)
2734 continue;
2735
7d774cac
TU
2736 vaddr = i915_gem_object_pin_map(ctx_obj);
2737 if (WARN_ON(IS_ERR(vaddr)))
3e5b6f05 2738 continue;
7d774cac
TU
2739
2740 reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
2741 ctx_obj->dirty = true;
3e5b6f05
TD
2742
2743 reg_state[CTX_RING_HEAD+1] = 0;
2744 reg_state[CTX_RING_TAIL+1] = 0;
2745
7d774cac 2746 i915_gem_object_unpin_map(ctx_obj);
3e5b6f05
TD
2747
2748 ringbuf->head = 0;
2749 ringbuf->tail = 0;
2750 }
2751}