/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"
static const char *engine_str(int engine)
{
	switch (engine) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}
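
/*
 * Editor's note: a worked example of the window logic above, with made-up
 * numbers. Suppose the reader asked for file offset e->start = 100 into the
 * error dump. Printfs whose output ends before that offset are skipped
 * entirely by __i915_error_seek() (only e->pos advances). The first printf
 * that straddles the boundary, say 120 bytes long starting at e->pos = 0,
 * is formatted at the head of the buffer; __i915_error_advance() then
 * memmove()s the trailing 120 - 100 = 20 bytes to the front and sets
 * e->pos = e->start, e->bytes = 20. Everything afterwards appends normally.
 */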

static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf that hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first puts that hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
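
/*
 * Editor's sketch (not part of the original file) of how the helpers above
 * compose. Errors such as -ENOSPC on overflow are latched in m->err by
 * __i915_error_ok(), so callers can emit unconditionally and check once:
 */
#if 0
static void example_dump(struct drm_i915_error_state_buf *m)
{
	err_printf(m, "EIR: 0x%08x\n", 0x00000010); /* illustrative value */
	err_puts(m, " dirty");
	/* ... many more emits; m->err is checked by the caller at the end */
}
#endif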

static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, " %s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_ENGINES; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->engine != -1 ? " " : "");
		err_puts(m, engine_str(err->engine));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
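
/*
 * Editor's note: with illustrative values, a buffer entry printed above
 * looks roughly like
 *
 *  Active [2]:
 *    00000000_00a3f000     4096 01 01 [ 1a 00 00 00 00 ] 1a X dirty render LLC (fence: 2)
 *
 * i.e. GTT offset, size, read/write domains, per-engine read seqnos, write
 * seqno, then the flag strings built by the helpers at the top of the file.
 */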

static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       struct drm_i915_error_engine *ee)
{
	err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD:  0x%08x\n", ee->head);
	err_printf(m, "  TAIL:  0x%08x\n", ee->tail);
	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd >> 32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, "  INSTDONE: 0x%08x\n", ee->instdone);
	if (INTEL_GEN(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x %08x\n",
			   (u32)(ee->bbaddr >> 32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (INTEL_GEN(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ee->semaphore_mboxes[0],
			   ee->semaphore_seqno[0]);
		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ee->semaphore_mboxes[1],
			   ee->semaphore_seqno[1]);
		if (HAS_VEBOX(m->i915)) {
			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ee->semaphore_mboxes[2],
				   ee->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (INTEL_GEN(m->i915) >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  seqno: 0x%08x\n", ee->seqno);
	err_printf(m, "  last_seqno: 0x%08x\n", ee->last_seqno);
	err_printf(m, "  waiting: %s\n", yesno(ee->waiting));
	err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
	err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
	err_printf(m, "  hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ee->hangcheck_action),
		   ee->hangcheck_score);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
311
ab0e7ff9
CW
312static void print_error_obj(struct drm_i915_error_state_buf *m,
313 struct drm_i915_error_object *obj)
314{
315 int page, offset, elt;
316
317 for (page = offset = 0; page < obj->page_count; page++) {
318 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
319 err_printf(m, "%08x : %08x\n", offset,
320 obj->pages[page][elt]);
321 offset += 4;
322 }
323 }
324}

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->engine[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].hangcheck_score == max_hangcheck_score &&
		    error->engine[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   engine_str(i),
				   error->engine[i].comm,
				   error->engine[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   dev->pdev->subsystem_vendor,
		   dev->pdev->subsystem_device);
	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(dev)) {
		struct intel_csr *csr = &dev_priv->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_INFO(dev)->gen >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (IS_GEN7(dev))
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].engine_id != -1)
			error_print_engine(m, &error->engine[i]);
	}

	for (i = 0; i < error->vm_count; i++) {
		err_printf(m, "vm[%d]\n", i);

		print_error_buffers(m, "Active",
				    error->active_bo[i],
				    error->active_bo_count[i]);

		print_error_buffers(m, "Pinned",
				    error->pinned_bo[i],
				    error->pinned_bo_count[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];

		obj = ee->batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->engine[i].name);
			if (ee->pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   ee->comm,
					   ee->pid);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		obj = ee->wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if (ee->num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->engine[i].name,
				   ee->num_requests);
			for (j = 0; j < ee->num_requests; j++) {
				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   ee->requests[j].seqno,
					   ee->requests[j].jiffies,
					   ee->requests[j].tail);
			}
		}

		if (ee->num_waiters) {
			err_printf(m, "%s --- %d waiters\n",
				   dev_priv->engine[i].name,
				   ee->num_waiters);
			for (j = 0; j < ee->num_waiters; j++) {
				err_printf(m, " seqno 0x%08x for %s [%d]\n",
					   ee->waiters[j].seqno,
					   ee->waiters[j].comm,
					   ee->waiters[j].pid);
			}
		}

		if ((obj = ee->ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if ((obj = ee->hws_page)) {
			u64 hws_offset = obj->gtt_offset;
			u32 *hws_page = &obj->pages[0][0];

			if (i915.enable_execlists) {
				hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
				hws_page = &obj->pages[LRC_PPHWSP_PN][0];
			}
			err_printf(m, "%s --- HW Status = 0x%08llx\n",
				   dev_priv->engine[i].name, hws_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   hws_page[elt],
					   hws_page[elt+1],
					   hws_page[elt+2],
					   hws_page[elt+3]);
				offset += 16;
			}
		}

		obj = ee->wa_ctx;
		if (obj) {
			u64 wa_ctx_offset = obj->gtt_offset;
			u32 *wa_ctx_page = &obj->pages[0][0];
			struct intel_engine_cs *engine = &dev_priv->engine[RCS];
			u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
					   engine->wa_ctx.per_ctx.size);

			err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
				   dev_priv->engine[i].name, wa_ctx_offset);
			offset = 0;
			for (elt = 0; elt < wa_ctx_size; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   wa_ctx_page[elt + 0],
					   wa_ctx_page[elt + 1],
					   wa_ctx_page[elt + 2],
					   wa_ctx_page[elt + 3]);
				offset += 16;
			}
		}

		if ((obj = ee->ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n",
			   lower_32_bits(obj->gtt_offset));
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
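
/*
 * Editor's sketch of the intended pairing, modelled on the debugfs reader
 * (see i915_error_state_read() in i915_debugfs.c); locals are illustrative:
 */
#if 0
	struct drm_i915_error_state_buf error_str;
	int ret;

	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret == 0) {
		/* ... hand error_str.buf / error_str.bytes to userspace ... */
	}

	i915_error_state_buf_release(&error_str);
#endif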

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];

		i915_error_object_free(ee->batchbuffer);
		i915_error_object_free(ee->wa_batchbuffer);
		i915_error_object_free(ee->ringbuffer);
		i915_error_object_free(ee->hws_page);
		i915_error_object_free(ee->ctx);
		i915_error_object_free(ee->wa_ctx);

		kfree(ee->requests);
		kfree(ee->waiters);
	}

	i915_error_object_free(error->semaphore_obj);

	for (i = 0; i < error->vm_count; i++)
		kfree(error->active_bo[i]);

	kfree(error->active_bo);
	kfree(error->active_bo_count);
	kfree(error->pinned_bo);
	kfree(error->pinned_bo_count);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}

static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src,
			 struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_error_object *dst;
	struct i915_vma *vma = NULL;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u64 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	if (i915_gem_obj_bound(src, vm))
		dst->gtt_offset = i915_gem_obj_offset(src, vm);
	else
		dst->gtt_offset = -1;

	reloc_offset = dst->gtt_offset;
	if (i915_is_ggtt(vm))
		vma = i915_gem_obj_to_ggtt(src);
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		    vma && (vma->bound & GLOBAL_BIND) &&
		    reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma && vma->bound & GLOBAL_BIND))
			goto unwind;

		reloc_offset = i915_gem_obj_ggtt_offset(src);
		if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
	    !HAS_LLC(dev_priv))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(ggtt->mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
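
/*
 * Editor's note: most snapshots below go through this GGTT wrapper, e.g.
 * i915_error_ggtt_object_create(dev_priv, engine->status_page.obj).
 * Passing a ppgtt address space to i915_error_object_create() instead
 * records the object at its per-process offset, as i915_gem_record_rings()
 * does for the active batch buffer.
 */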
/* The error capture is special as it tries to run underneath the normal
 * locking rules - so we use the raw version of the i915_gem_active lookup.
 */
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
	return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
}

static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
	struct intel_engine_cs *engine;

	engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
	return engine ? engine->id : -1;
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;

	for (i = 0; i < I915_NUM_ENGINES; i++)
		err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
	err->wseqno = __active_get_seqno(&obj->last_write);
	err->engine = __active_get_engine_id(&obj->last_write);

	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, vm_link) {
		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_buffer * const first = err;
	struct drm_i915_error_buffer * const last = err + count;

	list_for_each_entry(obj, head, global_list) {
		struct i915_vma *vma;

		if (err == last)
			break;

		list_for_each_entry(vma, &obj->vma_list, obj_link)
			if (vma->vm == vm && vma->pin_count > 0)
				capture_bo(err++, vma);
	}

	return err - first;
}

/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *engine_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it contains some very
	 * common synchronization commands that almost always appear when the
	 * hang is strictly a client bug. Use instdone to differentiate some
	 * of those.
	 */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) {
			if (engine_id)
				*engine_id = i;

			return error->engine[i].ipehr ^ error->engine[i].instdone;
		}
	}

	return error_code;
}
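
/*
 * Editor's worked example with illustrative register values: if the render
 * engine is the one marked HANGCHECK_HUNG, with ipehr = 0x0b160001 and
 * instdone = 0xffd7ffff, the function stores *engine_id = 0 (RCS) and
 * returns 0x0b160001 ^ 0xffd7ffff = 0xf4c1fffe, which then appears in the
 * "GPU HANG: ecode ..." message built by i915_error_capture_msg().
 */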

static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	int i;

	if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	} else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	}
}

static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
					struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *to;
	enum intel_engine_id id;

	if (!error->semaphore_obj)
		return;

	for_each_engine_id(to, dev_priv, id) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (engine == to)
			continue;

		signal_offset =
			(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
		tmp = error->semaphore_obj->pages[0];
		idx = intel_engine_sync_index(engine, to);

		ee->semaphore_mboxes[idx] = tmp[signal_offset];
		ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
	}
}

static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
	ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
	ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
	ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv)) {
		ee->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(engine->mmio_base));
		ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
	}
}

static void error_record_engine_waiters(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_error_waiter *waiter;
	struct rb_node *rb;
	int count;

	ee->num_waiters = 0;
	ee->waiters = NULL;

	spin_lock(&b->lock);
	count = 0;
	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
		count++;
	spin_unlock(&b->lock);

	waiter = NULL;
	if (count)
		waiter = kmalloc_array(count,
				       sizeof(struct drm_i915_error_waiter),
				       GFP_ATOMIC);
	if (!waiter)
		return;

	ee->waiters = waiter;

	spin_lock(&b->lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = container_of(rb, typeof(*w), node);

		strcpy(waiter->comm, w->tsk->comm);
		waiter->pid = w->tsk->pid;
		waiter->seqno = w->seqno;
		waiter++;

		if (++ee->num_waiters == count)
			break;
	}
	spin_unlock(&b->lock);
}

static void error_record_engine_registers(struct drm_i915_error_state *error,
					  struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) >= 6) {
		ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
		ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (INTEL_GEN(dev_priv) >= 8)
			gen8_record_semaphore_state(error, engine, ee);
		else
			gen6_record_semaphore_state(engine, ee);
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
		ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
		ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
		ee->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
		ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
		ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
		if (INTEL_GEN(dev_priv) >= 8) {
			ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
			ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
		}
		ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
	} else {
		ee->faddr = I915_READ(DMA_FADD_I8XX);
		ee->ipeir = I915_READ(IPEIR);
		ee->ipehr = I915_READ(IPEHR);
		ee->instdone = I915_READ(GEN2_INSTDONE);
	}

	ee->waiting = intel_engine_has_waiter(engine);
	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
	ee->acthd = intel_engine_get_active_head(engine);
	ee->seqno = intel_engine_get_seqno(engine);
	ee->last_seqno = engine->last_submitted_seqno;
	ee->start = I915_READ_START(engine);
	ee->head = I915_READ_HEAD(engine);
	ee->tail = I915_READ_TAIL(engine);
	ee->ctl = I915_READ_CTL(engine);

	if (I915_NEED_GFX_HWS(dev_priv)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev_priv)) {
			switch (engine->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(engine->i915)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = I915_READ(mmio);
	}

	ee->hangcheck_score = engine->hangcheck.score;
	ee->hangcheck_action = engine->hangcheck.action;

	if (USES_PPGTT(dev_priv)) {
		int i;

		ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

		if (IS_GEN6(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
		else if (IS_GEN7(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
		else if (INTEL_GEN(dev_priv) >= 8)
			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(engine, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(engine, i));
			}
	}
}

static void i915_gem_record_active_context(struct intel_engine_cs *engine,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (engine->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ee->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}

static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
				  struct drm_i915_error_state *error)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_request *request;
	int i, count;

	if (dev_priv->semaphore_obj) {
		error->semaphore_obj =
			i915_error_ggtt_object_create(dev_priv,
						      dev_priv->semaphore_obj);
	}

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_engine_cs *engine = &dev_priv->engine[i];
		struct drm_i915_error_engine *ee = &error->engine[i];

		ee->pid = -1;
		ee->engine_id = -1;

		if (!intel_engine_initialized(engine))
			continue;

		ee->engine_id = i;

		error_record_engine_registers(error, engine, ee);
		error_record_engine_waiters(engine, ee);

		request = i915_gem_find_active_request(engine);
		if (request) {
			struct i915_address_space *vm;
			struct intel_ring *ring;

			vm = request->ctx->ppgtt ?
				&request->ctx->ppgtt->base : &ggtt->base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			ee->batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 vm);

			if (HAS_BROKEN_CS_TLB(dev_priv))
				ee->wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
								      engine->scratch.obj);

			if (request->pid) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->pid, PIDTYPE_PID);
				if (task) {
					strcpy(ee->comm, task->comm);
					ee->pid = task->pid;
				}
				rcu_read_unlock();
			}

			error->simulated |=
				request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;

			ring = request->ring;
			ee->cpu_ring_head = ring->head;
			ee->cpu_ring_tail = ring->tail;
			ee->ringbuffer =
				i915_error_ggtt_object_create(dev_priv,
							      ring->obj);
		}

		ee->hws_page =
			i915_error_ggtt_object_create(dev_priv,
						      engine->status_page.obj);

		ee->wa_ctx = i915_error_ggtt_object_create(dev_priv,
							   engine->wa_ctx.obj);

		i915_gem_record_active_context(engine, error, ee);

		count = 0;
		list_for_each_entry(request, &engine->request_list, link)
			count++;

		ee->num_requests = count;
		ee->requests =
			kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
		if (!ee->requests) {
			ee->num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &engine->request_list, link) {
			struct drm_i915_error_request *erq;

			if (count >= ee->num_requests) {
				/*
				 * If the ring request list was changed in
				 * between the point where the error request
				 * list was created and dimensioned and this
				 * point then just exit early to avoid crashes.
				 *
				 * We don't need to communicate that the
				 * request list changed state during error
				 * state capture and that the error state is
				 * slightly incorrect as a consequence since we
				 * are typically only interested in the request
				 * list state at the point of error state
				 * capture, not in any changes happening during
				 * the capture.
				 */
				break;
			}

			erq = &ee->requests[count++];
			erq->seqno = request->fence.seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->postfix;
		}
	}
}

/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		i++;
	error->active_bo_count[ndx] = i;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		list_for_each_entry(vma, &obj->vma_list, obj_link)
			if (vma->vm == vm && vma->pin_count > 0)
				i++;
	}
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list, vm);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}

static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	if (error->active_bo == NULL ||
	    error->pinned_bo == NULL ||
	    error->active_bo_count == NULL ||
	    error->pinned_bo_count == NULL) {
		kfree(error->active_bo);
		kfree(error->active_bo_count);
		kfree(error->pinned_bo);
		kfree(error->pinned_bo_count);

		error->active_bo = NULL;
		error->active_bo_count = NULL;
		error->pinned_bo = NULL;
		error->pinned_bo_count = NULL;
	} else {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
			i915_gem_capture_vm(dev_priv, error, vm, i++);

		error->vm_count = cnt;
	}
}

/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ_FW(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev_priv, error->extra_instdone);
}

static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error,
				   u32 engine_mask,
				   const char *error_msg)
{
	u32 ecode;
	int engine_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &engine_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_GEN(dev_priv), engine_id, ecode);

	if (engine_id != -1 && error->engine[engine_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->engine[engine_id].comm,
				 error->engine[engine_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  engine_mask ? "reset" : "continue");
}

static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev_priv: i915 device private
 * @engine_mask: mask of engines to be reset, or 0 if execution continues
 * @error_msg: short description of the error
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_error_state *error;
	unsigned long flags;

	if (READ_ONCE(dev_priv->gpu_error.first_error))
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev_priv, error);
	i915_gem_record_rings(dev_priv, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev_priv);
	error->display = intel_display_capture_error_state(dev_priv);

	i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	if (!error->simulated) {
		spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
		if (!dev_priv->gpu_error.first_error) {
			dev_priv->gpu_error.first_error = error;
			error = NULL;
		}
		spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	}

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			 dev_priv->drm.primary->index);
		warned = true;
	}
}
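
/*
 * Editor's sketch of a typical invocation from the hang handler
 * (illustrative; see i915_handle_error() in i915_irq.c for the real call):
 */
#if 0
	i915_capture_error_state(dev_priv, engine_mask,
				 "hang on render ring");
#endif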

void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
			     uint32_t *instdone)
{
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
		instdone[0] = I915_READ(GEN2_INSTDONE);
	else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN4_INSTDONE1);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
	}
}