]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/gpu/drm/i915/i915_debugfs.c
drm/i915: Hold rcu_read_lock when iterating over the radixtree (vma idr)
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / i915 / i915_debugfs.c
CommitLineData
2017263e
BG
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
f3cd474b 29#include <linux/debugfs.h>
e637d2cb 30#include <linux/sort.h>
d92a8cfc 31#include <linux/sched/mm.h>
4e5359cd 32#include "intel_drv.h"
2017263e 33
36cdd013
DW
/* Map a debugfs info node back to the i915 device that registered it. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
38
418e3cd8
CW
/*
 * Print one module parameter as "i915.<name>=<value>".
 *
 * @type is the stringified C type of the parameter (supplied by the
 * PRINT_PARAM macro in i915_capabilities()); it is compared at compile
 * time via __builtin_strcmp so the whole chain folds to a single
 * seq_printf call.  An unhandled type trips BUILD_BUG() at compile time,
 * which is why the function must be __always_inline.
 */
static __always_inline void seq_print_param(struct seq_file *m,
					    const char *name,
					    const char *type,
					    const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
	else if (!__builtin_strcmp(type, "int"))
		seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
	else if (!__builtin_strcmp(type, "char *"))
		seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
	else
		BUILD_BUG();
}
55
70d39fe4
CW
/*
 * debugfs: dump device generation/platform/PCH, every device-info flag,
 * and the current value of every i915 module parameter.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	/* One "flag: yes/no" line per device-info capability bit. */
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	/* Hold the param lock so values cannot change mid-dump. */
	kernel_param_lock(THIS_MODULE);
#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x);
	I915_PARAMS_FOR_EACH(PRINT_PARAM);
#undef PRINT_PARAM
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
2017263e 77
a7363de7 78static char get_active_flag(struct drm_i915_gem_object *obj)
a6172a80 79{
573adb39 80 return i915_gem_object_is_active(obj) ? '*' : ' ';
a6172a80
CW
81}
82
a7363de7 83static char get_pin_flag(struct drm_i915_gem_object *obj)
be12a86b
TU
84{
85 return obj->pin_display ? 'p' : ' ';
86}
87
a7363de7 88static char get_tiling_flag(struct drm_i915_gem_object *obj)
a6172a80 89{
3e510a8e 90 switch (i915_gem_object_get_tiling(obj)) {
0206e353 91 default:
be12a86b
TU
92 case I915_TILING_NONE: return ' ';
93 case I915_TILING_X: return 'X';
94 case I915_TILING_Y: return 'Y';
0206e353 95 }
a6172a80
CW
96}
97
a7363de7 98static char get_global_flag(struct drm_i915_gem_object *obj)
be12a86b 99{
275f039d 100 return !list_empty(&obj->userfault_link) ? 'g' : ' ';
be12a86b
TU
101}
102
a7363de7 103static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
1d693bcc 104{
a4f5ea64 105 return obj->mm.mapping ? 'M' : ' ';
1d693bcc
BW
106}
107
ca1543be
TU
108static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
109{
110 u64 size = 0;
111 struct i915_vma *vma;
112
1c7f4bca 113 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3272db53 114 if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
ca1543be
TU
115 size += vma->node.size;
116 }
117
118 return size;
119}
120
37811fcc
CW
/*
 * Print a one-line (no trailing newline) summary of a GEM object: status
 * flags, size, cache domains, every allocated VMA binding (with GGTT view
 * details and fence register, if any), stolen-memory offset, the engine
 * that last wrote it, and its frontbuffer bits.
 *
 * Caller must hold struct_mutex (asserted below) so the vma_list walk is
 * stable.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Flag characters come from the get_*_flag() helpers above. */
	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* Count pinned bindings across every VMA of the object. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	/* Second pass: describe each binding that actually has GTT space. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size);
		if (i915_vma_is_ggtt(vma)) {
			/* GGTT bindings carry a view (normal/partial/rotated). */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			/* '*' marks a fence with an outstanding GPU access. */
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
207
e637d2cb 208static int obj_rank_by_stolen(const void *A, const void *B)
6d2b8885 209{
e637d2cb
CW
210 const struct drm_i915_gem_object *a =
211 *(const struct drm_i915_gem_object **)A;
212 const struct drm_i915_gem_object *b =
213 *(const struct drm_i915_gem_object **)B;
6d2b8885 214
2d05fa16
RV
215 if (a->stolen->start < b->stolen->start)
216 return -1;
217 if (a->stolen->start > b->stolen->start)
218 return 1;
219 return 0;
6d2b8885
CW
220}
221
/*
 * debugfs: list every GEM object backed by stolen memory, sorted by its
 * offset within the stolen area, followed by object/byte totals.
 *
 * The scratch array is sized from an unlocked snapshot of the object
 * count and allocated *before* taking struct_mutex (kvmalloc may
 * reclaim, which could recurse into the mutex); if more objects appear
 * after the snapshot the walks simply stop at @total.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;
	/* Bound objects contribute to the GTT total as well. */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	/* Unbound objects have no GTT footprint. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, " ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
281
2db8e9d6 282struct file_stats {
6313c204 283 struct drm_i915_file_private *file_priv;
c44ef60e
MK
284 unsigned long count;
285 u64 total, unbound;
286 u64 global, shared;
287 u64 active, inactive;
2db8e9d6
CW
288};
289
/*
 * idr_for_each() callback: fold one GEM object into a struct file_stats.
 *
 * @ptr is the object, @data the accumulator.  ppgtt VMAs belonging to a
 * different client than stats->file_priv are skipped so shared objects
 * are not double-counted against the wrong file.  Always returns 0 so
 * the idr walk continues.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/* Only count ppgtt bindings owned by this client. */
			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
326
b0da1b79
CW
/*
 * Emit one summary line for a file_stats accumulator; silent when the
 * accumulator saw no objects.  @stats is evaluated multiple times, so
 * pass a plain variable, not an expression with side effects.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
493018dc
BV
339
/*
 * Accumulate and print statistics for every object sitting in the
 * per-engine batch-buffer pools (kernel-owned, hence the "[k]" tag).
 * Passes id 0 to per_file_stats() as these objects belong to no file.
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
362
15da9565
CW
/*
 * idr_for_each() callback: fold one GEM context's per-engine state
 * objects (logical context image and ringbuffer backing store) into a
 * struct file_stats passed via @data.  Always returns 0 to continue.
 */
static int per_file_ctx_stats(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state->obj, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
	}

	return 0;
}
377
/*
 * Print aggregate statistics for the objects backing all GEM contexts:
 * the kernel context plus every context of every open DRM file.
 * Caller holds filelist_mutex; struct_mutex is taken here for the
 * context/object walks.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
399
/*
 * debugfs: global GEM object census.  Dumps totals for unbound and
 * bound objects (with purgeable/mapped/display breakdowns), the GGTT
 * size, batch-pool and context statistics, then one summary line per
 * open DRM file attributed to the owning task.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count;
	u64 size, mapped_size, purgeable_size, dpy_size;
	struct drm_i915_gem_object *obj;
	struct drm_file *file;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* purgeable/mapped totals deliberately carry over both lists. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		size += obj->base.size;
		++count;

		if (obj->pin_display) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u display objects (pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/* filelist_mutex guards dev->filelist; struct_mutex is re-taken
	 * per file for the object walks inside. */
	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_gem_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
511
/*
 * debugfs: describe every bound GEM object, plus totals.  When the
 * info_ent's ->data is non-NULL the listing is restricted to objects
 * pinned for display (the same node callback serves both files).
 */
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	bool show_pin_display_only = !!node->info_ent->data;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (show_pin_display_only && !obj->pin_display)
			continue;

		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
546
493018dc
BV
/*
 * debugfs: dump the contents of each engine's batch-buffer pool, one
 * cache bucket at a time, with per-bucket counts and a grand total.
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First pass just counts so the header can precede
			 * the per-object listing. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, " ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
591
1b36595f
CW
/*
 * Print one request as "<prefix><global seqno> [ctx:fence] prio=..
 * @ <age in ms>: <timeline name>".
 */
static void print_request(struct seq_file *m,
			  struct drm_i915_gem_request *rq,
			  const char *prefix)
{
	seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
		   rq->priotree.priority,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   rq->timeline->common->name);
}
602
2017263e
BG
/*
 * debugfs: list the outstanding requests on each engine's timeline,
 * or "No requests" when everything is idle.
 */
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_request *req;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret, any;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_engine(engine, dev_priv, id) {
		int count;

		/* Count first so engines with nothing queued are skipped. */
		count = 0;
		list_for_each_entry(req, &engine->timeline->requests, link)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", engine->name, count);
		list_for_each_entry(req, &engine->timeline->requests, link)
			print_request(m, req, " ");

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}
639
/*
 * Print the engine's current hardware seqno followed by every thread
 * currently parked in its breadcrumb wait tree.  The waiter rbtree is
 * manipulated from the irq path, hence spin_lock_irq.
 */
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *rb;

	seq_printf(m, "Current sequence (%s): %x\n",
		   engine->name, intel_engine_get_seqno(engine));

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->rb_lock);
}
658
2017263e
BG
/* debugfs: seqno and waiter state for every engine. */
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		i915_ring_seqno_info(m, engine);

	return 0;
}
670
671
/*
 * debugfs: dump the interrupt mask/identity/enable registers.  The
 * register set differs per platform, so the body is one large
 * platform-dispatch: Cherryview, gen8+, Valleyview, pre-PCH-split,
 * then the Ironlake-style PCH-split layout.  A runtime-pm reference is
 * held for the duration since the registers are read directly, and
 * per-pipe reads additionally take the pipe's power domain so a
 * powered-down pipe is reported instead of faulting.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		/* Hotplug registers live outside the pipe domains. */
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	/* Per-engine interrupt masks and waiter state, all platforms. */
	for_each_engine(engine, dev_priv, id) {
		if (INTEL_GEN(dev_priv) >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
		i915_ring_seqno_info(m, engine);
	}
	intel_runtime_pm_put(dev_priv);

	return 0;
}
884
a6172a80
CW
/*
 * debugfs: for each hardware fence register, print its pin count and
 * describe the object bound through it (or "unused").
 */
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
911
98a2f411 912#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
5a4c6f1b
CW
/*
 * read() for the error-state/gpu-info debugfs files: render the
 * captured i915_gpu_state (stashed in file->private_data at open) to
 * text and copy the slice at *pos to userspace.  The string buffer is
 * rebuilt from *pos on every call, so large dumps are re-rendered per
 * read chunk.  Returns bytes copied, 0 when no state was captured, or
 * a negative errno.
 */
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	if (!error)
		return 0;

	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	/* str.buf already starts at the requested offset; copy from 0. */
	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}
edc3d884 942
5a4c6f1b
CW
943static int gpu_state_release(struct inode *inode, struct file *file)
944{
945 i915_gpu_state_put(file->private_data);
edc3d884 946 return 0;
d5442303
DV
947}
948
5a4c6f1b 949static int i915_gpu_info_open(struct inode *inode, struct file *file)
d5442303 950{
090e5fe3 951 struct drm_i915_private *i915 = inode->i_private;
5a4c6f1b 952 struct i915_gpu_state *gpu;
d5442303 953
090e5fe3
CW
954 intel_runtime_pm_get(i915);
955 gpu = i915_capture_gpu_state(i915);
956 intel_runtime_pm_put(i915);
5a4c6f1b
CW
957 if (!gpu)
958 return -ENOMEM;
d5442303 959
5a4c6f1b 960 file->private_data = gpu;
edc3d884
MK
961 return 0;
962}
963
5a4c6f1b
CW
964static const struct file_operations i915_gpu_info_fops = {
965 .owner = THIS_MODULE,
966 .open = i915_gpu_info_open,
967 .read = gpu_state_read,
968 .llseek = default_llseek,
969 .release = gpu_state_release,
970};
971
972static ssize_t
973i915_error_state_write(struct file *filp,
974 const char __user *ubuf,
975 size_t cnt,
976 loff_t *ppos)
4dc955f7 977{
5a4c6f1b 978 struct i915_gpu_state *error = filp->private_data;
4dc955f7 979
5a4c6f1b
CW
980 if (!error)
981 return 0;
edc3d884 982
5a4c6f1b
CW
983 DRM_DEBUG_DRIVER("Resetting error state\n");
984 i915_reset_error_state(error->i915);
edc3d884 985
5a4c6f1b
CW
986 return cnt;
987}
edc3d884 988
5a4c6f1b
CW
989static int i915_error_state_open(struct inode *inode, struct file *file)
990{
991 file->private_data = i915_first_error_state(inode->i_private);
992 return 0;
d5442303
DV
993}
994
995static const struct file_operations i915_error_state_fops = {
996 .owner = THIS_MODULE,
997 .open = i915_error_state_open,
5a4c6f1b 998 .read = gpu_state_read,
d5442303
DV
999 .write = i915_error_state_write,
1000 .llseek = default_llseek,
5a4c6f1b 1001 .release = gpu_state_release,
d5442303 1002};
98a2f411
CW
1003#endif
1004
647416f9
KC
1005static int
1006i915_next_seqno_set(void *data, u64 val)
1007{
36cdd013
DW
1008 struct drm_i915_private *dev_priv = data;
1009 struct drm_device *dev = &dev_priv->drm;
40633219
MK
1010 int ret;
1011
40633219
MK
1012 ret = mutex_lock_interruptible(&dev->struct_mutex);
1013 if (ret)
1014 return ret;
1015
73cb9701 1016 ret = i915_gem_set_global_seqno(dev, val);
40633219
MK
1017 mutex_unlock(&dev->struct_mutex);
1018
647416f9 1019 return ret;
40633219
MK
1020}
1021
647416f9 1022DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
9b6586ae 1023 NULL, i915_next_seqno_set,
3a3b4f98 1024 "0x%llx\n");
40633219 1025
adb4bd12 1026static int i915_frequency_info(struct seq_file *m, void *unused)
f97108d1 1027{
36cdd013 1028 struct drm_i915_private *dev_priv = node_to_i915(m->private);
c8c8fb33
PZ
1029 int ret = 0;
1030
1031 intel_runtime_pm_get(dev_priv);
3b8d8d91 1032
36cdd013 1033 if (IS_GEN5(dev_priv)) {
3b8d8d91
JB
1034 u16 rgvswctl = I915_READ16(MEMSWCTL);
1035 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1036
1037 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1038 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1039 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1040 MEMSTAT_VID_SHIFT);
1041 seq_printf(m, "Current P-state: %d\n",
1042 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
36cdd013 1043 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
666a4537
WB
1044 u32 freq_sts;
1045
1046 mutex_lock(&dev_priv->rps.hw_lock);
1047 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1048 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1049 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1050
1051 seq_printf(m, "actual GPU freq: %d MHz\n",
1052 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1053
1054 seq_printf(m, "current GPU freq: %d MHz\n",
1055 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
1056
1057 seq_printf(m, "max GPU freq: %d MHz\n",
1058 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1059
1060 seq_printf(m, "min GPU freq: %d MHz\n",
1061 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
1062
1063 seq_printf(m, "idle GPU freq: %d MHz\n",
1064 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
1065
1066 seq_printf(m,
1067 "efficient (RPe) frequency: %d MHz\n",
1068 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
1069 mutex_unlock(&dev_priv->rps.hw_lock);
36cdd013 1070 } else if (INTEL_GEN(dev_priv) >= 6) {
35040562
BP
1071 u32 rp_state_limits;
1072 u32 gt_perf_status;
1073 u32 rp_state_cap;
0d8f9491 1074 u32 rpmodectl, rpinclimit, rpdeclimit;
8e8c06cd 1075 u32 rpstat, cagf, reqf;
ccab5c82
JB
1076 u32 rpupei, rpcurup, rpprevup;
1077 u32 rpdownei, rpcurdown, rpprevdown;
9dd3c605 1078 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
3b8d8d91
JB
1079 int max_freq;
1080
35040562 1081 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
cc3f90f0 1082 if (IS_GEN9_LP(dev_priv)) {
35040562
BP
1083 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1084 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1085 } else {
1086 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1087 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1088 }
1089
3b8d8d91 1090 /* RPSTAT1 is in the GT power well */
59bad947 1091 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
3b8d8d91 1092
8e8c06cd 1093 reqf = I915_READ(GEN6_RPNSWREQ);
35ceabf3 1094 if (INTEL_GEN(dev_priv) >= 9)
60260a5b
AG
1095 reqf >>= 23;
1096 else {
1097 reqf &= ~GEN6_TURBO_DISABLE;
36cdd013 1098 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
60260a5b
AG
1099 reqf >>= 24;
1100 else
1101 reqf >>= 25;
1102 }
7c59a9c1 1103 reqf = intel_gpu_freq(dev_priv, reqf);
8e8c06cd 1104
0d8f9491
CW
1105 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1106 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1107 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1108
ccab5c82 1109 rpstat = I915_READ(GEN6_RPSTAT1);
d6cda9c7
AG
1110 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1111 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1112 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1113 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1114 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1115 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
35ceabf3 1116 if (INTEL_GEN(dev_priv) >= 9)
60260a5b 1117 cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
36cdd013 1118 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
f82855d3
BW
1119 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
1120 else
1121 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
7c59a9c1 1122 cagf = intel_gpu_freq(dev_priv, cagf);
ccab5c82 1123
59bad947 1124 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
d1ebd816 1125
36cdd013 1126 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
9dd3c605
PZ
1127 pm_ier = I915_READ(GEN6_PMIER);
1128 pm_imr = I915_READ(GEN6_PMIMR);
1129 pm_isr = I915_READ(GEN6_PMISR);
1130 pm_iir = I915_READ(GEN6_PMIIR);
1131 pm_mask = I915_READ(GEN6_PMINTRMSK);
1132 } else {
1133 pm_ier = I915_READ(GEN8_GT_IER(2));
1134 pm_imr = I915_READ(GEN8_GT_IMR(2));
1135 pm_isr = I915_READ(GEN8_GT_ISR(2));
1136 pm_iir = I915_READ(GEN8_GT_IIR(2));
1137 pm_mask = I915_READ(GEN6_PMINTRMSK);
1138 }
0d8f9491 1139 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
9dd3c605 1140 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
5dd04556
SAK
1141 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
1142 dev_priv->rps.pm_intrmsk_mbz);
3b8d8d91 1143 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
3b8d8d91 1144 seq_printf(m, "Render p-state ratio: %d\n",
35ceabf3 1145 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
3b8d8d91
JB
1146 seq_printf(m, "Render p-state VID: %d\n",
1147 gt_perf_status & 0xff);
1148 seq_printf(m, "Render p-state limit: %d\n",
1149 rp_state_limits & 0xff);
0d8f9491
CW
1150 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1151 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1152 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1153 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
8e8c06cd 1154 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
f82855d3 1155 seq_printf(m, "CAGF: %dMHz\n", cagf);
d6cda9c7
AG
1156 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1157 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1158 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1159 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1160 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1161 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
d86ed34a
CW
1162 seq_printf(m, "Up threshold: %d%%\n",
1163 dev_priv->rps.up_threshold);
1164
d6cda9c7
AG
1165 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1166 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1167 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1168 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1169 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1170 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
d86ed34a
CW
1171 seq_printf(m, "Down threshold: %d%%\n",
1172 dev_priv->rps.down_threshold);
3b8d8d91 1173
cc3f90f0 1174 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
35040562 1175 rp_state_cap >> 16) & 0xff;
35ceabf3
RV
1176 max_freq *= (IS_GEN9_BC(dev_priv) ||
1177 IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1178 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
7c59a9c1 1179 intel_gpu_freq(dev_priv, max_freq));
3b8d8d91
JB
1180
1181 max_freq = (rp_state_cap & 0xff00) >> 8;
35ceabf3
RV
1182 max_freq *= (IS_GEN9_BC(dev_priv) ||
1183 IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1184 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
7c59a9c1 1185 intel_gpu_freq(dev_priv, max_freq));
3b8d8d91 1186
cc3f90f0 1187 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
35040562 1188 rp_state_cap >> 0) & 0xff;
35ceabf3
RV
1189 max_freq *= (IS_GEN9_BC(dev_priv) ||
1190 IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1191 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
7c59a9c1 1192 intel_gpu_freq(dev_priv, max_freq));
31c77388 1193 seq_printf(m, "Max overclocked frequency: %dMHz\n",
7c59a9c1 1194 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
aed242ff 1195
d86ed34a
CW
1196 seq_printf(m, "Current freq: %d MHz\n",
1197 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
1198 seq_printf(m, "Actual freq: %d MHz\n", cagf);
aed242ff
CW
1199 seq_printf(m, "Idle freq: %d MHz\n",
1200 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
d86ed34a
CW
1201 seq_printf(m, "Min freq: %d MHz\n",
1202 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
29ecd78d
CW
1203 seq_printf(m, "Boost freq: %d MHz\n",
1204 intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
d86ed34a
CW
1205 seq_printf(m, "Max freq: %d MHz\n",
1206 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1207 seq_printf(m,
1208 "efficient (RPe) frequency: %d MHz\n",
1209 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
3b8d8d91 1210 } else {
267f0c90 1211 seq_puts(m, "no P-state info available\n");
3b8d8d91 1212 }
f97108d1 1213
49cd97a3 1214 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1170f28c
MK
1215 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1216 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1217
c8c8fb33
PZ
1218 intel_runtime_pm_put(dev_priv);
1219 return ret;
f97108d1
JB
1220}
1221
d636951e
BW
1222static void i915_instdone_info(struct drm_i915_private *dev_priv,
1223 struct seq_file *m,
1224 struct intel_instdone *instdone)
1225{
f9e61372
BW
1226 int slice;
1227 int subslice;
1228
d636951e
BW
1229 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1230 instdone->instdone);
1231
1232 if (INTEL_GEN(dev_priv) <= 3)
1233 return;
1234
1235 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1236 instdone->slice_common);
1237
1238 if (INTEL_GEN(dev_priv) <= 6)
1239 return;
1240
f9e61372
BW
1241 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1242 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1243 slice, subslice, instdone->sampler[slice][subslice]);
1244
1245 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1246 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1247 slice, subslice, instdone->row[slice][subslice]);
d636951e
BW
1248}
1249
f654449a
CW
1250static int i915_hangcheck_info(struct seq_file *m, void *unused)
1251{
36cdd013 1252 struct drm_i915_private *dev_priv = node_to_i915(m->private);
e2f80391 1253 struct intel_engine_cs *engine;
666796da
TU
1254 u64 acthd[I915_NUM_ENGINES];
1255 u32 seqno[I915_NUM_ENGINES];
d636951e 1256 struct intel_instdone instdone;
c3232b18 1257 enum intel_engine_id id;
f654449a 1258
8af29b0c 1259 if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
8c185eca
CW
1260 seq_puts(m, "Wedged\n");
1261 if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1262 seq_puts(m, "Reset in progress: struct_mutex backoff\n");
1263 if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
1264 seq_puts(m, "Reset in progress: reset handoff to waiter\n");
8af29b0c 1265 if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
8c185eca 1266 seq_puts(m, "Waiter holding struct mutex\n");
8af29b0c 1267 if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
8c185eca 1268 seq_puts(m, "struct_mutex blocked for reset\n");
8af29b0c 1269
f654449a 1270 if (!i915.enable_hangcheck) {
8c185eca 1271 seq_puts(m, "Hangcheck disabled\n");
f654449a
CW
1272 return 0;
1273 }
1274
ebbc7546
MK
1275 intel_runtime_pm_get(dev_priv);
1276
3b3f1650 1277 for_each_engine(engine, dev_priv, id) {
7e37f889 1278 acthd[id] = intel_engine_get_active_head(engine);
1b7744e7 1279 seqno[id] = intel_engine_get_seqno(engine);
ebbc7546
MK
1280 }
1281
3b3f1650 1282 intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
61642ff0 1283
ebbc7546
MK
1284 intel_runtime_pm_put(dev_priv);
1285
8352aea3
CW
1286 if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1287 seq_printf(m, "Hangcheck active, timer fires in %dms\n",
f654449a
CW
1288 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1289 jiffies));
8352aea3
CW
1290 else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1291 seq_puts(m, "Hangcheck active, work pending\n");
1292 else
1293 seq_puts(m, "Hangcheck inactive\n");
f654449a 1294
f73b5674
CW
1295 seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1296
3b3f1650 1297 for_each_engine(engine, dev_priv, id) {
33f53719
CW
1298 struct intel_breadcrumbs *b = &engine->breadcrumbs;
1299 struct rb_node *rb;
1300
e2f80391 1301 seq_printf(m, "%s:\n", engine->name);
f73b5674 1302 seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
cb399eab 1303 engine->hangcheck.seqno, seqno[id],
f73b5674
CW
1304 intel_engine_last_submit(engine),
1305 engine->timeline->inflight_seqnos);
3fe3b030 1306 seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
83348ba8
CW
1307 yesno(intel_engine_has_waiter(engine)),
1308 yesno(test_bit(engine->id,
3fe3b030
MK
1309 &dev_priv->gpu_error.missed_irq_rings)),
1310 yesno(engine->hangcheck.stalled));
1311
61d3dc70 1312 spin_lock_irq(&b->rb_lock);
33f53719 1313 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
f802cf7e 1314 struct intel_wait *w = rb_entry(rb, typeof(*w), node);
33f53719
CW
1315
1316 seq_printf(m, "\t%s [%d] waiting for %x\n",
1317 w->tsk->comm, w->tsk->pid, w->seqno);
1318 }
61d3dc70 1319 spin_unlock_irq(&b->rb_lock);
33f53719 1320
f654449a 1321 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
e2f80391 1322 (long long)engine->hangcheck.acthd,
c3232b18 1323 (long long)acthd[id]);
3fe3b030
MK
1324 seq_printf(m, "\taction = %s(%d) %d ms ago\n",
1325 hangcheck_action_to_str(engine->hangcheck.action),
1326 engine->hangcheck.action,
1327 jiffies_to_msecs(jiffies -
1328 engine->hangcheck.action_timestamp));
61642ff0 1329
e2f80391 1330 if (engine->id == RCS) {
d636951e 1331 seq_puts(m, "\tinstdone read =\n");
61642ff0 1332
d636951e 1333 i915_instdone_info(dev_priv, m, &instdone);
61642ff0 1334
d636951e 1335 seq_puts(m, "\tinstdone accu =\n");
61642ff0 1336
d636951e
BW
1337 i915_instdone_info(dev_priv, m,
1338 &engine->hangcheck.instdone);
61642ff0 1339 }
f654449a
CW
1340 }
1341
1342 return 0;
1343}
1344
061d06a2
MT
1345static int i915_reset_info(struct seq_file *m, void *unused)
1346{
1347 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1348 struct i915_gpu_error *error = &dev_priv->gpu_error;
1349 struct intel_engine_cs *engine;
1350 enum intel_engine_id id;
1351
1352 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1353
1354 for_each_engine(engine, dev_priv, id) {
1355 seq_printf(m, "%s = %u\n", engine->name,
1356 i915_reset_engine_count(error, engine));
1357 }
1358
1359 return 0;
1360}
1361
4d85529d 1362static int ironlake_drpc_info(struct seq_file *m)
f97108d1 1363{
36cdd013 1364 struct drm_i915_private *dev_priv = node_to_i915(m->private);
616fdb5a
BW
1365 u32 rgvmodectl, rstdbyctl;
1366 u16 crstandvid;
616fdb5a 1367
616fdb5a
BW
1368 rgvmodectl = I915_READ(MEMMODECTL);
1369 rstdbyctl = I915_READ(RSTDBYCTL);
1370 crstandvid = I915_READ16(CRSTANDVID);
1371
742f491d 1372 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
f97108d1
JB
1373 seq_printf(m, "Boost freq: %d\n",
1374 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1375 MEMMODE_BOOST_FREQ_SHIFT);
1376 seq_printf(m, "HW control enabled: %s\n",
742f491d 1377 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
f97108d1 1378 seq_printf(m, "SW control enabled: %s\n",
742f491d 1379 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
f97108d1 1380 seq_printf(m, "Gated voltage change: %s\n",
742f491d 1381 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
f97108d1
JB
1382 seq_printf(m, "Starting frequency: P%d\n",
1383 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
7648fa99 1384 seq_printf(m, "Max P-state: P%d\n",
f97108d1 1385 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
7648fa99
JB
1386 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1387 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1388 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1389 seq_printf(m, "Render standby enabled: %s\n",
742f491d 1390 yesno(!(rstdbyctl & RCX_SW_EXIT)));
267f0c90 1391 seq_puts(m, "Current RS state: ");
88271da3
JB
1392 switch (rstdbyctl & RSX_STATUS_MASK) {
1393 case RSX_STATUS_ON:
267f0c90 1394 seq_puts(m, "on\n");
88271da3
JB
1395 break;
1396 case RSX_STATUS_RC1:
267f0c90 1397 seq_puts(m, "RC1\n");
88271da3
JB
1398 break;
1399 case RSX_STATUS_RC1E:
267f0c90 1400 seq_puts(m, "RC1E\n");
88271da3
JB
1401 break;
1402 case RSX_STATUS_RS1:
267f0c90 1403 seq_puts(m, "RS1\n");
88271da3
JB
1404 break;
1405 case RSX_STATUS_RS2:
267f0c90 1406 seq_puts(m, "RS2 (RC6)\n");
88271da3
JB
1407 break;
1408 case RSX_STATUS_RS3:
267f0c90 1409 seq_puts(m, "RC3 (RC6+)\n");
88271da3
JB
1410 break;
1411 default:
267f0c90 1412 seq_puts(m, "unknown\n");
88271da3
JB
1413 break;
1414 }
f97108d1
JB
1415
1416 return 0;
1417}
1418
f65367b5 1419static int i915_forcewake_domains(struct seq_file *m, void *data)
669ab5aa 1420{
233ebf57 1421 struct drm_i915_private *i915 = node_to_i915(m->private);
b2cff0db 1422 struct intel_uncore_forcewake_domain *fw_domain;
d2dc94bc 1423 unsigned int tmp;
b2cff0db 1424
233ebf57 1425 for_each_fw_domain(fw_domain, i915, tmp)
b2cff0db 1426 seq_printf(m, "%s.wake_count = %u\n",
33c582c1 1427 intel_uncore_forcewake_domain_to_str(fw_domain->id),
233ebf57 1428 READ_ONCE(fw_domain->wake_count));
669ab5aa 1429
b2cff0db
CW
1430 return 0;
1431}
1432
1362877e
MK
1433static void print_rc6_res(struct seq_file *m,
1434 const char *title,
1435 const i915_reg_t reg)
1436{
1437 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1438
1439 seq_printf(m, "%s %u (%llu us)\n",
1440 title, I915_READ(reg),
1441 intel_rc6_residency_us(dev_priv, reg));
1442}
1443
b2cff0db
CW
1444static int vlv_drpc_info(struct seq_file *m)
1445{
36cdd013 1446 struct drm_i915_private *dev_priv = node_to_i915(m->private);
6b312cd3 1447 u32 rpmodectl1, rcctl1, pw_status;
669ab5aa 1448
6b312cd3 1449 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
669ab5aa
D
1450 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1451 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1452
1453 seq_printf(m, "Video Turbo Mode: %s\n",
1454 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1455 seq_printf(m, "Turbo enabled: %s\n",
1456 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1457 seq_printf(m, "HW control enabled: %s\n",
1458 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1459 seq_printf(m, "SW control enabled: %s\n",
1460 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1461 GEN6_RP_MEDIA_SW_MODE));
1462 seq_printf(m, "RC6 Enabled: %s\n",
1463 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1464 GEN6_RC_CTL_EI_MODE(1))));
1465 seq_printf(m, "Render Power Well: %s\n",
6b312cd3 1466 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
669ab5aa 1467 seq_printf(m, "Media Power Well: %s\n",
6b312cd3 1468 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
669ab5aa 1469
1362877e
MK
1470 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1471 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
9cc19be5 1472
f65367b5 1473 return i915_forcewake_domains(m, NULL);
669ab5aa
D
1474}
1475
4d85529d
BW
1476static int gen6_drpc_info(struct seq_file *m)
1477{
36cdd013 1478 struct drm_i915_private *dev_priv = node_to_i915(m->private);
ecd8faea 1479 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
f2dd7578 1480 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
93b525dc 1481 unsigned forcewake_count;
cf632bd6 1482 int count = 0;
93b525dc 1483
cf632bd6 1484 forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count);
93b525dc 1485 if (forcewake_count) {
267f0c90
DL
1486 seq_puts(m, "RC information inaccurate because somebody "
1487 "holds a forcewake reference \n");
4d85529d
BW
1488 } else {
1489 /* NB: we cannot use forcewake, else we read the wrong values */
1490 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1491 udelay(10);
1492 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1493 }
1494
75aa3f63 1495 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
ed71f1b4 1496 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
4d85529d
BW
1497
1498 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1499 rcctl1 = I915_READ(GEN6_RC_CONTROL);
36cdd013 1500 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1501 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1502 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1503 }
cf632bd6 1504
44cbd338
BW
1505 mutex_lock(&dev_priv->rps.hw_lock);
1506 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1507 mutex_unlock(&dev_priv->rps.hw_lock);
4d85529d
BW
1508
1509 seq_printf(m, "Video Turbo Mode: %s\n",
1510 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1511 seq_printf(m, "HW control enabled: %s\n",
1512 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1513 seq_printf(m, "SW control enabled: %s\n",
1514 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1515 GEN6_RP_MEDIA_SW_MODE));
fff24e21 1516 seq_printf(m, "RC1e Enabled: %s\n",
4d85529d
BW
1517 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1518 seq_printf(m, "RC6 Enabled: %s\n",
1519 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
36cdd013 1520 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1521 seq_printf(m, "Render Well Gating Enabled: %s\n",
1522 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1523 seq_printf(m, "Media Well Gating Enabled: %s\n",
1524 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1525 }
4d85529d
BW
1526 seq_printf(m, "Deep RC6 Enabled: %s\n",
1527 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1528 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1529 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
267f0c90 1530 seq_puts(m, "Current RC state: ");
4d85529d
BW
1531 switch (gt_core_status & GEN6_RCn_MASK) {
1532 case GEN6_RC0:
1533 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
267f0c90 1534 seq_puts(m, "Core Power Down\n");
4d85529d 1535 else
267f0c90 1536 seq_puts(m, "on\n");
4d85529d
BW
1537 break;
1538 case GEN6_RC3:
267f0c90 1539 seq_puts(m, "RC3\n");
4d85529d
BW
1540 break;
1541 case GEN6_RC6:
267f0c90 1542 seq_puts(m, "RC6\n");
4d85529d
BW
1543 break;
1544 case GEN6_RC7:
267f0c90 1545 seq_puts(m, "RC7\n");
4d85529d
BW
1546 break;
1547 default:
267f0c90 1548 seq_puts(m, "Unknown\n");
4d85529d
BW
1549 break;
1550 }
1551
1552 seq_printf(m, "Core Power Down: %s\n",
1553 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
36cdd013 1554 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1555 seq_printf(m, "Render Power Well: %s\n",
1556 (gen9_powergate_status &
1557 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1558 seq_printf(m, "Media Power Well: %s\n",
1559 (gen9_powergate_status &
1560 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1561 }
cce66a28
BW
1562
1563 /* Not exactly sure what this is */
1362877e
MK
1564 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1565 GEN6_GT_GFX_RC6_LOCKED);
1566 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1567 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1568 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
cce66a28 1569
ecd8faea
BW
1570 seq_printf(m, "RC6 voltage: %dmV\n",
1571 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1572 seq_printf(m, "RC6+ voltage: %dmV\n",
1573 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1574 seq_printf(m, "RC6++ voltage: %dmV\n",
1575 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
f2dd7578 1576 return i915_forcewake_domains(m, NULL);
4d85529d
BW
1577}
1578
1579static int i915_drpc_info(struct seq_file *m, void *unused)
1580{
36cdd013 1581 struct drm_i915_private *dev_priv = node_to_i915(m->private);
cf632bd6
CW
1582 int err;
1583
1584 intel_runtime_pm_get(dev_priv);
4d85529d 1585
36cdd013 1586 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
cf632bd6 1587 err = vlv_drpc_info(m);
36cdd013 1588 else if (INTEL_GEN(dev_priv) >= 6)
cf632bd6 1589 err = gen6_drpc_info(m);
4d85529d 1590 else
cf632bd6
CW
1591 err = ironlake_drpc_info(m);
1592
1593 intel_runtime_pm_put(dev_priv);
1594
1595 return err;
4d85529d
BW
1596}
1597
9a851789
DV
1598static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1599{
36cdd013 1600 struct drm_i915_private *dev_priv = node_to_i915(m->private);
9a851789
DV
1601
1602 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1603 dev_priv->fb_tracking.busy_bits);
1604
1605 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1606 dev_priv->fb_tracking.flip_bits);
1607
1608 return 0;
1609}
1610
b5e50c3f
JB
1611static int i915_fbc_status(struct seq_file *m, void *unused)
1612{
36cdd013 1613 struct drm_i915_private *dev_priv = node_to_i915(m->private);
b5e50c3f 1614
36cdd013 1615 if (!HAS_FBC(dev_priv)) {
267f0c90 1616 seq_puts(m, "FBC unsupported on this chipset\n");
b5e50c3f
JB
1617 return 0;
1618 }
1619
36623ef8 1620 intel_runtime_pm_get(dev_priv);
25ad93fd 1621 mutex_lock(&dev_priv->fbc.lock);
36623ef8 1622
0e631adc 1623 if (intel_fbc_is_active(dev_priv))
267f0c90 1624 seq_puts(m, "FBC enabled\n");
2e8144a5
PZ
1625 else
1626 seq_printf(m, "FBC disabled: %s\n",
bf6189c6 1627 dev_priv->fbc.no_fbc_reason);
36623ef8 1628
3fd5d1ec
VS
1629 if (intel_fbc_is_active(dev_priv)) {
1630 u32 mask;
1631
1632 if (INTEL_GEN(dev_priv) >= 8)
1633 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1634 else if (INTEL_GEN(dev_priv) >= 7)
1635 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1636 else if (INTEL_GEN(dev_priv) >= 5)
1637 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1638 else if (IS_G4X(dev_priv))
1639 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1640 else
1641 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1642 FBC_STAT_COMPRESSED);
1643
1644 seq_printf(m, "Compressing: %s\n", yesno(mask));
0fc6a9dc 1645 }
31b9df10 1646
25ad93fd 1647 mutex_unlock(&dev_priv->fbc.lock);
36623ef8
PZ
1648 intel_runtime_pm_put(dev_priv);
1649
b5e50c3f
JB
1650 return 0;
1651}
1652
4127dc43 1653static int i915_fbc_false_color_get(void *data, u64 *val)
da46f936 1654{
36cdd013 1655 struct drm_i915_private *dev_priv = data;
da46f936 1656
36cdd013 1657 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
da46f936
RV
1658 return -ENODEV;
1659
da46f936 1660 *val = dev_priv->fbc.false_color;
da46f936
RV
1661
1662 return 0;
1663}
1664
4127dc43 1665static int i915_fbc_false_color_set(void *data, u64 val)
da46f936 1666{
36cdd013 1667 struct drm_i915_private *dev_priv = data;
da46f936
RV
1668 u32 reg;
1669
36cdd013 1670 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
da46f936
RV
1671 return -ENODEV;
1672
25ad93fd 1673 mutex_lock(&dev_priv->fbc.lock);
da46f936
RV
1674
1675 reg = I915_READ(ILK_DPFC_CONTROL);
1676 dev_priv->fbc.false_color = val;
1677
1678 I915_WRITE(ILK_DPFC_CONTROL, val ?
1679 (reg | FBC_CTL_FALSE_COLOR) :
1680 (reg & ~FBC_CTL_FALSE_COLOR));
1681
25ad93fd 1682 mutex_unlock(&dev_priv->fbc.lock);
da46f936
RV
1683 return 0;
1684}
1685
4127dc43
VS
1686DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1687 i915_fbc_false_color_get, i915_fbc_false_color_set,
da46f936
RV
1688 "%llu\n");
1689
92d44621
PZ
1690static int i915_ips_status(struct seq_file *m, void *unused)
1691{
36cdd013 1692 struct drm_i915_private *dev_priv = node_to_i915(m->private);
92d44621 1693
36cdd013 1694 if (!HAS_IPS(dev_priv)) {
92d44621
PZ
1695 seq_puts(m, "not supported\n");
1696 return 0;
1697 }
1698
36623ef8
PZ
1699 intel_runtime_pm_get(dev_priv);
1700
0eaa53f0
RV
1701 seq_printf(m, "Enabled by kernel parameter: %s\n",
1702 yesno(i915.enable_ips));
1703
36cdd013 1704 if (INTEL_GEN(dev_priv) >= 8) {
0eaa53f0
RV
1705 seq_puts(m, "Currently: unknown\n");
1706 } else {
1707 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1708 seq_puts(m, "Currently: enabled\n");
1709 else
1710 seq_puts(m, "Currently: disabled\n");
1711 }
92d44621 1712
36623ef8
PZ
1713 intel_runtime_pm_put(dev_priv);
1714
92d44621
PZ
1715 return 0;
1716}
1717
4a9bef37
JB
1718static int i915_sr_status(struct seq_file *m, void *unused)
1719{
36cdd013 1720 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4a9bef37
JB
1721 bool sr_enabled = false;
1722
36623ef8 1723 intel_runtime_pm_get(dev_priv);
9c870d03 1724 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
36623ef8 1725
7342a72c
CW
1726 if (INTEL_GEN(dev_priv) >= 9)
1727 /* no global SR status; inspect per-plane WM */;
1728 else if (HAS_PCH_SPLIT(dev_priv))
5ba2aaaa 1729 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
c0f86832 1730 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
36cdd013 1731 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
4a9bef37 1732 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
36cdd013 1733 else if (IS_I915GM(dev_priv))
4a9bef37 1734 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
36cdd013 1735 else if (IS_PINEVIEW(dev_priv))
4a9bef37 1736 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
36cdd013 1737 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
77b64555 1738 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4a9bef37 1739
9c870d03 1740 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
36623ef8
PZ
1741 intel_runtime_pm_put(dev_priv);
1742
08c4d7fc 1743 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
4a9bef37
JB
1744
1745 return 0;
1746}
1747
7648fa99
JB
1748static int i915_emon_status(struct seq_file *m, void *unused)
1749{
36cdd013
DW
1750 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1751 struct drm_device *dev = &dev_priv->drm;
7648fa99 1752 unsigned long temp, chipset, gfx;
de227ef0
CW
1753 int ret;
1754
36cdd013 1755 if (!IS_GEN5(dev_priv))
582be6b4
CW
1756 return -ENODEV;
1757
de227ef0
CW
1758 ret = mutex_lock_interruptible(&dev->struct_mutex);
1759 if (ret)
1760 return ret;
7648fa99
JB
1761
1762 temp = i915_mch_val(dev_priv);
1763 chipset = i915_chipset_val(dev_priv);
1764 gfx = i915_gfx_val(dev_priv);
de227ef0 1765 mutex_unlock(&dev->struct_mutex);
7648fa99
JB
1766
1767 seq_printf(m, "GMCH temp: %ld\n", temp);
1768 seq_printf(m, "Chipset power: %ld\n", chipset);
1769 seq_printf(m, "GFX power: %ld\n", gfx);
1770 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1771
1772 return 0;
1773}
1774
23b2f8bb
JB
1775static int i915_ring_freq_table(struct seq_file *m, void *unused)
1776{
36cdd013 1777 struct drm_i915_private *dev_priv = node_to_i915(m->private);
5bfa0199 1778 int ret = 0;
23b2f8bb 1779 int gpu_freq, ia_freq;
f936ec34 1780 unsigned int max_gpu_freq, min_gpu_freq;
23b2f8bb 1781
26310346 1782 if (!HAS_LLC(dev_priv)) {
267f0c90 1783 seq_puts(m, "unsupported on this chipset\n");
23b2f8bb
JB
1784 return 0;
1785 }
1786
5bfa0199
PZ
1787 intel_runtime_pm_get(dev_priv);
1788
4fc688ce 1789 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
23b2f8bb 1790 if (ret)
5bfa0199 1791 goto out;
23b2f8bb 1792
35ceabf3 1793 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
f936ec34
AG
1794 /* Convert GT frequency to 50 HZ units */
1795 min_gpu_freq =
1796 dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
1797 max_gpu_freq =
1798 dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
1799 } else {
1800 min_gpu_freq = dev_priv->rps.min_freq_softlimit;
1801 max_gpu_freq = dev_priv->rps.max_freq_softlimit;
1802 }
1803
267f0c90 1804 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
23b2f8bb 1805
f936ec34 1806 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
42c0526c
BW
1807 ia_freq = gpu_freq;
1808 sandybridge_pcode_read(dev_priv,
1809 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1810 &ia_freq);
3ebecd07 1811 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
f936ec34 1812 intel_gpu_freq(dev_priv, (gpu_freq *
35ceabf3
RV
1813 (IS_GEN9_BC(dev_priv) ||
1814 IS_CANNONLAKE(dev_priv) ?
b976dc53 1815 GEN9_FREQ_SCALER : 1))),
3ebecd07
CW
1816 ((ia_freq >> 0) & 0xff) * 100,
1817 ((ia_freq >> 8) & 0xff) * 100);
23b2f8bb
JB
1818 }
1819
4fc688ce 1820 mutex_unlock(&dev_priv->rps.hw_lock);
23b2f8bb 1821
5bfa0199
PZ
1822out:
1823 intel_runtime_pm_put(dev_priv);
1824 return ret;
23b2f8bb
JB
1825}
1826
44834a67
CW
1827static int i915_opregion(struct seq_file *m, void *unused)
1828{
36cdd013
DW
1829 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1830 struct drm_device *dev = &dev_priv->drm;
44834a67
CW
1831 struct intel_opregion *opregion = &dev_priv->opregion;
1832 int ret;
1833
1834 ret = mutex_lock_interruptible(&dev->struct_mutex);
1835 if (ret)
0d38f009 1836 goto out;
44834a67 1837
2455a8e4
JN
1838 if (opregion->header)
1839 seq_write(m, opregion->header, OPREGION_SIZE);
44834a67
CW
1840
1841 mutex_unlock(&dev->struct_mutex);
1842
0d38f009 1843out:
44834a67
CW
1844 return 0;
1845}
1846
ada8f955
JN
1847static int i915_vbt(struct seq_file *m, void *unused)
1848{
36cdd013 1849 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
ada8f955
JN
1850
1851 if (opregion->vbt)
1852 seq_write(m, opregion->vbt, opregion->vbt_size);
1853
1854 return 0;
1855}
1856
37811fcc
CW
1857static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1858{
36cdd013
DW
1859 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1860 struct drm_device *dev = &dev_priv->drm;
b13b8402 1861 struct intel_framebuffer *fbdev_fb = NULL;
3a58ee10 1862 struct drm_framebuffer *drm_fb;
188c1ab7
CW
1863 int ret;
1864
1865 ret = mutex_lock_interruptible(&dev->struct_mutex);
1866 if (ret)
1867 return ret;
37811fcc 1868
0695726e 1869#ifdef CONFIG_DRM_FBDEV_EMULATION
346fb4e0 1870 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
36cdd013 1871 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
25bcce94
CW
1872
1873 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1874 fbdev_fb->base.width,
1875 fbdev_fb->base.height,
b00c600e 1876 fbdev_fb->base.format->depth,
272725c7 1877 fbdev_fb->base.format->cpp[0] * 8,
bae781b2 1878 fbdev_fb->base.modifier,
25bcce94
CW
1879 drm_framebuffer_read_refcount(&fbdev_fb->base));
1880 describe_obj(m, fbdev_fb->obj);
1881 seq_putc(m, '\n');
1882 }
4520f53a 1883#endif
37811fcc 1884
4b096ac1 1885 mutex_lock(&dev->mode_config.fb_lock);
3a58ee10 1886 drm_for_each_fb(drm_fb, dev) {
b13b8402
NS
1887 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1888 if (fb == fbdev_fb)
37811fcc
CW
1889 continue;
1890
c1ca506d 1891 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
37811fcc
CW
1892 fb->base.width,
1893 fb->base.height,
b00c600e 1894 fb->base.format->depth,
272725c7 1895 fb->base.format->cpp[0] * 8,
bae781b2 1896 fb->base.modifier,
747a598f 1897 drm_framebuffer_read_refcount(&fb->base));
05394f39 1898 describe_obj(m, fb->obj);
267f0c90 1899 seq_putc(m, '\n');
37811fcc 1900 }
4b096ac1 1901 mutex_unlock(&dev->mode_config.fb_lock);
188c1ab7 1902 mutex_unlock(&dev->struct_mutex);
37811fcc
CW
1903
1904 return 0;
1905}
1906
7e37f889 1907static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
c9fe99bd 1908{
fe085f13
CW
1909 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
1910 ring->space, ring->head, ring->tail);
c9fe99bd
OM
1911}
1912
e76d3630
BW
1913static int i915_context_status(struct seq_file *m, void *unused)
1914{
36cdd013
DW
1915 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1916 struct drm_device *dev = &dev_priv->drm;
e2f80391 1917 struct intel_engine_cs *engine;
e2efd130 1918 struct i915_gem_context *ctx;
3b3f1650 1919 enum intel_engine_id id;
c3232b18 1920 int ret;
e76d3630 1921
f3d28878 1922 ret = mutex_lock_interruptible(&dev->struct_mutex);
e76d3630
BW
1923 if (ret)
1924 return ret;
1925
829a0af2 1926 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
5d1808ec 1927 seq_printf(m, "HW context %u ", ctx->hw_id);
c84455b4 1928 if (ctx->pid) {
d28b99ab
CW
1929 struct task_struct *task;
1930
c84455b4 1931 task = get_pid_task(ctx->pid, PIDTYPE_PID);
d28b99ab
CW
1932 if (task) {
1933 seq_printf(m, "(%s [%d]) ",
1934 task->comm, task->pid);
1935 put_task_struct(task);
1936 }
c84455b4
CW
1937 } else if (IS_ERR(ctx->file_priv)) {
1938 seq_puts(m, "(deleted) ");
d28b99ab
CW
1939 } else {
1940 seq_puts(m, "(kernel) ");
1941 }
1942
bca44d80
CW
1943 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1944 seq_putc(m, '\n');
c9fe99bd 1945
3b3f1650 1946 for_each_engine(engine, dev_priv, id) {
bca44d80
CW
1947 struct intel_context *ce = &ctx->engine[engine->id];
1948
1949 seq_printf(m, "%s: ", engine->name);
1950 seq_putc(m, ce->initialised ? 'I' : 'i');
1951 if (ce->state)
bf3783e5 1952 describe_obj(m, ce->state->obj);
dca33ecc 1953 if (ce->ring)
7e37f889 1954 describe_ctx_ring(m, ce->ring);
c9fe99bd 1955 seq_putc(m, '\n');
c9fe99bd 1956 }
a33afea5 1957
a33afea5 1958 seq_putc(m, '\n');
a168c293
BW
1959 }
1960
f3d28878 1961 mutex_unlock(&dev->struct_mutex);
e76d3630
BW
1962
1963 return 0;
1964}
1965
064ca1d2 1966static void i915_dump_lrc_obj(struct seq_file *m,
e2efd130 1967 struct i915_gem_context *ctx,
0bc40be8 1968 struct intel_engine_cs *engine)
064ca1d2 1969{
bf3783e5 1970 struct i915_vma *vma = ctx->engine[engine->id].state;
064ca1d2 1971 struct page *page;
064ca1d2 1972 int j;
064ca1d2 1973
7069b144
CW
1974 seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
1975
bf3783e5
CW
1976 if (!vma) {
1977 seq_puts(m, "\tFake context\n");
064ca1d2
TD
1978 return;
1979 }
1980
bf3783e5
CW
1981 if (vma->flags & I915_VMA_GLOBAL_BIND)
1982 seq_printf(m, "\tBound in GGTT at 0x%08x\n",
bde13ebd 1983 i915_ggtt_offset(vma));
064ca1d2 1984
a4f5ea64 1985 if (i915_gem_object_pin_pages(vma->obj)) {
bf3783e5 1986 seq_puts(m, "\tFailed to get pages for context object\n\n");
064ca1d2
TD
1987 return;
1988 }
1989
bf3783e5
CW
1990 page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
1991 if (page) {
1992 u32 *reg_state = kmap_atomic(page);
064ca1d2
TD
1993
1994 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
bf3783e5
CW
1995 seq_printf(m,
1996 "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
1997 j * 4,
064ca1d2
TD
1998 reg_state[j], reg_state[j + 1],
1999 reg_state[j + 2], reg_state[j + 3]);
2000 }
2001 kunmap_atomic(reg_state);
2002 }
2003
a4f5ea64 2004 i915_gem_object_unpin_pages(vma->obj);
064ca1d2
TD
2005 seq_putc(m, '\n');
2006}
2007
c0ab1ae9
BW
2008static int i915_dump_lrc(struct seq_file *m, void *unused)
2009{
36cdd013
DW
2010 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2011 struct drm_device *dev = &dev_priv->drm;
e2f80391 2012 struct intel_engine_cs *engine;
e2efd130 2013 struct i915_gem_context *ctx;
3b3f1650 2014 enum intel_engine_id id;
b4ac5afc 2015 int ret;
c0ab1ae9
BW
2016
2017 if (!i915.enable_execlists) {
2018 seq_printf(m, "Logical Ring Contexts are disabled\n");
2019 return 0;
2020 }
2021
2022 ret = mutex_lock_interruptible(&dev->struct_mutex);
2023 if (ret)
2024 return ret;
2025
829a0af2 2026 list_for_each_entry(ctx, &dev_priv->contexts.list, link)
3b3f1650 2027 for_each_engine(engine, dev_priv, id)
24f1d3cc 2028 i915_dump_lrc_obj(m, ctx, engine);
c0ab1ae9
BW
2029
2030 mutex_unlock(&dev->struct_mutex);
2031
2032 return 0;
2033}
2034
ea16a3cd
DV
2035static const char *swizzle_string(unsigned swizzle)
2036{
aee56cff 2037 switch (swizzle) {
ea16a3cd
DV
2038 case I915_BIT_6_SWIZZLE_NONE:
2039 return "none";
2040 case I915_BIT_6_SWIZZLE_9:
2041 return "bit9";
2042 case I915_BIT_6_SWIZZLE_9_10:
2043 return "bit9/bit10";
2044 case I915_BIT_6_SWIZZLE_9_11:
2045 return "bit9/bit11";
2046 case I915_BIT_6_SWIZZLE_9_10_11:
2047 return "bit9/bit10/bit11";
2048 case I915_BIT_6_SWIZZLE_9_17:
2049 return "bit9/bit17";
2050 case I915_BIT_6_SWIZZLE_9_10_17:
2051 return "bit9/bit10/bit17";
2052 case I915_BIT_6_SWIZZLE_UNKNOWN:
8a168ca7 2053 return "unknown";
ea16a3cd
DV
2054 }
2055
2056 return "bug";
2057}
2058
2059static int i915_swizzle_info(struct seq_file *m, void *data)
2060{
36cdd013 2061 struct drm_i915_private *dev_priv = node_to_i915(m->private);
22bcfc6a 2062
c8c8fb33 2063 intel_runtime_pm_get(dev_priv);
ea16a3cd 2064
ea16a3cd
DV
2065 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2066 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2067 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2068 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2069
36cdd013 2070 if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
ea16a3cd
DV
2071 seq_printf(m, "DDC = 0x%08x\n",
2072 I915_READ(DCC));
656bfa3a
DV
2073 seq_printf(m, "DDC2 = 0x%08x\n",
2074 I915_READ(DCC2));
ea16a3cd
DV
2075 seq_printf(m, "C0DRB3 = 0x%04x\n",
2076 I915_READ16(C0DRB3));
2077 seq_printf(m, "C1DRB3 = 0x%04x\n",
2078 I915_READ16(C1DRB3));
36cdd013 2079 } else if (INTEL_GEN(dev_priv) >= 6) {
3fa7d235
DV
2080 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2081 I915_READ(MAD_DIMM_C0));
2082 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2083 I915_READ(MAD_DIMM_C1));
2084 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2085 I915_READ(MAD_DIMM_C2));
2086 seq_printf(m, "TILECTL = 0x%08x\n",
2087 I915_READ(TILECTL));
36cdd013 2088 if (INTEL_GEN(dev_priv) >= 8)
9d3203e1
BW
2089 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2090 I915_READ(GAMTARBMODE));
2091 else
2092 seq_printf(m, "ARB_MODE = 0x%08x\n",
2093 I915_READ(ARB_MODE));
3fa7d235
DV
2094 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2095 I915_READ(DISP_ARB_CTL));
ea16a3cd 2096 }
656bfa3a
DV
2097
2098 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2099 seq_puts(m, "L-shaped memory detected\n");
2100
c8c8fb33 2101 intel_runtime_pm_put(dev_priv);
ea16a3cd
DV
2102
2103 return 0;
2104}
2105
1c60fef5
BW
2106static int per_file_ctx(int id, void *ptr, void *data)
2107{
e2efd130 2108 struct i915_gem_context *ctx = ptr;
1c60fef5 2109 struct seq_file *m = data;
ae6c4806
DV
2110 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2111
2112 if (!ppgtt) {
2113 seq_printf(m, " no ppgtt for context %d\n",
2114 ctx->user_handle);
2115 return 0;
2116 }
1c60fef5 2117
f83d6518
OM
2118 if (i915_gem_context_is_default(ctx))
2119 seq_puts(m, " default context:\n");
2120 else
821d66dd 2121 seq_printf(m, " context %d:\n", ctx->user_handle);
1c60fef5
BW
2122 ppgtt->debug_dump(ppgtt, m);
2123
2124 return 0;
2125}
2126
36cdd013
DW
2127static void gen8_ppgtt_info(struct seq_file *m,
2128 struct drm_i915_private *dev_priv)
3cf17fc5 2129{
77df6772 2130 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
3b3f1650
AG
2131 struct intel_engine_cs *engine;
2132 enum intel_engine_id id;
b4ac5afc 2133 int i;
3cf17fc5 2134
77df6772
BW
2135 if (!ppgtt)
2136 return;
2137
3b3f1650 2138 for_each_engine(engine, dev_priv, id) {
e2f80391 2139 seq_printf(m, "%s\n", engine->name);
77df6772 2140 for (i = 0; i < 4; i++) {
e2f80391 2141 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
77df6772 2142 pdp <<= 32;
e2f80391 2143 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
a2a5b15c 2144 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
77df6772
BW
2145 }
2146 }
2147}
2148
36cdd013
DW
2149static void gen6_ppgtt_info(struct seq_file *m,
2150 struct drm_i915_private *dev_priv)
77df6772 2151{
e2f80391 2152 struct intel_engine_cs *engine;
3b3f1650 2153 enum intel_engine_id id;
3cf17fc5 2154
7e22dbbb 2155 if (IS_GEN6(dev_priv))
3cf17fc5
DV
2156 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2157
3b3f1650 2158 for_each_engine(engine, dev_priv, id) {
e2f80391 2159 seq_printf(m, "%s\n", engine->name);
7e22dbbb 2160 if (IS_GEN7(dev_priv))
e2f80391
TU
2161 seq_printf(m, "GFX_MODE: 0x%08x\n",
2162 I915_READ(RING_MODE_GEN7(engine)));
2163 seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2164 I915_READ(RING_PP_DIR_BASE(engine)));
2165 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2166 I915_READ(RING_PP_DIR_BASE_READ(engine)));
2167 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2168 I915_READ(RING_PP_DIR_DCLV(engine)));
3cf17fc5
DV
2169 }
2170 if (dev_priv->mm.aliasing_ppgtt) {
2171 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2172
267f0c90 2173 seq_puts(m, "aliasing PPGTT:\n");
44159ddb 2174 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
1c60fef5 2175
87d60b63 2176 ppgtt->debug_dump(ppgtt, m);
ae6c4806 2177 }
1c60fef5 2178
3cf17fc5 2179 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
77df6772
BW
2180}
2181
2182static int i915_ppgtt_info(struct seq_file *m, void *data)
2183{
36cdd013
DW
2184 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2185 struct drm_device *dev = &dev_priv->drm;
ea91e401 2186 struct drm_file *file;
637ee29e 2187 int ret;
77df6772 2188
637ee29e
CW
2189 mutex_lock(&dev->filelist_mutex);
2190 ret = mutex_lock_interruptible(&dev->struct_mutex);
77df6772 2191 if (ret)
637ee29e
CW
2192 goto out_unlock;
2193
c8c8fb33 2194 intel_runtime_pm_get(dev_priv);
77df6772 2195
36cdd013
DW
2196 if (INTEL_GEN(dev_priv) >= 8)
2197 gen8_ppgtt_info(m, dev_priv);
2198 else if (INTEL_GEN(dev_priv) >= 6)
2199 gen6_ppgtt_info(m, dev_priv);
77df6772 2200
ea91e401
MT
2201 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2202 struct drm_i915_file_private *file_priv = file->driver_priv;
7cb5dff8 2203 struct task_struct *task;
ea91e401 2204
7cb5dff8 2205 task = get_pid_task(file->pid, PIDTYPE_PID);
06812760
DC
2206 if (!task) {
2207 ret = -ESRCH;
637ee29e 2208 goto out_rpm;
06812760 2209 }
7cb5dff8
GT
2210 seq_printf(m, "\nproc: %s\n", task->comm);
2211 put_task_struct(task);
ea91e401
MT
2212 idr_for_each(&file_priv->context_idr, per_file_ctx,
2213 (void *)(unsigned long)m);
2214 }
2215
637ee29e 2216out_rpm:
c8c8fb33 2217 intel_runtime_pm_put(dev_priv);
3cf17fc5 2218 mutex_unlock(&dev->struct_mutex);
637ee29e
CW
2219out_unlock:
2220 mutex_unlock(&dev->filelist_mutex);
06812760 2221 return ret;
3cf17fc5
DV
2222}
2223
f5a4c67d
CW
2224static int count_irq_waiters(struct drm_i915_private *i915)
2225{
e2f80391 2226 struct intel_engine_cs *engine;
3b3f1650 2227 enum intel_engine_id id;
f5a4c67d 2228 int count = 0;
f5a4c67d 2229
3b3f1650 2230 for_each_engine(engine, i915, id)
688e6c72 2231 count += intel_engine_has_waiter(engine);
f5a4c67d
CW
2232
2233 return count;
2234}
2235
7466c291
CW
2236static const char *rps_power_to_str(unsigned int power)
2237{
2238 static const char * const strings[] = {
2239 [LOW_POWER] = "low power",
2240 [BETWEEN] = "mixed",
2241 [HIGH_POWER] = "high power",
2242 };
2243
2244 if (power >= ARRAY_SIZE(strings) || !strings[power])
2245 return "unknown";
2246
2247 return strings[power];
2248}
2249
1854d5ca
CW
2250static int i915_rps_boost_info(struct seq_file *m, void *data)
2251{
36cdd013
DW
2252 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2253 struct drm_device *dev = &dev_priv->drm;
1854d5ca 2254 struct drm_file *file;
1854d5ca 2255
f5a4c67d 2256 seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
28176ef4
CW
2257 seq_printf(m, "GPU busy? %s [%d requests]\n",
2258 yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
f5a4c67d 2259 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
7b92c1bd
CW
2260 seq_printf(m, "Boosts outstanding? %d\n",
2261 atomic_read(&dev_priv->rps.num_waiters));
7466c291
CW
2262 seq_printf(m, "Frequency requested %d\n",
2263 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
2264 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
f5a4c67d
CW
2265 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
2266 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
2267 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
2268 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
7466c291
CW
2269 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
2270 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
2271 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
2272 intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
1d2ac403
DV
2273
2274 mutex_lock(&dev->filelist_mutex);
1854d5ca
CW
2275 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2276 struct drm_i915_file_private *file_priv = file->driver_priv;
2277 struct task_struct *task;
2278
2279 rcu_read_lock();
2280 task = pid_task(file->pid, PIDTYPE_PID);
7b92c1bd 2281 seq_printf(m, "%s [%d]: %d boosts\n",
1854d5ca
CW
2282 task ? task->comm : "<unknown>",
2283 task ? task->pid : -1,
7b92c1bd 2284 atomic_read(&file_priv->rps.boosts));
1854d5ca
CW
2285 rcu_read_unlock();
2286 }
7b92c1bd
CW
2287 seq_printf(m, "Kernel (anonymous) boosts: %d\n",
2288 atomic_read(&dev_priv->rps.boosts));
1d2ac403 2289 mutex_unlock(&dev->filelist_mutex);
1854d5ca 2290
7466c291
CW
2291 if (INTEL_GEN(dev_priv) >= 6 &&
2292 dev_priv->rps.enabled &&
28176ef4 2293 dev_priv->gt.active_requests) {
7466c291
CW
2294 u32 rpup, rpupei;
2295 u32 rpdown, rpdownei;
2296
2297 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2298 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2299 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2300 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2301 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2302 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2303
2304 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
2305 rps_power_to_str(dev_priv->rps.power));
2306 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
23f4a287 2307 rpup && rpupei ? 100 * rpup / rpupei : 0,
7466c291
CW
2308 dev_priv->rps.up_threshold);
2309 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
23f4a287 2310 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
7466c291
CW
2311 dev_priv->rps.down_threshold);
2312 } else {
2313 seq_puts(m, "\nRPS Autotuning inactive\n");
2314 }
2315
8d3afd7d 2316 return 0;
1854d5ca
CW
2317}
2318
63573eb7
BW
2319static int i915_llc(struct seq_file *m, void *data)
2320{
36cdd013 2321 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3accaf7e 2322 const bool edram = INTEL_GEN(dev_priv) > 8;
63573eb7 2323
36cdd013 2324 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
3accaf7e
MK
2325 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2326 intel_uncore_edram_size(dev_priv)/1024/1024);
63573eb7
BW
2327
2328 return 0;
2329}
2330
0509ead1
AS
2331static int i915_huc_load_status_info(struct seq_file *m, void *data)
2332{
2333 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2334 struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
2335
2336 if (!HAS_HUC_UCODE(dev_priv))
2337 return 0;
2338
2339 seq_puts(m, "HuC firmware status:\n");
2340 seq_printf(m, "\tpath: %s\n", huc_fw->path);
2341 seq_printf(m, "\tfetch: %s\n",
2342 intel_uc_fw_status_repr(huc_fw->fetch_status));
2343 seq_printf(m, "\tload: %s\n",
2344 intel_uc_fw_status_repr(huc_fw->load_status));
2345 seq_printf(m, "\tversion wanted: %d.%d\n",
2346 huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
2347 seq_printf(m, "\tversion found: %d.%d\n",
2348 huc_fw->major_ver_found, huc_fw->minor_ver_found);
2349 seq_printf(m, "\theader: offset is %d; size = %d\n",
2350 huc_fw->header_offset, huc_fw->header_size);
2351 seq_printf(m, "\tuCode: offset is %d; size = %d\n",
2352 huc_fw->ucode_offset, huc_fw->ucode_size);
2353 seq_printf(m, "\tRSA: offset is %d; size = %d\n",
2354 huc_fw->rsa_offset, huc_fw->rsa_size);
2355
3582ad13 2356 intel_runtime_pm_get(dev_priv);
0509ead1 2357 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
3582ad13 2358 intel_runtime_pm_put(dev_priv);
0509ead1
AS
2359
2360 return 0;
2361}
2362
fdf5d357
AD
2363static int i915_guc_load_status_info(struct seq_file *m, void *data)
2364{
36cdd013 2365 struct drm_i915_private *dev_priv = node_to_i915(m->private);
db0a091b 2366 struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
fdf5d357
AD
2367 u32 tmp, i;
2368
2d1fe073 2369 if (!HAS_GUC_UCODE(dev_priv))
fdf5d357
AD
2370 return 0;
2371
2372 seq_printf(m, "GuC firmware status:\n");
2373 seq_printf(m, "\tpath: %s\n",
db0a091b 2374 guc_fw->path);
fdf5d357 2375 seq_printf(m, "\tfetch: %s\n",
db0a091b 2376 intel_uc_fw_status_repr(guc_fw->fetch_status));
fdf5d357 2377 seq_printf(m, "\tload: %s\n",
db0a091b 2378 intel_uc_fw_status_repr(guc_fw->load_status));
fdf5d357 2379 seq_printf(m, "\tversion wanted: %d.%d\n",
db0a091b 2380 guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
fdf5d357 2381 seq_printf(m, "\tversion found: %d.%d\n",
db0a091b 2382 guc_fw->major_ver_found, guc_fw->minor_ver_found);
feda33ef
AD
2383 seq_printf(m, "\theader: offset is %d; size = %d\n",
2384 guc_fw->header_offset, guc_fw->header_size);
2385 seq_printf(m, "\tuCode: offset is %d; size = %d\n",
2386 guc_fw->ucode_offset, guc_fw->ucode_size);
2387 seq_printf(m, "\tRSA: offset is %d; size = %d\n",
2388 guc_fw->rsa_offset, guc_fw->rsa_size);
fdf5d357 2389
3582ad13 2390 intel_runtime_pm_get(dev_priv);
2391
fdf5d357
AD
2392 tmp = I915_READ(GUC_STATUS);
2393
2394 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2395 seq_printf(m, "\tBootrom status = 0x%x\n",
2396 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2397 seq_printf(m, "\tuKernel status = 0x%x\n",
2398 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2399 seq_printf(m, "\tMIA Core status = 0x%x\n",
2400 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2401 seq_puts(m, "\nScratch registers:\n");
2402 for (i = 0; i < 16; i++)
2403 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2404
3582ad13 2405 intel_runtime_pm_put(dev_priv);
2406
fdf5d357
AD
2407 return 0;
2408}
2409
5aa1ee4b
AG
2410static void i915_guc_log_info(struct seq_file *m,
2411 struct drm_i915_private *dev_priv)
2412{
2413 struct intel_guc *guc = &dev_priv->guc;
2414
2415 seq_puts(m, "\nGuC logging stats:\n");
2416
2417 seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n",
2418 guc->log.flush_count[GUC_ISR_LOG_BUFFER],
2419 guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);
2420
2421 seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n",
2422 guc->log.flush_count[GUC_DPC_LOG_BUFFER],
2423 guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);
2424
2425 seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
2426 guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
2427 guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);
2428
2429 seq_printf(m, "\tTotal flush interrupt count: %u\n",
2430 guc->log.flush_interrupt_count);
2431
2432 seq_printf(m, "\tCapture miss count: %u\n",
2433 guc->log.capture_miss_count);
2434}
2435
8b417c26
DG
2436static void i915_guc_client_info(struct seq_file *m,
2437 struct drm_i915_private *dev_priv,
2438 struct i915_guc_client *client)
2439{
e2f80391 2440 struct intel_engine_cs *engine;
c18468c4 2441 enum intel_engine_id id;
8b417c26 2442 uint64_t tot = 0;
8b417c26 2443
b09935a6
OM
2444 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2445 client->priority, client->stage_id, client->proc_desc_offset);
abddffdf 2446 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n",
357248bf 2447 client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
8b417c26
DG
2448 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
2449 client->wq_size, client->wq_offset, client->wq_tail);
2450
551aaecd 2451 seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
8b417c26 2452
3b3f1650 2453 for_each_engine(engine, dev_priv, id) {
c18468c4
DG
2454 u64 submissions = client->submissions[id];
2455 tot += submissions;
8b417c26 2456 seq_printf(m, "\tSubmissions: %llu %s\n",
c18468c4 2457 submissions, engine->name);
8b417c26
DG
2458 }
2459 seq_printf(m, "\tTotal: %llu\n", tot);
2460}
2461
a8b9370f 2462static bool check_guc_submission(struct seq_file *m)
8b417c26 2463{
36cdd013 2464 struct drm_i915_private *dev_priv = node_to_i915(m->private);
334636c6 2465 const struct intel_guc *guc = &dev_priv->guc;
8b417c26 2466
334636c6
CW
2467 if (!guc->execbuf_client) {
2468 seq_printf(m, "GuC submission %s\n",
2469 HAS_GUC_SCHED(dev_priv) ?
2470 "disabled" :
2471 "not supported");
a8b9370f 2472 return false;
334636c6 2473 }
8b417c26 2474
a8b9370f
OM
2475 return true;
2476}
2477
2478static int i915_guc_info(struct seq_file *m, void *data)
2479{
2480 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2481 const struct intel_guc *guc = &dev_priv->guc;
a8b9370f
OM
2482
2483 if (!check_guc_submission(m))
2484 return 0;
2485
9636f6db 2486 seq_printf(m, "Doorbell map:\n");
abddffdf 2487 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
334636c6 2488 seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
9636f6db 2489
334636c6
CW
2490 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2491 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
8b417c26 2492
5aa1ee4b
AG
2493 i915_guc_log_info(m, dev_priv);
2494
8b417c26
DG
2495 /* Add more as required ... */
2496
2497 return 0;
2498}
2499
a8b9370f 2500static int i915_guc_stage_pool(struct seq_file *m, void *data)
4c7e77fc 2501{
36cdd013 2502 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a8b9370f
OM
2503 const struct intel_guc *guc = &dev_priv->guc;
2504 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2505 struct i915_guc_client *client = guc->execbuf_client;
2506 unsigned int tmp;
2507 int index;
4c7e77fc 2508
a8b9370f 2509 if (!check_guc_submission(m))
4c7e77fc
AD
2510 return 0;
2511
a8b9370f
OM
2512 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2513 struct intel_engine_cs *engine;
2514
2515 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2516 continue;
4c7e77fc 2517
a8b9370f
OM
2518 seq_printf(m, "GuC stage descriptor %u:\n", index);
2519 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2520 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2521 seq_printf(m, "\tPriority: %d\n", desc->priority);
2522 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2523 seq_printf(m, "\tEngines used: 0x%x\n",
2524 desc->engines_used);
2525 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2526 desc->db_trigger_phy,
2527 desc->db_trigger_cpu,
2528 desc->db_trigger_uk);
2529 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2530 desc->process_desc);
9a09485d 2531 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
a8b9370f
OM
2532 desc->wq_addr, desc->wq_size);
2533 seq_putc(m, '\n');
4c7e77fc 2534
a8b9370f
OM
2535 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2536 u32 guc_engine_id = engine->guc_id;
2537 struct guc_execlist_context *lrc =
2538 &desc->lrc[guc_engine_id];
2539
2540 seq_printf(m, "\t%s LRC:\n", engine->name);
2541 seq_printf(m, "\t\tContext desc: 0x%x\n",
2542 lrc->context_desc);
2543 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2544 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2545 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2546 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2547 seq_putc(m, '\n');
2548 }
4c7e77fc
AD
2549 }
2550
a8b9370f
OM
2551 return 0;
2552}
2553
4c7e77fc
AD
2554static int i915_guc_log_dump(struct seq_file *m, void *data)
2555{
ac58d2ab
DCS
2556 struct drm_info_node *node = m->private;
2557 struct drm_i915_private *dev_priv = node_to_i915(node);
2558 bool dump_load_err = !!node->info_ent->data;
2559 struct drm_i915_gem_object *obj = NULL;
2560 u32 *log;
2561 int i = 0;
4c7e77fc 2562
ac58d2ab
DCS
2563 if (dump_load_err)
2564 obj = dev_priv->guc.load_err_log;
2565 else if (dev_priv->guc.log.vma)
2566 obj = dev_priv->guc.log.vma->obj;
4c7e77fc 2567
ac58d2ab
DCS
2568 if (!obj)
2569 return 0;
4c7e77fc 2570
ac58d2ab
DCS
2571 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2572 if (IS_ERR(log)) {
2573 DRM_DEBUG("Failed to pin object\n");
2574 seq_puts(m, "(log data unaccessible)\n");
2575 return PTR_ERR(log);
4c7e77fc
AD
2576 }
2577
ac58d2ab
DCS
2578 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2579 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2580 *(log + i), *(log + i + 1),
2581 *(log + i + 2), *(log + i + 3));
2582
4c7e77fc
AD
2583 seq_putc(m, '\n');
2584
ac58d2ab
DCS
2585 i915_gem_object_unpin_map(obj);
2586
4c7e77fc
AD
2587 return 0;
2588}
2589
685534ef
SAK
2590static int i915_guc_log_control_get(void *data, u64 *val)
2591{
bcc36d8a 2592 struct drm_i915_private *dev_priv = data;
685534ef
SAK
2593
2594 if (!dev_priv->guc.log.vma)
2595 return -EINVAL;
2596
2597 *val = i915.guc_log_level;
2598
2599 return 0;
2600}
2601
2602static int i915_guc_log_control_set(void *data, u64 val)
2603{
bcc36d8a 2604 struct drm_i915_private *dev_priv = data;
685534ef
SAK
2605 int ret;
2606
2607 if (!dev_priv->guc.log.vma)
2608 return -EINVAL;
2609
bcc36d8a 2610 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
685534ef
SAK
2611 if (ret)
2612 return ret;
2613
2614 intel_runtime_pm_get(dev_priv);
2615 ret = i915_guc_log_control(dev_priv, val);
2616 intel_runtime_pm_put(dev_priv);
2617
bcc36d8a 2618 mutex_unlock(&dev_priv->drm.struct_mutex);
685534ef
SAK
2619 return ret;
2620}
2621
2622DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
2623 i915_guc_log_control_get, i915_guc_log_control_set,
2624 "%lld\n");
2625
b86bef20
CW
2626static const char *psr2_live_status(u32 val)
2627{
2628 static const char * const live_status[] = {
2629 "IDLE",
2630 "CAPTURE",
2631 "CAPTURE_FS",
2632 "SLEEP",
2633 "BUFON_FW",
2634 "ML_UP",
2635 "SU_STANDBY",
2636 "FAST_SLEEP",
2637 "DEEP_SLEEP",
2638 "BUF_ON",
2639 "TG_ON"
2640 };
2641
2642 val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
2643 if (val < ARRAY_SIZE(live_status))
2644 return live_status[val];
2645
2646 return "unknown";
2647}
2648
e91fd8c6
RV
2649static int i915_edp_psr_status(struct seq_file *m, void *data)
2650{
36cdd013 2651 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a031d709 2652 u32 psrperf = 0;
a6cbdb8e
RV
2653 u32 stat[3];
2654 enum pipe pipe;
a031d709 2655 bool enabled = false;
e91fd8c6 2656
36cdd013 2657 if (!HAS_PSR(dev_priv)) {
3553a8ea
DL
2658 seq_puts(m, "PSR not supported\n");
2659 return 0;
2660 }
2661
c8c8fb33
PZ
2662 intel_runtime_pm_get(dev_priv);
2663
fa128fa6 2664 mutex_lock(&dev_priv->psr.lock);
a031d709
RV
2665 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
2666 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
2807cf69 2667 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
5755c78f 2668 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
fa128fa6
DV
2669 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2670 dev_priv->psr.busy_frontbuffer_bits);
2671 seq_printf(m, "Re-enable work scheduled: %s\n",
2672 yesno(work_busy(&dev_priv->psr.work.work)));
e91fd8c6 2673
7e3eb599
NV
2674 if (HAS_DDI(dev_priv)) {
2675 if (dev_priv->psr.psr2_support)
2676 enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2677 else
2678 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2679 } else {
3553a8ea 2680 for_each_pipe(dev_priv, pipe) {
9c870d03
CW
2681 enum transcoder cpu_transcoder =
2682 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
2683 enum intel_display_power_domain power_domain;
2684
2685 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
2686 if (!intel_display_power_get_if_enabled(dev_priv,
2687 power_domain))
2688 continue;
2689
3553a8ea
DL
2690 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2691 VLV_EDP_PSR_CURR_STATE_MASK;
2692 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2693 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2694 enabled = true;
9c870d03
CW
2695
2696 intel_display_power_put(dev_priv, power_domain);
a6cbdb8e
RV
2697 }
2698 }
60e5ffe3
RV
2699
2700 seq_printf(m, "Main link in standby mode: %s\n",
2701 yesno(dev_priv->psr.link_standby));
2702
a6cbdb8e
RV
2703 seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2704
36cdd013 2705 if (!HAS_DDI(dev_priv))
a6cbdb8e
RV
2706 for_each_pipe(dev_priv, pipe) {
2707 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2708 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2709 seq_printf(m, " pipe %c", pipe_name(pipe));
2710 }
2711 seq_puts(m, "\n");
e91fd8c6 2712
05eec3c2
RV
2713 /*
2714 * VLV/CHV PSR has no kind of performance counter
2715 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2716 */
36cdd013 2717 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
443a389f 2718 psrperf = I915_READ(EDP_PSR_PERF_CNT) &
a031d709 2719 EDP_PSR_PERF_CNT_MASK;
a6cbdb8e
RV
2720
2721 seq_printf(m, "Performance_Counter: %u\n", psrperf);
2722 }
6ba1f9e1 2723 if (dev_priv->psr.psr2_support) {
b86bef20
CW
2724 u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);
2725
2726 seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
2727 psr2, psr2_live_status(psr2));
6ba1f9e1 2728 }
fa128fa6 2729 mutex_unlock(&dev_priv->psr.lock);
e91fd8c6 2730
c8c8fb33 2731 intel_runtime_pm_put(dev_priv);
e91fd8c6
RV
2732 return 0;
2733}
2734
d2e216d0
RV
2735static int i915_sink_crc(struct seq_file *m, void *data)
2736{
36cdd013
DW
2737 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2738 struct drm_device *dev = &dev_priv->drm;
d2e216d0 2739 struct intel_connector *connector;
3f6a5e1e 2740 struct drm_connector_list_iter conn_iter;
d2e216d0
RV
2741 struct intel_dp *intel_dp = NULL;
2742 int ret;
2743 u8 crc[6];
2744
2745 drm_modeset_lock_all(dev);
3f6a5e1e
DV
2746 drm_connector_list_iter_begin(dev, &conn_iter);
2747 for_each_intel_connector_iter(connector, &conn_iter) {
26c17cf6 2748 struct drm_crtc *crtc;
d2e216d0 2749
26c17cf6 2750 if (!connector->base.state->best_encoder)
d2e216d0
RV
2751 continue;
2752
26c17cf6
ML
2753 crtc = connector->base.state->crtc;
2754 if (!crtc->state->active)
b6ae3c7c
PZ
2755 continue;
2756
26c17cf6 2757 if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
d2e216d0
RV
2758 continue;
2759
26c17cf6 2760 intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);
d2e216d0
RV
2761
2762 ret = intel_dp_sink_crc(intel_dp, crc);
2763 if (ret)
2764 goto out;
2765
2766 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2767 crc[0], crc[1], crc[2],
2768 crc[3], crc[4], crc[5]);
2769 goto out;
2770 }
2771 ret = -ENODEV;
2772out:
3f6a5e1e 2773 drm_connector_list_iter_end(&conn_iter);
d2e216d0
RV
2774 drm_modeset_unlock_all(dev);
2775 return ret;
2776}
2777
ec013e7f
JB
2778static int i915_energy_uJ(struct seq_file *m, void *data)
2779{
36cdd013 2780 struct drm_i915_private *dev_priv = node_to_i915(m->private);
d38014ea 2781 unsigned long long power;
ec013e7f
JB
2782 u32 units;
2783
36cdd013 2784 if (INTEL_GEN(dev_priv) < 6)
ec013e7f
JB
2785 return -ENODEV;
2786
36623ef8
PZ
2787 intel_runtime_pm_get(dev_priv);
2788
d38014ea
GKB
2789 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
2790 intel_runtime_pm_put(dev_priv);
2791 return -ENODEV;
2792 }
2793
2794 units = (power & 0x1f00) >> 8;
ec013e7f 2795 power = I915_READ(MCH_SECP_NRG_STTS);
d38014ea 2796 power = (1000000 * power) >> units; /* convert to uJ */
ec013e7f 2797
36623ef8
PZ
2798 intel_runtime_pm_put(dev_priv);
2799
d38014ea 2800 seq_printf(m, "%llu", power);
371db66a
PZ
2801
2802 return 0;
2803}
2804
6455c870 2805static int i915_runtime_pm_status(struct seq_file *m, void *unused)
371db66a 2806{
36cdd013 2807 struct drm_i915_private *dev_priv = node_to_i915(m->private);
52a05c30 2808 struct pci_dev *pdev = dev_priv->drm.pdev;
371db66a 2809
a156e64d
CW
2810 if (!HAS_RUNTIME_PM(dev_priv))
2811 seq_puts(m, "Runtime power management not supported\n");
371db66a 2812
67d97da3 2813 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
371db66a 2814 seq_printf(m, "IRQs disabled: %s\n",
9df7575f 2815 yesno(!intel_irqs_enabled(dev_priv)));
0d804184 2816#ifdef CONFIG_PM
a6aaec8b 2817 seq_printf(m, "Usage count: %d\n",
36cdd013 2818 atomic_read(&dev_priv->drm.dev->power.usage_count));
0d804184
CW
2819#else
2820 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2821#endif
a156e64d 2822 seq_printf(m, "PCI device power state: %s [%d]\n",
52a05c30
DW
2823 pci_power_name(pdev->current_state),
2824 pdev->current_state);
371db66a 2825
ec013e7f
JB
2826 return 0;
2827}
2828
1da51581
ID
2829static int i915_power_domain_info(struct seq_file *m, void *unused)
2830{
36cdd013 2831 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1da51581
ID
2832 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2833 int i;
2834
2835 mutex_lock(&power_domains->lock);
2836
2837 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2838 for (i = 0; i < power_domains->power_well_count; i++) {
2839 struct i915_power_well *power_well;
2840 enum intel_display_power_domain power_domain;
2841
2842 power_well = &power_domains->power_wells[i];
2843 seq_printf(m, "%-25s %d\n", power_well->name,
2844 power_well->count);
2845
8385c2ec 2846 for_each_power_domain(power_domain, power_well->domains)
1da51581 2847 seq_printf(m, " %-23s %d\n",
9895ad03 2848 intel_display_power_domain_str(power_domain),
1da51581 2849 power_domains->domain_use_count[power_domain]);
1da51581
ID
2850 }
2851
2852 mutex_unlock(&power_domains->lock);
2853
2854 return 0;
2855}
2856
b7cec66d
DL
2857static int i915_dmc_info(struct seq_file *m, void *unused)
2858{
36cdd013 2859 struct drm_i915_private *dev_priv = node_to_i915(m->private);
b7cec66d
DL
2860 struct intel_csr *csr;
2861
36cdd013 2862 if (!HAS_CSR(dev_priv)) {
b7cec66d
DL
2863 seq_puts(m, "not supported\n");
2864 return 0;
2865 }
2866
2867 csr = &dev_priv->csr;
2868
6fb403de
MK
2869 intel_runtime_pm_get(dev_priv);
2870
b7cec66d
DL
2871 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2872 seq_printf(m, "path: %s\n", csr->fw_path);
2873
2874 if (!csr->dmc_payload)
6fb403de 2875 goto out;
b7cec66d
DL
2876
2877 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2878 CSR_VERSION_MINOR(csr->version));
2879
48de568c
MK
2880 if (IS_KABYLAKE(dev_priv) ||
2881 (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
8337206d
DL
2882 seq_printf(m, "DC3 -> DC5 count: %d\n",
2883 I915_READ(SKL_CSR_DC3_DC5_COUNT));
2884 seq_printf(m, "DC5 -> DC6 count: %d\n",
2885 I915_READ(SKL_CSR_DC5_DC6_COUNT));
36cdd013 2886 } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
16e11b99
MK
2887 seq_printf(m, "DC3 -> DC5 count: %d\n",
2888 I915_READ(BXT_CSR_DC3_DC5_COUNT));
8337206d
DL
2889 }
2890
6fb403de
MK
2891out:
2892 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2893 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2894 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2895
8337206d
DL
2896 intel_runtime_pm_put(dev_priv);
2897
b7cec66d
DL
2898 return 0;
2899}
2900
53f5e3ca
JB
2901static void intel_seq_print_mode(struct seq_file *m, int tabs,
2902 struct drm_display_mode *mode)
2903{
2904 int i;
2905
2906 for (i = 0; i < tabs; i++)
2907 seq_putc(m, '\t');
2908
2909 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2910 mode->base.id, mode->name,
2911 mode->vrefresh, mode->clock,
2912 mode->hdisplay, mode->hsync_start,
2913 mode->hsync_end, mode->htotal,
2914 mode->vdisplay, mode->vsync_start,
2915 mode->vsync_end, mode->vtotal,
2916 mode->type, mode->flags);
2917}
2918
2919static void intel_encoder_info(struct seq_file *m,
2920 struct intel_crtc *intel_crtc,
2921 struct intel_encoder *intel_encoder)
2922{
36cdd013
DW
2923 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2924 struct drm_device *dev = &dev_priv->drm;
53f5e3ca
JB
2925 struct drm_crtc *crtc = &intel_crtc->base;
2926 struct intel_connector *intel_connector;
2927 struct drm_encoder *encoder;
2928
2929 encoder = &intel_encoder->base;
2930 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
8e329a03 2931 encoder->base.id, encoder->name);
53f5e3ca
JB
2932 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2933 struct drm_connector *connector = &intel_connector->base;
2934 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2935 connector->base.id,
c23cc417 2936 connector->name,
53f5e3ca
JB
2937 drm_get_connector_status_name(connector->status));
2938 if (connector->status == connector_status_connected) {
2939 struct drm_display_mode *mode = &crtc->mode;
2940 seq_printf(m, ", mode:\n");
2941 intel_seq_print_mode(m, 2, mode);
2942 } else {
2943 seq_putc(m, '\n');
2944 }
2945 }
2946}
2947
2948static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2949{
36cdd013
DW
2950 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2951 struct drm_device *dev = &dev_priv->drm;
53f5e3ca
JB
2952 struct drm_crtc *crtc = &intel_crtc->base;
2953 struct intel_encoder *intel_encoder;
23a48d53
ML
2954 struct drm_plane_state *plane_state = crtc->primary->state;
2955 struct drm_framebuffer *fb = plane_state->fb;
53f5e3ca 2956
23a48d53 2957 if (fb)
5aa8a937 2958 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
23a48d53
ML
2959 fb->base.id, plane_state->src_x >> 16,
2960 plane_state->src_y >> 16, fb->width, fb->height);
5aa8a937
MR
2961 else
2962 seq_puts(m, "\tprimary plane disabled\n");
53f5e3ca
JB
2963 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2964 intel_encoder_info(m, intel_crtc, intel_encoder);
2965}
2966
2967static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2968{
2969 struct drm_display_mode *mode = panel->fixed_mode;
2970
2971 seq_printf(m, "\tfixed mode:\n");
2972 intel_seq_print_mode(m, 2, mode);
2973}
2974
2975static void intel_dp_info(struct seq_file *m,
2976 struct intel_connector *intel_connector)
2977{
2978 struct intel_encoder *intel_encoder = intel_connector->encoder;
2979 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2980
2981 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
742f491d 2982 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
b6dabe3b 2983 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
53f5e3ca 2984 intel_panel_info(m, &intel_connector->panel);
80209e5f
MK
2985
2986 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2987 &intel_dp->aux);
53f5e3ca
JB
2988}
2989
9a148a96
LY
2990static void intel_dp_mst_info(struct seq_file *m,
2991 struct intel_connector *intel_connector)
2992{
2993 struct intel_encoder *intel_encoder = intel_connector->encoder;
2994 struct intel_dp_mst_encoder *intel_mst =
2995 enc_to_mst(&intel_encoder->base);
2996 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2997 struct intel_dp *intel_dp = &intel_dig_port->dp;
2998 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2999 intel_connector->port);
3000
3001 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3002}
3003
53f5e3ca
JB
3004static void intel_hdmi_info(struct seq_file *m,
3005 struct intel_connector *intel_connector)
3006{
3007 struct intel_encoder *intel_encoder = intel_connector->encoder;
3008 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3009
742f491d 3010 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
53f5e3ca
JB
3011}
3012
3013static void intel_lvds_info(struct seq_file *m,
3014 struct intel_connector *intel_connector)
3015{
3016 intel_panel_info(m, &intel_connector->panel);
3017}
3018
3019static void intel_connector_info(struct seq_file *m,
3020 struct drm_connector *connector)
3021{
3022 struct intel_connector *intel_connector = to_intel_connector(connector);
3023 struct intel_encoder *intel_encoder = intel_connector->encoder;
f103fc7d 3024 struct drm_display_mode *mode;
53f5e3ca
JB
3025
3026 seq_printf(m, "connector %d: type %s, status: %s\n",
c23cc417 3027 connector->base.id, connector->name,
53f5e3ca
JB
3028 drm_get_connector_status_name(connector->status));
3029 if (connector->status == connector_status_connected) {
3030 seq_printf(m, "\tname: %s\n", connector->display_info.name);
3031 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
3032 connector->display_info.width_mm,
3033 connector->display_info.height_mm);
3034 seq_printf(m, "\tsubpixel order: %s\n",
3035 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
3036 seq_printf(m, "\tCEA rev: %d\n",
3037 connector->display_info.cea_rev);
3038 }
ee648a74 3039
50740024 3040 if (!intel_encoder)
ee648a74
ML
3041 return;
3042
3043 switch (connector->connector_type) {
3044 case DRM_MODE_CONNECTOR_DisplayPort:
3045 case DRM_MODE_CONNECTOR_eDP:
9a148a96
LY
3046 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
3047 intel_dp_mst_info(m, intel_connector);
3048 else
3049 intel_dp_info(m, intel_connector);
ee648a74
ML
3050 break;
3051 case DRM_MODE_CONNECTOR_LVDS:
3052 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
36cd7444 3053 intel_lvds_info(m, intel_connector);
ee648a74
ML
3054 break;
3055 case DRM_MODE_CONNECTOR_HDMIA:
3056 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
3057 intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
3058 intel_hdmi_info(m, intel_connector);
3059 break;
3060 default:
3061 break;
36cd7444 3062 }
53f5e3ca 3063
f103fc7d
JB
3064 seq_printf(m, "\tmodes:\n");
3065 list_for_each_entry(mode, &connector->modes, head)
3066 intel_seq_print_mode(m, 2, mode);
53f5e3ca
JB
3067}
3068
3abc4e09
RF
3069static const char *plane_type(enum drm_plane_type type)
3070{
3071 switch (type) {
3072 case DRM_PLANE_TYPE_OVERLAY:
3073 return "OVL";
3074 case DRM_PLANE_TYPE_PRIMARY:
3075 return "PRI";
3076 case DRM_PLANE_TYPE_CURSOR:
3077 return "CUR";
3078 /*
3079 * Deliberately omitting default: to generate compiler warnings
3080 * when a new drm_plane_type gets added.
3081 */
3082 }
3083
3084 return "unknown";
3085}
3086
3087static const char *plane_rotation(unsigned int rotation)
3088{
3089 static char buf[48];
3090 /*
c2c446ad 3091 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
3abc4e09
RF
3092 * will print them all to visualize if the values are misused
3093 */
3094 snprintf(buf, sizeof(buf),
3095 "%s%s%s%s%s%s(0x%08x)",
c2c446ad
RF
3096 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
3097 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
3098 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
3099 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
3100 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
3101 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
3abc4e09
RF
3102 rotation);
3103
3104 return buf;
3105}
3106
3107static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3108{
36cdd013
DW
3109 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3110 struct drm_device *dev = &dev_priv->drm;
3abc4e09
RF
3111 struct intel_plane *intel_plane;
3112
3113 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3114 struct drm_plane_state *state;
3115 struct drm_plane *plane = &intel_plane->base;
b3c11ac2 3116 struct drm_format_name_buf format_name;
3abc4e09
RF
3117
3118 if (!plane->state) {
3119 seq_puts(m, "plane->state is NULL!\n");
3120 continue;
3121 }
3122
3123 state = plane->state;
3124
90844f00 3125 if (state->fb) {
438b74a5
VS
3126 drm_get_format_name(state->fb->format->format,
3127 &format_name);
90844f00 3128 } else {
b3c11ac2 3129 sprintf(format_name.str, "N/A");
90844f00
EE
3130 }
3131
3abc4e09
RF
3132 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3133 plane->base.id,
3134 plane_type(intel_plane->base.type),
3135 state->crtc_x, state->crtc_y,
3136 state->crtc_w, state->crtc_h,
3137 (state->src_x >> 16),
3138 ((state->src_x & 0xffff) * 15625) >> 10,
3139 (state->src_y >> 16),
3140 ((state->src_y & 0xffff) * 15625) >> 10,
3141 (state->src_w >> 16),
3142 ((state->src_w & 0xffff) * 15625) >> 10,
3143 (state->src_h >> 16),
3144 ((state->src_h & 0xffff) * 15625) >> 10,
b3c11ac2 3145 format_name.str,
3abc4e09
RF
3146 plane_rotation(state->rotation));
3147 }
3148}
3149
3150static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3151{
3152 struct intel_crtc_state *pipe_config;
3153 int num_scalers = intel_crtc->num_scalers;
3154 int i;
3155
3156 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3157
3158 /* Not all platformas have a scaler */
3159 if (num_scalers) {
3160 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3161 num_scalers,
3162 pipe_config->scaler_state.scaler_users,
3163 pipe_config->scaler_state.scaler_id);
3164
58415918 3165 for (i = 0; i < num_scalers; i++) {
3abc4e09
RF
3166 struct intel_scaler *sc =
3167 &pipe_config->scaler_state.scalers[i];
3168
3169 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3170 i, yesno(sc->in_use), sc->mode);
3171 }
3172 seq_puts(m, "\n");
3173 } else {
3174 seq_puts(m, "\tNo scalers available on this platform\n");
3175 }
3176}
3177
53f5e3ca
JB
3178static int i915_display_info(struct seq_file *m, void *unused)
3179{
36cdd013
DW
3180 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3181 struct drm_device *dev = &dev_priv->drm;
065f2ec2 3182 struct intel_crtc *crtc;
53f5e3ca 3183 struct drm_connector *connector;
3f6a5e1e 3184 struct drm_connector_list_iter conn_iter;
53f5e3ca 3185
b0e5ddf3 3186 intel_runtime_pm_get(dev_priv);
53f5e3ca
JB
3187 seq_printf(m, "CRTC info\n");
3188 seq_printf(m, "---------\n");
d3fcc808 3189 for_each_intel_crtc(dev, crtc) {
f77076c9 3190 struct intel_crtc_state *pipe_config;
53f5e3ca 3191
3f6a5e1e 3192 drm_modeset_lock(&crtc->base.mutex, NULL);
f77076c9
ML
3193 pipe_config = to_intel_crtc_state(crtc->base.state);
3194
3abc4e09 3195 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
065f2ec2 3196 crtc->base.base.id, pipe_name(crtc->pipe),
f77076c9 3197 yesno(pipe_config->base.active),
3abc4e09
RF
3198 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3199 yesno(pipe_config->dither), pipe_config->pipe_bpp);
3200
f77076c9 3201 if (pipe_config->base.active) {
cd5dcbf1
VS
3202 struct intel_plane *cursor =
3203 to_intel_plane(crtc->base.cursor);
3204
065f2ec2
CW
3205 intel_crtc_info(m, crtc);
3206
cd5dcbf1
VS
3207 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3208 yesno(cursor->base.state->visible),
3209 cursor->base.state->crtc_x,
3210 cursor->base.state->crtc_y,
3211 cursor->base.state->crtc_w,
3212 cursor->base.state->crtc_h,
3213 cursor->cursor.base);
3abc4e09
RF
3214 intel_scaler_info(m, crtc);
3215 intel_plane_info(m, crtc);
a23dc658 3216 }
cace841c
DV
3217
3218 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3219 yesno(!crtc->cpu_fifo_underrun_disabled),
3220 yesno(!crtc->pch_fifo_underrun_disabled));
3f6a5e1e 3221 drm_modeset_unlock(&crtc->base.mutex);
53f5e3ca
JB
3222 }
3223
3224 seq_printf(m, "\n");
3225 seq_printf(m, "Connector info\n");
3226 seq_printf(m, "--------------\n");
3f6a5e1e
DV
3227 mutex_lock(&dev->mode_config.mutex);
3228 drm_connector_list_iter_begin(dev, &conn_iter);
3229 drm_for_each_connector_iter(connector, &conn_iter)
53f5e3ca 3230 intel_connector_info(m, connector);
3f6a5e1e
DV
3231 drm_connector_list_iter_end(&conn_iter);
3232 mutex_unlock(&dev->mode_config.mutex);
3233
b0e5ddf3 3234 intel_runtime_pm_put(dev_priv);
53f5e3ca
JB
3235
3236 return 0;
3237}
3238
1b36595f
CW
3239static int i915_engine_info(struct seq_file *m, void *unused)
3240{
3241 struct drm_i915_private *dev_priv = node_to_i915(m->private);
061d06a2 3242 struct i915_gpu_error *error = &dev_priv->gpu_error;
1b36595f 3243 struct intel_engine_cs *engine;
3b3f1650 3244 enum intel_engine_id id;
1b36595f 3245
9c870d03
CW
3246 intel_runtime_pm_get(dev_priv);
3247
f73b5674
CW
3248 seq_printf(m, "GT awake? %s\n",
3249 yesno(dev_priv->gt.awake));
3250 seq_printf(m, "Global active requests: %d\n",
3251 dev_priv->gt.active_requests);
3252
3b3f1650 3253 for_each_engine(engine, dev_priv, id) {
1b36595f
CW
3254 struct intel_breadcrumbs *b = &engine->breadcrumbs;
3255 struct drm_i915_gem_request *rq;
3256 struct rb_node *rb;
3257 u64 addr;
3258
3259 seq_printf(m, "%s\n", engine->name);
f73b5674 3260 seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
1b36595f 3261 intel_engine_get_seqno(engine),
cb399eab 3262 intel_engine_last_submit(engine),
1b36595f 3263 engine->hangcheck.seqno,
f73b5674
CW
3264 jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
3265 engine->timeline->inflight_seqnos);
061d06a2
MT
3266 seq_printf(m, "\tReset count: %d\n",
3267 i915_reset_engine_count(error, engine));
1b36595f
CW
3268
3269 rcu_read_lock();
3270
3271 seq_printf(m, "\tRequests:\n");
3272
73cb9701
CW
3273 rq = list_first_entry(&engine->timeline->requests,
3274 struct drm_i915_gem_request, link);
3275 if (&rq->link != &engine->timeline->requests)
1b36595f
CW
3276 print_request(m, rq, "\t\tfirst ");
3277
73cb9701
CW
3278 rq = list_last_entry(&engine->timeline->requests,
3279 struct drm_i915_gem_request, link);
3280 if (&rq->link != &engine->timeline->requests)
1b36595f
CW
3281 print_request(m, rq, "\t\tlast ");
3282
3283 rq = i915_gem_find_active_request(engine);
3284 if (rq) {
3285 print_request(m, rq, "\t\tactive ");
3286 seq_printf(m,
3287 "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
3288 rq->head, rq->postfix, rq->tail,
3289 rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
3290 rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
3291 }
3292
3293 seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
3294 I915_READ(RING_START(engine->mmio_base)),
3295 rq ? i915_ggtt_offset(rq->ring->vma) : 0);
3296 seq_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
3297 I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
3298 rq ? rq->ring->head : 0);
3299 seq_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
3300 I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
3301 rq ? rq->ring->tail : 0);
3302 seq_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
3303 I915_READ(RING_CTL(engine->mmio_base)),
3304 I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
3305
3306 rcu_read_unlock();
3307
3308 addr = intel_engine_get_active_head(engine);
3309 seq_printf(m, "\tACTHD: 0x%08x_%08x\n",
3310 upper_32_bits(addr), lower_32_bits(addr));
3311 addr = intel_engine_get_last_batch_head(engine);
3312 seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
3313 upper_32_bits(addr), lower_32_bits(addr));
3314
3315 if (i915.enable_execlists) {
3316 u32 ptr, read, write;
77f0d0e9 3317 unsigned int idx;
1b36595f
CW
3318
3319 seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
3320 I915_READ(RING_EXECLIST_STATUS_LO(engine)),
3321 I915_READ(RING_EXECLIST_STATUS_HI(engine)));
3322
3323 ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
3324 read = GEN8_CSB_READ_PTR(ptr);
3325 write = GEN8_CSB_WRITE_PTR(ptr);
4d73da93
CW
3326 seq_printf(m, "\tExeclist CSB read %d, write %d, interrupt posted? %s\n",
3327 read, write,
3328 yesno(test_bit(ENGINE_IRQ_EXECLIST,
3329 &engine->irq_posted)));
1b36595f
CW
3330 if (read >= GEN8_CSB_ENTRIES)
3331 read = 0;
3332 if (write >= GEN8_CSB_ENTRIES)
3333 write = 0;
3334 if (read > write)
3335 write += GEN8_CSB_ENTRIES;
3336 while (read < write) {
77f0d0e9 3337 idx = ++read % GEN8_CSB_ENTRIES;
1b36595f
CW
3338 seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
3339 idx,
3340 I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
3341 I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
3342 }
3343
3344 rcu_read_lock();
77f0d0e9
CW
3345 for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) {
3346 unsigned int count;
3347
3348 rq = port_unpack(&engine->execlist_port[idx],
3349 &count);
3350 if (rq) {
3351 seq_printf(m, "\t\tELSP[%d] count=%d, ",
3352 idx, count);
3353 print_request(m, rq, "rq: ");
3354 } else {
3355 seq_printf(m, "\t\tELSP[%d] idle\n",
3356 idx);
3357 }
816ee798 3358 }
1b36595f 3359 rcu_read_unlock();
c8247c06 3360
663f71e7 3361 spin_lock_irq(&engine->timeline->lock);
6c067579
CW
3362 for (rb = engine->execlist_first; rb; rb = rb_next(rb)){
3363 struct i915_priolist *p =
3364 rb_entry(rb, typeof(*p), node);
3365
3366 list_for_each_entry(rq, &p->requests,
3367 priotree.link)
3368 print_request(m, rq, "\t\tQ ");
c8247c06 3369 }
663f71e7 3370 spin_unlock_irq(&engine->timeline->lock);
1b36595f
CW
3371 } else if (INTEL_GEN(dev_priv) > 6) {
3372 seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
3373 I915_READ(RING_PP_DIR_BASE(engine)));
3374 seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
3375 I915_READ(RING_PP_DIR_BASE_READ(engine)));
3376 seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
3377 I915_READ(RING_PP_DIR_DCLV(engine)));
3378 }
3379
61d3dc70 3380 spin_lock_irq(&b->rb_lock);
1b36595f 3381 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
f802cf7e 3382 struct intel_wait *w = rb_entry(rb, typeof(*w), node);
1b36595f
CW
3383
3384 seq_printf(m, "\t%s [%d] waiting for %x\n",
3385 w->tsk->comm, w->tsk->pid, w->seqno);
3386 }
61d3dc70 3387 spin_unlock_irq(&b->rb_lock);
1b36595f
CW
3388
3389 seq_puts(m, "\n");
3390 }
3391
9c870d03
CW
3392 intel_runtime_pm_put(dev_priv);
3393
1b36595f
CW
3394 return 0;
3395}
3396
e04934cf
BW
3397static int i915_semaphore_status(struct seq_file *m, void *unused)
3398{
36cdd013
DW
3399 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3400 struct drm_device *dev = &dev_priv->drm;
e2f80391 3401 struct intel_engine_cs *engine;
36cdd013 3402 int num_rings = INTEL_INFO(dev_priv)->num_rings;
c3232b18
DG
3403 enum intel_engine_id id;
3404 int j, ret;
e04934cf 3405
39df9190 3406 if (!i915.semaphores) {
e04934cf
BW
3407 seq_puts(m, "Semaphores are disabled\n");
3408 return 0;
3409 }
3410
3411 ret = mutex_lock_interruptible(&dev->struct_mutex);
3412 if (ret)
3413 return ret;
03872064 3414 intel_runtime_pm_get(dev_priv);
e04934cf 3415
36cdd013 3416 if (IS_BROADWELL(dev_priv)) {
e04934cf
BW
3417 struct page *page;
3418 uint64_t *seqno;
3419
51d545d0 3420 page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
e04934cf
BW
3421
3422 seqno = (uint64_t *)kmap_atomic(page);
3b3f1650 3423 for_each_engine(engine, dev_priv, id) {
e04934cf
BW
3424 uint64_t offset;
3425
e2f80391 3426 seq_printf(m, "%s\n", engine->name);
e04934cf
BW
3427
3428 seq_puts(m, " Last signal:");
3429 for (j = 0; j < num_rings; j++) {
c3232b18 3430 offset = id * I915_NUM_ENGINES + j;
e04934cf
BW
3431 seq_printf(m, "0x%08llx (0x%02llx) ",
3432 seqno[offset], offset * 8);
3433 }
3434 seq_putc(m, '\n');
3435
3436 seq_puts(m, " Last wait: ");
3437 for (j = 0; j < num_rings; j++) {
c3232b18 3438 offset = id + (j * I915_NUM_ENGINES);
e04934cf
BW
3439 seq_printf(m, "0x%08llx (0x%02llx) ",
3440 seqno[offset], offset * 8);
3441 }
3442 seq_putc(m, '\n');
3443
3444 }
3445 kunmap_atomic(seqno);
3446 } else {
3447 seq_puts(m, " Last signal:");
3b3f1650 3448 for_each_engine(engine, dev_priv, id)
e04934cf
BW
3449 for (j = 0; j < num_rings; j++)
3450 seq_printf(m, "0x%08x\n",
e2f80391 3451 I915_READ(engine->semaphore.mbox.signal[j]));
e04934cf
BW
3452 seq_putc(m, '\n');
3453 }
3454
03872064 3455 intel_runtime_pm_put(dev_priv);
e04934cf
BW
3456 mutex_unlock(&dev->struct_mutex);
3457 return 0;
3458}
3459
728e29d7
DV
3460static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3461{
36cdd013
DW
3462 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3463 struct drm_device *dev = &dev_priv->drm;
728e29d7
DV
3464 int i;
3465
3466 drm_modeset_lock_all(dev);
3467 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3468 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3469
3470 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
2dd66ebd 3471 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
2c42e535 3472 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
728e29d7 3473 seq_printf(m, " tracked hardware state:\n");
2c42e535 3474 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
3e369b76 3475 seq_printf(m, " dpll_md: 0x%08x\n",
2c42e535
ACO
3476 pll->state.hw_state.dpll_md);
3477 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
3478 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
3479 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
728e29d7
DV
3480 }
3481 drm_modeset_unlock_all(dev);
3482
3483 return 0;
3484}
3485
1ed1ef9d 3486static int i915_wa_registers(struct seq_file *m, void *unused)
888b5995
AS
3487{
3488 int i;
3489 int ret;
e2f80391 3490 struct intel_engine_cs *engine;
36cdd013
DW
3491 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3492 struct drm_device *dev = &dev_priv->drm;
33136b06 3493 struct i915_workarounds *workarounds = &dev_priv->workarounds;
c3232b18 3494 enum intel_engine_id id;
888b5995 3495
888b5995
AS
3496 ret = mutex_lock_interruptible(&dev->struct_mutex);
3497 if (ret)
3498 return ret;
3499
3500 intel_runtime_pm_get(dev_priv);
3501
33136b06 3502 seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
3b3f1650 3503 for_each_engine(engine, dev_priv, id)
33136b06 3504 seq_printf(m, "HW whitelist count for %s: %d\n",
c3232b18 3505 engine->name, workarounds->hw_whitelist_count[id]);
33136b06 3506 for (i = 0; i < workarounds->count; ++i) {
f0f59a00
VS
3507 i915_reg_t addr;
3508 u32 mask, value, read;
2fa60f6d 3509 bool ok;
888b5995 3510
33136b06
AS
3511 addr = workarounds->reg[i].addr;
3512 mask = workarounds->reg[i].mask;
3513 value = workarounds->reg[i].value;
2fa60f6d
MK
3514 read = I915_READ(addr);
3515 ok = (value & mask) == (read & mask);
3516 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
f0f59a00 3517 i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
888b5995
AS
3518 }
3519
3520 intel_runtime_pm_put(dev_priv);
3521 mutex_unlock(&dev->struct_mutex);
3522
3523 return 0;
3524}
3525
c5511e44
DL
3526static int i915_ddb_info(struct seq_file *m, void *unused)
3527{
36cdd013
DW
3528 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3529 struct drm_device *dev = &dev_priv->drm;
c5511e44
DL
3530 struct skl_ddb_allocation *ddb;
3531 struct skl_ddb_entry *entry;
3532 enum pipe pipe;
3533 int plane;
3534
36cdd013 3535 if (INTEL_GEN(dev_priv) < 9)
2fcffe19
DL
3536 return 0;
3537
c5511e44
DL
3538 drm_modeset_lock_all(dev);
3539
3540 ddb = &dev_priv->wm.skl_hw.ddb;
3541
3542 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3543
3544 for_each_pipe(dev_priv, pipe) {
3545 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3546
8b364b41 3547 for_each_universal_plane(dev_priv, pipe, plane) {
c5511e44
DL
3548 entry = &ddb->plane[pipe][plane];
3549 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
3550 entry->start, entry->end,
3551 skl_ddb_entry_size(entry));
3552 }
3553
4969d33e 3554 entry = &ddb->plane[pipe][PLANE_CURSOR];
c5511e44
DL
3555 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3556 entry->end, skl_ddb_entry_size(entry));
3557 }
3558
3559 drm_modeset_unlock_all(dev);
3560
3561 return 0;
3562}
3563
a54746e3 3564static void drrs_status_per_crtc(struct seq_file *m,
36cdd013
DW
3565 struct drm_device *dev,
3566 struct intel_crtc *intel_crtc)
a54746e3 3567{
fac5e23e 3568 struct drm_i915_private *dev_priv = to_i915(dev);
a54746e3
VK
3569 struct i915_drrs *drrs = &dev_priv->drrs;
3570 int vrefresh = 0;
26875fe5 3571 struct drm_connector *connector;
3f6a5e1e 3572 struct drm_connector_list_iter conn_iter;
a54746e3 3573
3f6a5e1e
DV
3574 drm_connector_list_iter_begin(dev, &conn_iter);
3575 drm_for_each_connector_iter(connector, &conn_iter) {
26875fe5
ML
3576 if (connector->state->crtc != &intel_crtc->base)
3577 continue;
3578
3579 seq_printf(m, "%s:\n", connector->name);
a54746e3 3580 }
3f6a5e1e 3581 drm_connector_list_iter_end(&conn_iter);
a54746e3
VK
3582
3583 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3584 seq_puts(m, "\tVBT: DRRS_type: Static");
3585 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3586 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3587 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3588 seq_puts(m, "\tVBT: DRRS_type: None");
3589 else
3590 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3591
3592 seq_puts(m, "\n\n");
3593
f77076c9 3594 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
a54746e3
VK
3595 struct intel_panel *panel;
3596
3597 mutex_lock(&drrs->mutex);
3598 /* DRRS Supported */
3599 seq_puts(m, "\tDRRS Supported: Yes\n");
3600
3601 /* disable_drrs() will make drrs->dp NULL */
3602 if (!drrs->dp) {
3603 seq_puts(m, "Idleness DRRS: Disabled");
3604 mutex_unlock(&drrs->mutex);
3605 return;
3606 }
3607
3608 panel = &drrs->dp->attached_connector->panel;
3609 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3610 drrs->busy_frontbuffer_bits);
3611
3612 seq_puts(m, "\n\t\t");
3613 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3614 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3615 vrefresh = panel->fixed_mode->vrefresh;
3616 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3617 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3618 vrefresh = panel->downclock_mode->vrefresh;
3619 } else {
3620 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3621 drrs->refresh_rate_type);
3622 mutex_unlock(&drrs->mutex);
3623 return;
3624 }
3625 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3626
3627 seq_puts(m, "\n\t\t");
3628 mutex_unlock(&drrs->mutex);
3629 } else {
3630 /* DRRS not supported. Print the VBT parameter*/
3631 seq_puts(m, "\tDRRS Supported : No");
3632 }
3633 seq_puts(m, "\n");
3634}
3635
3636static int i915_drrs_status(struct seq_file *m, void *unused)
3637{
36cdd013
DW
3638 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3639 struct drm_device *dev = &dev_priv->drm;
a54746e3
VK
3640 struct intel_crtc *intel_crtc;
3641 int active_crtc_cnt = 0;
3642
26875fe5 3643 drm_modeset_lock_all(dev);
a54746e3 3644 for_each_intel_crtc(dev, intel_crtc) {
f77076c9 3645 if (intel_crtc->base.state->active) {
a54746e3
VK
3646 active_crtc_cnt++;
3647 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3648
3649 drrs_status_per_crtc(m, dev, intel_crtc);
3650 }
a54746e3 3651 }
26875fe5 3652 drm_modeset_unlock_all(dev);
a54746e3
VK
3653
3654 if (!active_crtc_cnt)
3655 seq_puts(m, "No active crtc found\n");
3656
3657 return 0;
3658}
3659
11bed958
DA
3660static int i915_dp_mst_info(struct seq_file *m, void *unused)
3661{
36cdd013
DW
3662 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3663 struct drm_device *dev = &dev_priv->drm;
11bed958
DA
3664 struct intel_encoder *intel_encoder;
3665 struct intel_digital_port *intel_dig_port;
b6dabe3b 3666 struct drm_connector *connector;
3f6a5e1e 3667 struct drm_connector_list_iter conn_iter;
b6dabe3b 3668
3f6a5e1e
DV
3669 drm_connector_list_iter_begin(dev, &conn_iter);
3670 drm_for_each_connector_iter(connector, &conn_iter) {
b6dabe3b 3671 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
11bed958 3672 continue;
b6dabe3b
ML
3673
3674 intel_encoder = intel_attached_encoder(connector);
3675 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3676 continue;
3677
3678 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
11bed958
DA
3679 if (!intel_dig_port->dp.can_mst)
3680 continue;
b6dabe3b 3681
40ae80cc
JB
3682 seq_printf(m, "MST Source Port %c\n",
3683 port_name(intel_dig_port->port));
11bed958
DA
3684 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3685 }
3f6a5e1e
DV
3686 drm_connector_list_iter_end(&conn_iter);
3687
11bed958
DA
3688 return 0;
3689}
3690
eb3394fa 3691static ssize_t i915_displayport_test_active_write(struct file *file,
36cdd013
DW
3692 const char __user *ubuf,
3693 size_t len, loff_t *offp)
eb3394fa
TP
3694{
3695 char *input_buffer;
3696 int status = 0;
eb3394fa
TP
3697 struct drm_device *dev;
3698 struct drm_connector *connector;
3f6a5e1e 3699 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3700 struct intel_dp *intel_dp;
3701 int val = 0;
3702
9aaffa34 3703 dev = ((struct seq_file *)file->private_data)->private;
eb3394fa 3704
eb3394fa
TP
3705 if (len == 0)
3706 return 0;
3707
261aeba8
GT
3708 input_buffer = memdup_user_nul(ubuf, len);
3709 if (IS_ERR(input_buffer))
3710 return PTR_ERR(input_buffer);
eb3394fa 3711
eb3394fa
TP
3712 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3713
3f6a5e1e
DV
3714 drm_connector_list_iter_begin(dev, &conn_iter);
3715 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3716 struct intel_encoder *encoder;
3717
eb3394fa
TP
3718 if (connector->connector_type !=
3719 DRM_MODE_CONNECTOR_DisplayPort)
3720 continue;
3721
a874b6a3
ML
3722 encoder = to_intel_encoder(connector->encoder);
3723 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3724 continue;
3725
3726 if (encoder && connector->status == connector_status_connected) {
3727 intel_dp = enc_to_intel_dp(&encoder->base);
eb3394fa
TP
3728 status = kstrtoint(input_buffer, 10, &val);
3729 if (status < 0)
3f6a5e1e 3730 break;
eb3394fa
TP
3731 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3732 /* To prevent erroneous activation of the compliance
3733 * testing code, only accept an actual value of 1 here
3734 */
3735 if (val == 1)
c1617abc 3736 intel_dp->compliance.test_active = 1;
eb3394fa 3737 else
c1617abc 3738 intel_dp->compliance.test_active = 0;
eb3394fa
TP
3739 }
3740 }
3f6a5e1e 3741 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3742 kfree(input_buffer);
3743 if (status < 0)
3744 return status;
3745
3746 *offp += len;
3747 return len;
3748}
3749
3750static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3751{
3752 struct drm_device *dev = m->private;
3753 struct drm_connector *connector;
3f6a5e1e 3754 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3755 struct intel_dp *intel_dp;
3756
3f6a5e1e
DV
3757 drm_connector_list_iter_begin(dev, &conn_iter);
3758 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3759 struct intel_encoder *encoder;
3760
eb3394fa
TP
3761 if (connector->connector_type !=
3762 DRM_MODE_CONNECTOR_DisplayPort)
3763 continue;
3764
a874b6a3
ML
3765 encoder = to_intel_encoder(connector->encoder);
3766 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3767 continue;
3768
3769 if (encoder && connector->status == connector_status_connected) {
3770 intel_dp = enc_to_intel_dp(&encoder->base);
c1617abc 3771 if (intel_dp->compliance.test_active)
eb3394fa
TP
3772 seq_puts(m, "1");
3773 else
3774 seq_puts(m, "0");
3775 } else
3776 seq_puts(m, "0");
3777 }
3f6a5e1e 3778 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3779
3780 return 0;
3781}
3782
3783static int i915_displayport_test_active_open(struct inode *inode,
36cdd013 3784 struct file *file)
eb3394fa 3785{
36cdd013 3786 struct drm_i915_private *dev_priv = inode->i_private;
eb3394fa 3787
36cdd013
DW
3788 return single_open(file, i915_displayport_test_active_show,
3789 &dev_priv->drm);
eb3394fa
TP
3790}
3791
3792static const struct file_operations i915_displayport_test_active_fops = {
3793 .owner = THIS_MODULE,
3794 .open = i915_displayport_test_active_open,
3795 .read = seq_read,
3796 .llseek = seq_lseek,
3797 .release = single_release,
3798 .write = i915_displayport_test_active_write
3799};
3800
3801static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3802{
3803 struct drm_device *dev = m->private;
3804 struct drm_connector *connector;
3f6a5e1e 3805 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3806 struct intel_dp *intel_dp;
3807
3f6a5e1e
DV
3808 drm_connector_list_iter_begin(dev, &conn_iter);
3809 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3810 struct intel_encoder *encoder;
3811
eb3394fa
TP
3812 if (connector->connector_type !=
3813 DRM_MODE_CONNECTOR_DisplayPort)
3814 continue;
3815
a874b6a3
ML
3816 encoder = to_intel_encoder(connector->encoder);
3817 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3818 continue;
3819
3820 if (encoder && connector->status == connector_status_connected) {
3821 intel_dp = enc_to_intel_dp(&encoder->base);
b48a5ba9
MN
3822 if (intel_dp->compliance.test_type ==
3823 DP_TEST_LINK_EDID_READ)
3824 seq_printf(m, "%lx",
3825 intel_dp->compliance.test_data.edid);
611032bf
MN
3826 else if (intel_dp->compliance.test_type ==
3827 DP_TEST_LINK_VIDEO_PATTERN) {
3828 seq_printf(m, "hdisplay: %d\n",
3829 intel_dp->compliance.test_data.hdisplay);
3830 seq_printf(m, "vdisplay: %d\n",
3831 intel_dp->compliance.test_data.vdisplay);
3832 seq_printf(m, "bpc: %u\n",
3833 intel_dp->compliance.test_data.bpc);
3834 }
eb3394fa
TP
3835 } else
3836 seq_puts(m, "0");
3837 }
3f6a5e1e 3838 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3839
3840 return 0;
3841}
3842static int i915_displayport_test_data_open(struct inode *inode,
36cdd013 3843 struct file *file)
eb3394fa 3844{
36cdd013 3845 struct drm_i915_private *dev_priv = inode->i_private;
eb3394fa 3846
36cdd013
DW
3847 return single_open(file, i915_displayport_test_data_show,
3848 &dev_priv->drm);
eb3394fa
TP
3849}
3850
3851static const struct file_operations i915_displayport_test_data_fops = {
3852 .owner = THIS_MODULE,
3853 .open = i915_displayport_test_data_open,
3854 .read = seq_read,
3855 .llseek = seq_lseek,
3856 .release = single_release
3857};
3858
3859static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3860{
3861 struct drm_device *dev = m->private;
3862 struct drm_connector *connector;
3f6a5e1e 3863 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3864 struct intel_dp *intel_dp;
3865
3f6a5e1e
DV
3866 drm_connector_list_iter_begin(dev, &conn_iter);
3867 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3868 struct intel_encoder *encoder;
3869
eb3394fa
TP
3870 if (connector->connector_type !=
3871 DRM_MODE_CONNECTOR_DisplayPort)
3872 continue;
3873
a874b6a3
ML
3874 encoder = to_intel_encoder(connector->encoder);
3875 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3876 continue;
3877
3878 if (encoder && connector->status == connector_status_connected) {
3879 intel_dp = enc_to_intel_dp(&encoder->base);
c1617abc 3880 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
eb3394fa
TP
3881 } else
3882 seq_puts(m, "0");
3883 }
3f6a5e1e 3884 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3885
3886 return 0;
3887}
3888
3889static int i915_displayport_test_type_open(struct inode *inode,
3890 struct file *file)
3891{
36cdd013 3892 struct drm_i915_private *dev_priv = inode->i_private;
eb3394fa 3893
36cdd013
DW
3894 return single_open(file, i915_displayport_test_type_show,
3895 &dev_priv->drm);
eb3394fa
TP
3896}
3897
3898static const struct file_operations i915_displayport_test_type_fops = {
3899 .owner = THIS_MODULE,
3900 .open = i915_displayport_test_type_open,
3901 .read = seq_read,
3902 .llseek = seq_lseek,
3903 .release = single_release
3904};
3905
97e94b22 3906static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
369a1342 3907{
36cdd013
DW
3908 struct drm_i915_private *dev_priv = m->private;
3909 struct drm_device *dev = &dev_priv->drm;
369a1342 3910 int level;
de38b95c
VS
3911 int num_levels;
3912
36cdd013 3913 if (IS_CHERRYVIEW(dev_priv))
de38b95c 3914 num_levels = 3;
36cdd013 3915 else if (IS_VALLEYVIEW(dev_priv))
de38b95c 3916 num_levels = 1;
04548cba
VS
3917 else if (IS_G4X(dev_priv))
3918 num_levels = 3;
de38b95c 3919 else
5db94019 3920 num_levels = ilk_wm_max_level(dev_priv) + 1;
369a1342
VS
3921
3922 drm_modeset_lock_all(dev);
3923
3924 for (level = 0; level < num_levels; level++) {
3925 unsigned int latency = wm[level];
3926
97e94b22
DL
3927 /*
3928 * - WM1+ latency values in 0.5us units
de38b95c 3929 * - latencies are in us on gen9/vlv/chv
97e94b22 3930 */
04548cba
VS
3931 if (INTEL_GEN(dev_priv) >= 9 ||
3932 IS_VALLEYVIEW(dev_priv) ||
3933 IS_CHERRYVIEW(dev_priv) ||
3934 IS_G4X(dev_priv))
97e94b22
DL
3935 latency *= 10;
3936 else if (level > 0)
369a1342
VS
3937 latency *= 5;
3938
3939 seq_printf(m, "WM%d %u (%u.%u usec)\n",
97e94b22 3940 level, wm[level], latency / 10, latency % 10);
369a1342
VS
3941 }
3942
3943 drm_modeset_unlock_all(dev);
3944}
3945
3946static int pri_wm_latency_show(struct seq_file *m, void *data)
3947{
36cdd013 3948 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3949 const uint16_t *latencies;
3950
36cdd013 3951 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3952 latencies = dev_priv->wm.skl_latency;
3953 else
36cdd013 3954 latencies = dev_priv->wm.pri_latency;
369a1342 3955
97e94b22 3956 wm_latency_show(m, latencies);
369a1342
VS
3957
3958 return 0;
3959}
3960
3961static int spr_wm_latency_show(struct seq_file *m, void *data)
3962{
36cdd013 3963 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3964 const uint16_t *latencies;
3965
36cdd013 3966 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3967 latencies = dev_priv->wm.skl_latency;
3968 else
36cdd013 3969 latencies = dev_priv->wm.spr_latency;
369a1342 3970
97e94b22 3971 wm_latency_show(m, latencies);
369a1342
VS
3972
3973 return 0;
3974}
3975
3976static int cur_wm_latency_show(struct seq_file *m, void *data)
3977{
36cdd013 3978 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3979 const uint16_t *latencies;
3980
36cdd013 3981 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3982 latencies = dev_priv->wm.skl_latency;
3983 else
36cdd013 3984 latencies = dev_priv->wm.cur_latency;
369a1342 3985
97e94b22 3986 wm_latency_show(m, latencies);
369a1342
VS
3987
3988 return 0;
3989}
3990
3991static int pri_wm_latency_open(struct inode *inode, struct file *file)
3992{
36cdd013 3993 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3994
04548cba 3995 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
369a1342
VS
3996 return -ENODEV;
3997
36cdd013 3998 return single_open(file, pri_wm_latency_show, dev_priv);
369a1342
VS
3999}
4000
4001static int spr_wm_latency_open(struct inode *inode, struct file *file)
4002{
36cdd013 4003 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 4004
36cdd013 4005 if (HAS_GMCH_DISPLAY(dev_priv))
369a1342
VS
4006 return -ENODEV;
4007
36cdd013 4008 return single_open(file, spr_wm_latency_show, dev_priv);
369a1342
VS
4009}
4010
4011static int cur_wm_latency_open(struct inode *inode, struct file *file)
4012{
36cdd013 4013 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 4014
36cdd013 4015 if (HAS_GMCH_DISPLAY(dev_priv))
369a1342
VS
4016 return -ENODEV;
4017
36cdd013 4018 return single_open(file, cur_wm_latency_show, dev_priv);
369a1342
VS
4019}
4020
4021static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
97e94b22 4022 size_t len, loff_t *offp, uint16_t wm[8])
369a1342
VS
4023{
4024 struct seq_file *m = file->private_data;
36cdd013
DW
4025 struct drm_i915_private *dev_priv = m->private;
4026 struct drm_device *dev = &dev_priv->drm;
97e94b22 4027 uint16_t new[8] = { 0 };
de38b95c 4028 int num_levels;
369a1342
VS
4029 int level;
4030 int ret;
4031 char tmp[32];
4032
36cdd013 4033 if (IS_CHERRYVIEW(dev_priv))
de38b95c 4034 num_levels = 3;
36cdd013 4035 else if (IS_VALLEYVIEW(dev_priv))
de38b95c 4036 num_levels = 1;
04548cba
VS
4037 else if (IS_G4X(dev_priv))
4038 num_levels = 3;
de38b95c 4039 else
5db94019 4040 num_levels = ilk_wm_max_level(dev_priv) + 1;
de38b95c 4041
369a1342
VS
4042 if (len >= sizeof(tmp))
4043 return -EINVAL;
4044
4045 if (copy_from_user(tmp, ubuf, len))
4046 return -EFAULT;
4047
4048 tmp[len] = '\0';
4049
97e94b22
DL
4050 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
4051 &new[0], &new[1], &new[2], &new[3],
4052 &new[4], &new[5], &new[6], &new[7]);
369a1342
VS
4053 if (ret != num_levels)
4054 return -EINVAL;
4055
4056 drm_modeset_lock_all(dev);
4057
4058 for (level = 0; level < num_levels; level++)
4059 wm[level] = new[level];
4060
4061 drm_modeset_unlock_all(dev);
4062
4063 return len;
4064}
4065
4066
4067static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
4068 size_t len, loff_t *offp)
4069{
4070 struct seq_file *m = file->private_data;
36cdd013 4071 struct drm_i915_private *dev_priv = m->private;
97e94b22 4072 uint16_t *latencies;
369a1342 4073
36cdd013 4074 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
4075 latencies = dev_priv->wm.skl_latency;
4076 else
36cdd013 4077 latencies = dev_priv->wm.pri_latency;
97e94b22
DL
4078
4079 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
4080}
4081
4082static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
4083 size_t len, loff_t *offp)
4084{
4085 struct seq_file *m = file->private_data;
36cdd013 4086 struct drm_i915_private *dev_priv = m->private;
97e94b22 4087 uint16_t *latencies;
369a1342 4088
36cdd013 4089 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
4090 latencies = dev_priv->wm.skl_latency;
4091 else
36cdd013 4092 latencies = dev_priv->wm.spr_latency;
97e94b22
DL
4093
4094 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
4095}
4096
4097static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4098 size_t len, loff_t *offp)
4099{
4100 struct seq_file *m = file->private_data;
36cdd013 4101 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
4102 uint16_t *latencies;
4103
36cdd013 4104 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
4105 latencies = dev_priv->wm.skl_latency;
4106 else
36cdd013 4107 latencies = dev_priv->wm.cur_latency;
369a1342 4108
97e94b22 4109 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
4110}
4111
4112static const struct file_operations i915_pri_wm_latency_fops = {
4113 .owner = THIS_MODULE,
4114 .open = pri_wm_latency_open,
4115 .read = seq_read,
4116 .llseek = seq_lseek,
4117 .release = single_release,
4118 .write = pri_wm_latency_write
4119};
4120
4121static const struct file_operations i915_spr_wm_latency_fops = {
4122 .owner = THIS_MODULE,
4123 .open = spr_wm_latency_open,
4124 .read = seq_read,
4125 .llseek = seq_lseek,
4126 .release = single_release,
4127 .write = spr_wm_latency_write
4128};
4129
4130static const struct file_operations i915_cur_wm_latency_fops = {
4131 .owner = THIS_MODULE,
4132 .open = cur_wm_latency_open,
4133 .read = seq_read,
4134 .llseek = seq_lseek,
4135 .release = single_release,
4136 .write = cur_wm_latency_write
4137};
4138
647416f9
KC
4139static int
4140i915_wedged_get(void *data, u64 *val)
f3cd474b 4141{
36cdd013 4142 struct drm_i915_private *dev_priv = data;
f3cd474b 4143
d98c52cf 4144 *val = i915_terminally_wedged(&dev_priv->gpu_error);
f3cd474b 4145
647416f9 4146 return 0;
f3cd474b
CW
4147}
4148
647416f9
KC
4149static int
4150i915_wedged_set(void *data, u64 val)
f3cd474b 4151{
598b6b5a
CW
4152 struct drm_i915_private *i915 = data;
4153 struct intel_engine_cs *engine;
4154 unsigned int tmp;
d46c0517 4155
b8d24a06
MK
4156 /*
4157 * There is no safeguard against this debugfs entry colliding
4158 * with the hangcheck calling same i915_handle_error() in
4159 * parallel, causing an explosion. For now we assume that the
4160 * test harness is responsible enough not to inject gpu hangs
4161 * while it is writing to 'i915_wedged'
4162 */
4163
598b6b5a 4164 if (i915_reset_backoff(&i915->gpu_error))
b8d24a06
MK
4165 return -EAGAIN;
4166
598b6b5a
CW
4167 for_each_engine_masked(engine, i915, val, tmp) {
4168 engine->hangcheck.seqno = intel_engine_get_seqno(engine);
4169 engine->hangcheck.stalled = true;
4170 }
4171
4172 i915_handle_error(i915, val, "Manually setting wedged to %llu", val);
d46c0517 4173
598b6b5a 4174 wait_on_bit(&i915->gpu_error.flags,
d3df42b7
CW
4175 I915_RESET_HANDOFF,
4176 TASK_UNINTERRUPTIBLE);
4177
647416f9 4178 return 0;
f3cd474b
CW
4179}
4180
647416f9
KC
4181DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4182 i915_wedged_get, i915_wedged_set,
3a3b4f98 4183 "%llu\n");
f3cd474b 4184
64486ae7
CW
4185static int
4186fault_irq_set(struct drm_i915_private *i915,
4187 unsigned long *irq,
4188 unsigned long val)
4189{
4190 int err;
4191
4192 err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4193 if (err)
4194 return err;
4195
4196 err = i915_gem_wait_for_idle(i915,
4197 I915_WAIT_LOCKED |
4198 I915_WAIT_INTERRUPTIBLE);
4199 if (err)
4200 goto err_unlock;
4201
64486ae7
CW
4202 *irq = val;
4203 mutex_unlock(&i915->drm.struct_mutex);
4204
4205 /* Flush idle worker to disarm irq */
4206 while (flush_delayed_work(&i915->gt.idle_work))
4207 ;
4208
4209 return 0;
4210
4211err_unlock:
4212 mutex_unlock(&i915->drm.struct_mutex);
4213 return err;
4214}
4215
094f9a54
CW
4216static int
4217i915_ring_missed_irq_get(void *data, u64 *val)
4218{
36cdd013 4219 struct drm_i915_private *dev_priv = data;
094f9a54
CW
4220
4221 *val = dev_priv->gpu_error.missed_irq_rings;
4222 return 0;
4223}
4224
4225static int
4226i915_ring_missed_irq_set(void *data, u64 val)
4227{
64486ae7 4228 struct drm_i915_private *i915 = data;
094f9a54 4229
64486ae7 4230 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
094f9a54
CW
4231}
4232
4233DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4234 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4235 "0x%08llx\n");
4236
4237static int
4238i915_ring_test_irq_get(void *data, u64 *val)
4239{
36cdd013 4240 struct drm_i915_private *dev_priv = data;
094f9a54
CW
4241
4242 *val = dev_priv->gpu_error.test_irq_rings;
4243
4244 return 0;
4245}
4246
4247static int
4248i915_ring_test_irq_set(void *data, u64 val)
4249{
64486ae7 4250 struct drm_i915_private *i915 = data;
094f9a54 4251
64486ae7 4252 val &= INTEL_INFO(i915)->ring_mask;
094f9a54 4253 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
094f9a54 4254
64486ae7 4255 return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
094f9a54
CW
4256}
4257
4258DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4259 i915_ring_test_irq_get, i915_ring_test_irq_set,
4260 "0x%08llx\n");
4261
dd624afd
CW
4262#define DROP_UNBOUND 0x1
4263#define DROP_BOUND 0x2
4264#define DROP_RETIRE 0x4
4265#define DROP_ACTIVE 0x8
fbbd37b3 4266#define DROP_FREED 0x10
8eadc19b 4267#define DROP_SHRINK_ALL 0x20
fbbd37b3
CW
4268#define DROP_ALL (DROP_UNBOUND | \
4269 DROP_BOUND | \
4270 DROP_RETIRE | \
4271 DROP_ACTIVE | \
8eadc19b
CW
4272 DROP_FREED | \
4273 DROP_SHRINK_ALL)
647416f9
KC
4274static int
4275i915_drop_caches_get(void *data, u64 *val)
dd624afd 4276{
647416f9 4277 *val = DROP_ALL;
dd624afd 4278
647416f9 4279 return 0;
dd624afd
CW
4280}
4281
647416f9
KC
4282static int
4283i915_drop_caches_set(void *data, u64 val)
dd624afd 4284{
36cdd013
DW
4285 struct drm_i915_private *dev_priv = data;
4286 struct drm_device *dev = &dev_priv->drm;
00c26cf9 4287 int ret = 0;
dd624afd 4288
2f9fe5ff 4289 DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
dd624afd
CW
4290
4291 /* No need to check and wait for gpu resets, only libdrm auto-restarts
4292 * on ioctls on -EAGAIN. */
00c26cf9
CW
4293 if (val & (DROP_ACTIVE | DROP_RETIRE)) {
4294 ret = mutex_lock_interruptible(&dev->struct_mutex);
dd624afd 4295 if (ret)
00c26cf9 4296 return ret;
dd624afd 4297
00c26cf9
CW
4298 if (val & DROP_ACTIVE)
4299 ret = i915_gem_wait_for_idle(dev_priv,
4300 I915_WAIT_INTERRUPTIBLE |
4301 I915_WAIT_LOCKED);
dd624afd 4302
00c26cf9
CW
4303 if (val & DROP_RETIRE)
4304 i915_gem_retire_requests(dev_priv);
4305
4306 mutex_unlock(&dev->struct_mutex);
4307 }
dd624afd 4308
d92a8cfc 4309 fs_reclaim_acquire(GFP_KERNEL);
21ab4e74 4310 if (val & DROP_BOUND)
912d572d 4311 i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
4ad72b7f 4312
21ab4e74 4313 if (val & DROP_UNBOUND)
912d572d 4314 i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
dd624afd 4315
8eadc19b
CW
4316 if (val & DROP_SHRINK_ALL)
4317 i915_gem_shrink_all(dev_priv);
d92a8cfc 4318 fs_reclaim_release(GFP_KERNEL);
8eadc19b 4319
fbbd37b3
CW
4320 if (val & DROP_FREED) {
4321 synchronize_rcu();
bdeb9785 4322 i915_gem_drain_freed_objects(dev_priv);
fbbd37b3
CW
4323 }
4324
647416f9 4325 return ret;
dd624afd
CW
4326}
4327
647416f9
KC
4328DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4329 i915_drop_caches_get, i915_drop_caches_set,
4330 "0x%08llx\n");
dd624afd 4331
647416f9
KC
4332static int
4333i915_max_freq_get(void *data, u64 *val)
358733e9 4334{
36cdd013 4335 struct drm_i915_private *dev_priv = data;
004777cb 4336
36cdd013 4337 if (INTEL_GEN(dev_priv) < 6)
004777cb
DV
4338 return -ENODEV;
4339
7c59a9c1 4340 *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
647416f9 4341 return 0;
358733e9
JB
4342}
4343
647416f9
KC
4344static int
4345i915_max_freq_set(void *data, u64 val)
358733e9 4346{
36cdd013 4347 struct drm_i915_private *dev_priv = data;
bc4d91f6 4348 u32 hw_max, hw_min;
647416f9 4349 int ret;
004777cb 4350
36cdd013 4351 if (INTEL_GEN(dev_priv) < 6)
004777cb 4352 return -ENODEV;
358733e9 4353
647416f9 4354 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
358733e9 4355
4fc688ce 4356 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
004777cb
DV
4357 if (ret)
4358 return ret;
4359
358733e9
JB
4360 /*
4361 * Turbo will still be enabled, but won't go above the set value.
4362 */
bc4d91f6 4363 val = intel_freq_opcode(dev_priv, val);
dd0a1aa1 4364
bc4d91f6
AG
4365 hw_max = dev_priv->rps.max_freq;
4366 hw_min = dev_priv->rps.min_freq;
dd0a1aa1 4367
b39fb297 4368 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
dd0a1aa1
JM
4369 mutex_unlock(&dev_priv->rps.hw_lock);
4370 return -EINVAL;
0a073b84
JB
4371 }
4372
b39fb297 4373 dev_priv->rps.max_freq_softlimit = val;
dd0a1aa1 4374
9fcee2f7
CW
4375 if (intel_set_rps(dev_priv, val))
4376 DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
dd0a1aa1 4377
4fc688ce 4378 mutex_unlock(&dev_priv->rps.hw_lock);
358733e9 4379
647416f9 4380 return 0;
358733e9
JB
4381}
4382
647416f9
KC
4383DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
4384 i915_max_freq_get, i915_max_freq_set,
3a3b4f98 4385 "%llu\n");
358733e9 4386
647416f9
KC
4387static int
4388i915_min_freq_get(void *data, u64 *val)
1523c310 4389{
36cdd013 4390 struct drm_i915_private *dev_priv = data;
004777cb 4391
62e1baa1 4392 if (INTEL_GEN(dev_priv) < 6)
004777cb
DV
4393 return -ENODEV;
4394
7c59a9c1 4395 *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
647416f9 4396 return 0;
1523c310
JB
4397}
4398
647416f9
KC
4399static int
4400i915_min_freq_set(void *data, u64 val)
1523c310 4401{
36cdd013 4402 struct drm_i915_private *dev_priv = data;
bc4d91f6 4403 u32 hw_max, hw_min;
647416f9 4404 int ret;
004777cb 4405
62e1baa1 4406 if (INTEL_GEN(dev_priv) < 6)
004777cb 4407 return -ENODEV;
1523c310 4408
647416f9 4409 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
1523c310 4410
4fc688ce 4411 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
004777cb
DV
4412 if (ret)
4413 return ret;
4414
1523c310
JB
4415 /*
4416 * Turbo will still be enabled, but won't go below the set value.
4417 */
bc4d91f6 4418 val = intel_freq_opcode(dev_priv, val);
dd0a1aa1 4419
bc4d91f6
AG
4420 hw_max = dev_priv->rps.max_freq;
4421 hw_min = dev_priv->rps.min_freq;
dd0a1aa1 4422
36cdd013
DW
4423 if (val < hw_min ||
4424 val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
dd0a1aa1
JM
4425 mutex_unlock(&dev_priv->rps.hw_lock);
4426 return -EINVAL;
0a073b84 4427 }
dd0a1aa1 4428
b39fb297 4429 dev_priv->rps.min_freq_softlimit = val;
dd0a1aa1 4430
9fcee2f7
CW
4431 if (intel_set_rps(dev_priv, val))
4432 DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
dd0a1aa1 4433
4fc688ce 4434 mutex_unlock(&dev_priv->rps.hw_lock);
1523c310 4435
647416f9 4436 return 0;
1523c310
JB
4437}
4438
647416f9
KC
4439DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
4440 i915_min_freq_get, i915_min_freq_set,
3a3b4f98 4441 "%llu\n");
1523c310 4442
647416f9
KC
4443static int
4444i915_cache_sharing_get(void *data, u64 *val)
07b7ddd9 4445{
36cdd013 4446 struct drm_i915_private *dev_priv = data;
07b7ddd9 4447 u32 snpcr;
07b7ddd9 4448
36cdd013 4449 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
004777cb
DV
4450 return -ENODEV;
4451
c8c8fb33 4452 intel_runtime_pm_get(dev_priv);
22bcfc6a 4453
07b7ddd9 4454 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
c8c8fb33
PZ
4455
4456 intel_runtime_pm_put(dev_priv);
07b7ddd9 4457
647416f9 4458 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
07b7ddd9 4459
647416f9 4460 return 0;
07b7ddd9
JB
4461}
4462
647416f9
KC
4463static int
4464i915_cache_sharing_set(void *data, u64 val)
07b7ddd9 4465{
36cdd013 4466 struct drm_i915_private *dev_priv = data;
07b7ddd9 4467 u32 snpcr;
07b7ddd9 4468
36cdd013 4469 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
004777cb
DV
4470 return -ENODEV;
4471
647416f9 4472 if (val > 3)
07b7ddd9
JB
4473 return -EINVAL;
4474
c8c8fb33 4475 intel_runtime_pm_get(dev_priv);
647416f9 4476 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
07b7ddd9
JB
4477
4478 /* Update the cache sharing policy here as well */
4479 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4480 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4481 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4482 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4483
c8c8fb33 4484 intel_runtime_pm_put(dev_priv);
647416f9 4485 return 0;
07b7ddd9
JB
4486}
4487
647416f9
KC
4488DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4489 i915_cache_sharing_get, i915_cache_sharing_set,
4490 "%llu\n");
07b7ddd9 4491
36cdd013 4492static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4493 struct sseu_dev_info *sseu)
5d39525a 4494{
0a0b457f 4495 int ss_max = 2;
5d39525a
JM
4496 int ss;
4497 u32 sig1[ss_max], sig2[ss_max];
4498
4499 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4500 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4501 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4502 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4503
4504 for (ss = 0; ss < ss_max; ss++) {
4505 unsigned int eu_cnt;
4506
4507 if (sig1[ss] & CHV_SS_PG_ENABLE)
4508 /* skip disabled subslice */
4509 continue;
4510
f08a0c92 4511 sseu->slice_mask = BIT(0);
57ec171e 4512 sseu->subslice_mask |= BIT(ss);
5d39525a
JM
4513 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4514 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4515 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4516 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
915490d5
ID
4517 sseu->eu_total += eu_cnt;
4518 sseu->eu_per_subslice = max_t(unsigned int,
4519 sseu->eu_per_subslice, eu_cnt);
5d39525a 4520 }
5d39525a
JM
4521}
4522
36cdd013 4523static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4524 struct sseu_dev_info *sseu)
5d39525a 4525{
1c046bc1 4526 int s_max = 3, ss_max = 4;
5d39525a
JM
4527 int s, ss;
4528 u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
4529
1c046bc1 4530 /* BXT has a single slice and at most 3 subslices. */
cc3f90f0 4531 if (IS_GEN9_LP(dev_priv)) {
1c046bc1
JM
4532 s_max = 1;
4533 ss_max = 3;
4534 }
4535
4536 for (s = 0; s < s_max; s++) {
4537 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4538 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4539 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4540 }
4541
5d39525a
JM
4542 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4543 GEN9_PGCTL_SSA_EU19_ACK |
4544 GEN9_PGCTL_SSA_EU210_ACK |
4545 GEN9_PGCTL_SSA_EU311_ACK;
4546 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4547 GEN9_PGCTL_SSB_EU19_ACK |
4548 GEN9_PGCTL_SSB_EU210_ACK |
4549 GEN9_PGCTL_SSB_EU311_ACK;
4550
4551 for (s = 0; s < s_max; s++) {
4552 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4553 /* skip disabled slice */
4554 continue;
4555
f08a0c92 4556 sseu->slice_mask |= BIT(s);
1c046bc1 4557
1dd7a3e7 4558 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
57ec171e
ID
4559 sseu->subslice_mask =
4560 INTEL_INFO(dev_priv)->sseu.subslice_mask;
1c046bc1 4561
5d39525a
JM
4562 for (ss = 0; ss < ss_max; ss++) {
4563 unsigned int eu_cnt;
4564
cc3f90f0 4565 if (IS_GEN9_LP(dev_priv)) {
57ec171e
ID
4566 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4567 /* skip disabled subslice */
4568 continue;
1c046bc1 4569
57ec171e
ID
4570 sseu->subslice_mask |= BIT(ss);
4571 }
1c046bc1 4572
5d39525a
JM
4573 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4574 eu_mask[ss%2]);
915490d5
ID
4575 sseu->eu_total += eu_cnt;
4576 sseu->eu_per_subslice = max_t(unsigned int,
4577 sseu->eu_per_subslice,
4578 eu_cnt);
5d39525a
JM
4579 }
4580 }
4581}
4582
36cdd013 4583static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4584 struct sseu_dev_info *sseu)
91bedd34 4585{
91bedd34 4586 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
36cdd013 4587 int s;
91bedd34 4588
f08a0c92 4589 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
91bedd34 4590
f08a0c92 4591 if (sseu->slice_mask) {
57ec171e 4592 sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
43b67998
ID
4593 sseu->eu_per_subslice =
4594 INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
57ec171e
ID
4595 sseu->eu_total = sseu->eu_per_subslice *
4596 sseu_subslice_total(sseu);
91bedd34
ŁD
4597
4598 /* subtract fused off EU(s) from enabled slice(s) */
795b38b3 4599 for (s = 0; s < fls(sseu->slice_mask); s++) {
43b67998
ID
4600 u8 subslice_7eu =
4601 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
91bedd34 4602
915490d5 4603 sseu->eu_total -= hweight8(subslice_7eu);
91bedd34
ŁD
4604 }
4605 }
4606}
4607
615d8908
ID
4608static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4609 const struct sseu_dev_info *sseu)
4610{
4611 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4612 const char *type = is_available_info ? "Available" : "Enabled";
4613
c67ba538
ID
4614 seq_printf(m, " %s Slice Mask: %04x\n", type,
4615 sseu->slice_mask);
615d8908 4616 seq_printf(m, " %s Slice Total: %u\n", type,
f08a0c92 4617 hweight8(sseu->slice_mask));
615d8908 4618 seq_printf(m, " %s Subslice Total: %u\n", type,
57ec171e 4619 sseu_subslice_total(sseu));
c67ba538
ID
4620 seq_printf(m, " %s Subslice Mask: %04x\n", type,
4621 sseu->subslice_mask);
615d8908 4622 seq_printf(m, " %s Subslice Per Slice: %u\n", type,
57ec171e 4623 hweight8(sseu->subslice_mask));
615d8908
ID
4624 seq_printf(m, " %s EU Total: %u\n", type,
4625 sseu->eu_total);
4626 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4627 sseu->eu_per_subslice);
4628
4629 if (!is_available_info)
4630 return;
4631
4632 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4633 if (HAS_POOLED_EU(dev_priv))
4634 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4635
4636 seq_printf(m, " Has Slice Power Gating: %s\n",
4637 yesno(sseu->has_slice_pg));
4638 seq_printf(m, " Has Subslice Power Gating: %s\n",
4639 yesno(sseu->has_subslice_pg));
4640 seq_printf(m, " Has EU Power Gating: %s\n",
4641 yesno(sseu->has_eu_pg));
4642}
4643
3873218f
JM
4644static int i915_sseu_status(struct seq_file *m, void *unused)
4645{
36cdd013 4646 struct drm_i915_private *dev_priv = node_to_i915(m->private);
915490d5 4647 struct sseu_dev_info sseu;
3873218f 4648
36cdd013 4649 if (INTEL_GEN(dev_priv) < 8)
3873218f
JM
4650 return -ENODEV;
4651
4652 seq_puts(m, "SSEU Device Info\n");
615d8908 4653 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
3873218f 4654
7f992aba 4655 seq_puts(m, "SSEU Device Status\n");
915490d5 4656 memset(&sseu, 0, sizeof(sseu));
238010ed
DW
4657
4658 intel_runtime_pm_get(dev_priv);
4659
36cdd013 4660 if (IS_CHERRYVIEW(dev_priv)) {
915490d5 4661 cherryview_sseu_device_status(dev_priv, &sseu);
36cdd013 4662 } else if (IS_BROADWELL(dev_priv)) {
915490d5 4663 broadwell_sseu_device_status(dev_priv, &sseu);
36cdd013 4664 } else if (INTEL_GEN(dev_priv) >= 9) {
915490d5 4665 gen9_sseu_device_status(dev_priv, &sseu);
7f992aba 4666 }
238010ed
DW
4667
4668 intel_runtime_pm_put(dev_priv);
4669
615d8908 4670 i915_print_sseu_info(m, false, &sseu);
7f992aba 4671
3873218f
JM
4672 return 0;
4673}
4674
6d794d42
BW
4675static int i915_forcewake_open(struct inode *inode, struct file *file)
4676{
36cdd013 4677 struct drm_i915_private *dev_priv = inode->i_private;
6d794d42 4678
36cdd013 4679 if (INTEL_GEN(dev_priv) < 6)
6d794d42
BW
4680 return 0;
4681
6daccb0b 4682 intel_runtime_pm_get(dev_priv);
59bad947 4683 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6d794d42
BW
4684
4685 return 0;
4686}
4687
c43b5634 4688static int i915_forcewake_release(struct inode *inode, struct file *file)
6d794d42 4689{
36cdd013 4690 struct drm_i915_private *dev_priv = inode->i_private;
6d794d42 4691
36cdd013 4692 if (INTEL_GEN(dev_priv) < 6)
6d794d42
BW
4693 return 0;
4694
59bad947 4695 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6daccb0b 4696 intel_runtime_pm_put(dev_priv);
6d794d42
BW
4697
4698 return 0;
4699}
4700
/*
 * File operations for i915_forcewake_user: holding the file open pins
 * forcewake (and runtime pm); no read/write is supported.
 */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4706
317eaa95
L
4707static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4708{
4709 struct drm_i915_private *dev_priv = m->private;
4710 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4711
4712 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4713 seq_printf(m, "Detected: %s\n",
4714 yesno(delayed_work_pending(&hotplug->reenable_work)));
4715
4716 return 0;
4717}
4718
4719static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4720 const char __user *ubuf, size_t len,
4721 loff_t *offp)
4722{
4723 struct seq_file *m = file->private_data;
4724 struct drm_i915_private *dev_priv = m->private;
4725 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4726 unsigned int new_threshold;
4727 int i;
4728 char *newline;
4729 char tmp[16];
4730
4731 if (len >= sizeof(tmp))
4732 return -EINVAL;
4733
4734 if (copy_from_user(tmp, ubuf, len))
4735 return -EFAULT;
4736
4737 tmp[len] = '\0';
4738
4739 /* Strip newline, if any */
4740 newline = strchr(tmp, '\n');
4741 if (newline)
4742 *newline = '\0';
4743
4744 if (strcmp(tmp, "reset") == 0)
4745 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4746 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4747 return -EINVAL;
4748
4749 if (new_threshold > 0)
4750 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4751 new_threshold);
4752 else
4753 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4754
4755 spin_lock_irq(&dev_priv->irq_lock);
4756 hotplug->hpd_storm_threshold = new_threshold;
4757 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4758 for_each_hpd_pin(i)
4759 hotplug->stats[i].count = 0;
4760 spin_unlock_irq(&dev_priv->irq_lock);
4761
4762 /* Re-enable hpd immediately if we were in an irq storm */
4763 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4764
4765 return len;
4766}
4767
/* Bind the seq_file show callback to this debugfs file's private data. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4772
/* Read/write debugfs file: show threshold/state, write a new threshold. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4781
/*
 * Read-only debugfs entries registered in bulk through
 * drm_debugfs_create_files() from i915_debugfs_register().  Each entry's
 * optional fourth member is passed as the show callback's driver data
 * (e.g. (void *)1 selects the pin-display / error-log variant).
 * Entry order is the creation order under the debugfs root.
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2017263e 4836
/*
 * Writable (or otherwise custom-fops) debugfs entries, created one by one
 * in i915_debugfs_register() with mode S_IRUGO | S_IWUSR.  The
 * error-state files are compiled out when capture support is disabled.
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_control", &i915_guc_log_control_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}
};
4864
/*
 * i915_debugfs_register - create the i915 debugfs hierarchy for @dev_priv.
 *
 * Creates, in order: the i915_forcewake_user file, the pipe CRC files,
 * every writable entry from i915_debugfs_files[], and finally the
 * read-only i915_debugfs_list[] via drm_debugfs_create_files().
 *
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): on a mid-sequence failure, files created earlier are not
 * removed here; presumably they are reclaimed when the minor's debugfs
 * root is torn down — confirm against the drm core before relying on
 * this error path.
 */
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	struct dentry *ent;
	int ret, i;

	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
				  minor->debugfs_root, to_i915(minor->dev),
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	ret = intel_pipe_crc_create(minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ent = debugfs_create_file(i915_debugfs_files[i].name,
					  S_IRUGO | S_IWUSR,
					  minor->debugfs_root,
					  to_i915(minor->dev),
					  i915_debugfs_files[i].fops);
		if (!ent)
			return -ENOMEM;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
4895
aa7471d2
JN
/*
 * Describes one contiguous DPCD register range dumped by i915_dpcd_show().
 * Either .end (inclusive) or .size bounds the range; with neither set a
 * single byte at .offset is read.
 */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

/* The DPCD ranges exposed through the i915_dpcd debugfs file. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4919
4920static int i915_dpcd_show(struct seq_file *m, void *data)
4921{
4922 struct drm_connector *connector = m->private;
4923 struct intel_dp *intel_dp =
4924 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4925 uint8_t buf[16];
4926 ssize_t err;
4927 int i;
4928
5c1a8875
MK
4929 if (connector->status != connector_status_connected)
4930 return -ENODEV;
4931
aa7471d2
JN
4932 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4933 const struct dpcd_block *b = &i915_dpcd_debug[i];
4934 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4935
4936 if (b->edp &&
4937 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4938 continue;
4939
4940 /* low tech for now */
4941 if (WARN_ON(size > sizeof(buf)))
4942 continue;
4943
4944 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4945 if (err <= 0) {
4946 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4947 size, b->offset, err);
4948 continue;
4949 }
4950
4951 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
b3f9d7d7 4952 }
aa7471d2
JN
4953
4954 return 0;
4955}
4956
/* Bind the DPCD dump show callback to the per-connector private data. */
static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}
4961
/* Read-only per-connector debugfs file dumping the DPCD registers. */
static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
4969
ecbd6781
DW
4970static int i915_panel_show(struct seq_file *m, void *data)
4971{
4972 struct drm_connector *connector = m->private;
4973 struct intel_dp *intel_dp =
4974 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4975
4976 if (connector->status != connector_status_connected)
4977 return -ENODEV;
4978
4979 seq_printf(m, "Panel power up delay: %d\n",
4980 intel_dp->panel_power_up_delay);
4981 seq_printf(m, "Panel power down delay: %d\n",
4982 intel_dp->panel_power_down_delay);
4983 seq_printf(m, "Backlight on delay: %d\n",
4984 intel_dp->backlight_on_delay);
4985 seq_printf(m, "Backlight off delay: %d\n",
4986 intel_dp->backlight_off_delay);
4987
4988 return 0;
4989}
4990
/* Bind the panel-timings show callback to the per-connector private data. */
static int i915_panel_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_panel_show, inode->i_private);
}
4995
/* Read-only per-connector debugfs file for eDP panel timings. */
static const struct file_operations i915_panel_fops = {
	.owner = THIS_MODULE,
	.open = i915_panel_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
5003
aa7471d2
JN
5004/**
5005 * i915_debugfs_connector_add - add i915 specific connector debugfs files
5006 * @connector: pointer to a registered drm_connector
5007 *
5008 * Cleanup will be done by drm_connector_unregister() through a call to
5009 * drm_debugfs_connector_remove().
5010 *
5011 * Returns 0 on success, negative error codes on error.
5012 */
5013int i915_debugfs_connector_add(struct drm_connector *connector)
5014{
5015 struct dentry *root = connector->debugfs_entry;
5016
5017 /* The connector must have been registered beforehands. */
5018 if (!root)
5019 return -ENODEV;
5020
5021 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5022 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
ecbd6781
DW
5023 debugfs_create_file("i915_dpcd", S_IRUGO, root,
5024 connector, &i915_dpcd_fops);
5025
5026 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5027 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
5028 connector, &i915_panel_fops);
aa7471d2
JN
5029
5030 return 0;
5031}