]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame - drivers/gpu/drm/i915/i915_debugfs.c
Merge tag 'sh-pfc-for-v5.1-tag2' of git://git.kernel.org/pub/scm/linux/kernel/git...
[mirror_ubuntu-focal-kernel.git] / drivers / gpu / drm / i915 / i915_debugfs.c
CommitLineData
2017263e
BG
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
f3cd474b 29#include <linux/debugfs.h>
e637d2cb 30#include <linux/sort.h>
d92a8cfc 31#include <linux/sched/mm.h>
4e5359cd 32#include "intel_drv.h"
a2695744 33#include "intel_guc_submission.h"
2017263e 34
36cdd013
DW
35static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36{
37 return to_i915(node->minor->dev);
38}
39
70d39fe4
CW
40static int i915_capabilities(struct seq_file *m, void *data)
41{
36cdd013
DW
42 struct drm_i915_private *dev_priv = node_to_i915(m->private);
43 const struct intel_device_info *info = INTEL_INFO(dev_priv);
a8c9b849 44 struct drm_printer p = drm_seq_file_printer(m);
70d39fe4 45
36cdd013 46 seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
2e0d26f8 47 seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
36cdd013 48 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
418e3cd8 49
a8c9b849 50 intel_device_info_dump_flags(info, &p);
5fbbe8d4 51 intel_device_info_dump_runtime(info, &p);
3fed1808 52 intel_driver_caps_print(&dev_priv->caps, &p);
70d39fe4 53
418e3cd8 54 kernel_param_lock(THIS_MODULE);
acfb9973 55 i915_params_dump(&i915_modparams, &p);
418e3cd8
CW
56 kernel_param_unlock(THIS_MODULE);
57
70d39fe4
CW
58 return 0;
59}
2017263e 60
/* '*' when the object is still active on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
65
a7363de7 66static char get_pin_flag(struct drm_i915_gem_object *obj)
be12a86b 67{
bd3d2252 68 return obj->pin_global ? 'p' : ' ';
be12a86b
TU
69}
70
a7363de7 71static char get_tiling_flag(struct drm_i915_gem_object *obj)
a6172a80 72{
3e510a8e 73 switch (i915_gem_object_get_tiling(obj)) {
0206e353 74 default:
be12a86b
TU
75 case I915_TILING_NONE: return ' ';
76 case I915_TILING_X: return 'X';
77 case I915_TILING_Y: return 'Y';
0206e353 78 }
a6172a80
CW
79}
80
a7363de7 81static char get_global_flag(struct drm_i915_gem_object *obj)
be12a86b 82{
a65adaf8 83 return obj->userfault_count ? 'g' : ' ';
be12a86b
TU
84}
85
a7363de7 86static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
1d693bcc 87{
a4f5ea64 88 return obj->mm.mapping ? 'M' : ' ';
1d693bcc
BW
89}
90
ca1543be
TU
91static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92{
93 u64 size = 0;
94 struct i915_vma *vma;
95
e2189dd0
CW
96 for_each_ggtt_vma(vma, obj) {
97 if (drm_mm_node_allocated(&vma->node))
ca1543be
TU
98 size += vma->node.size;
99 }
100
101 return size;
102}
103
7393b7ee
MA
104static const char *
105stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106{
107 size_t x = 0;
108
109 switch (page_sizes) {
110 case 0:
111 return "";
112 case I915_GTT_PAGE_SIZE_4K:
113 return "4K";
114 case I915_GTT_PAGE_SIZE_64K:
115 return "64K";
116 case I915_GTT_PAGE_SIZE_2M:
117 return "2M";
118 default:
119 if (!buf)
120 return "M";
121
122 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123 x += snprintf(buf + x, len - x, "2M, ");
124 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125 x += snprintf(buf + x, len - x, "64K, ");
126 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127 x += snprintf(buf + x, len - x, "4K, ");
128 buf[x-2] = '\0';
129
130 return buf;
131 }
132}
133
37811fcc
CW
134static void
135describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
136{
b4716185 137 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
e2f80391 138 struct intel_engine_cs *engine;
1d693bcc 139 struct i915_vma *vma;
faf5bf0a 140 unsigned int frontbuffer_bits;
d7f46fc4
BW
141 int pin_count = 0;
142
188c1ab7
CW
143 lockdep_assert_held(&obj->base.dev->struct_mutex);
144
d07f0e59 145 seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
37811fcc 146 &obj->base,
be12a86b 147 get_active_flag(obj),
37811fcc
CW
148 get_pin_flag(obj),
149 get_tiling_flag(obj),
1d693bcc 150 get_global_flag(obj),
be12a86b 151 get_pin_mapped_flag(obj),
a05a5862 152 obj->base.size / 1024,
c0a51fd0
CK
153 obj->read_domains,
154 obj->write_domain,
36cdd013 155 i915_cache_level_str(dev_priv, obj->cache_level),
a4f5ea64
CW
156 obj->mm.dirty ? " dirty" : "",
157 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
37811fcc
CW
158 if (obj->base.name)
159 seq_printf(m, " (name: %d)", obj->base.name);
1c7f4bca 160 list_for_each_entry(vma, &obj->vma_list, obj_link) {
20dfbde4 161 if (i915_vma_is_pinned(vma))
d7f46fc4 162 pin_count++;
ba0635ff
DC
163 }
164 seq_printf(m, " (pinned x %d)", pin_count);
bd3d2252
CW
165 if (obj->pin_global)
166 seq_printf(m, " (global)");
1c7f4bca 167 list_for_each_entry(vma, &obj->vma_list, obj_link) {
15717de2
CW
168 if (!drm_mm_node_allocated(&vma->node))
169 continue;
170
7393b7ee 171 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
3272db53 172 i915_vma_is_ggtt(vma) ? "g" : "pp",
7393b7ee
MA
173 vma->node.start, vma->node.size,
174 stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
21976853
CW
175 if (i915_vma_is_ggtt(vma)) {
176 switch (vma->ggtt_view.type) {
177 case I915_GGTT_VIEW_NORMAL:
178 seq_puts(m, ", normal");
179 break;
180
181 case I915_GGTT_VIEW_PARTIAL:
182 seq_printf(m, ", partial [%08llx+%x]",
8bab1193
CW
183 vma->ggtt_view.partial.offset << PAGE_SHIFT,
184 vma->ggtt_view.partial.size << PAGE_SHIFT);
21976853
CW
185 break;
186
187 case I915_GGTT_VIEW_ROTATED:
188 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
8bab1193
CW
189 vma->ggtt_view.rotated.plane[0].width,
190 vma->ggtt_view.rotated.plane[0].height,
191 vma->ggtt_view.rotated.plane[0].stride,
192 vma->ggtt_view.rotated.plane[0].offset,
193 vma->ggtt_view.rotated.plane[1].width,
194 vma->ggtt_view.rotated.plane[1].height,
195 vma->ggtt_view.rotated.plane[1].stride,
196 vma->ggtt_view.rotated.plane[1].offset);
21976853
CW
197 break;
198
199 default:
200 MISSING_CASE(vma->ggtt_view.type);
201 break;
202 }
203 }
49ef5294
CW
204 if (vma->fence)
205 seq_printf(m, " , fence: %d%s",
206 vma->fence->id,
207 i915_gem_active_isset(&vma->last_fence) ? "*" : "");
596c5923 208 seq_puts(m, ")");
1d693bcc 209 }
c1ad11fc 210 if (obj->stolen)
440fd528 211 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
27c01aae 212
d07f0e59 213 engine = i915_gem_object_last_write_engine(obj);
27c01aae
CW
214 if (engine)
215 seq_printf(m, " (%s)", engine->name);
216
faf5bf0a
CW
217 frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
218 if (frontbuffer_bits)
219 seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
37811fcc
CW
220}
221
e637d2cb 222static int obj_rank_by_stolen(const void *A, const void *B)
6d2b8885 223{
e637d2cb
CW
224 const struct drm_i915_gem_object *a =
225 *(const struct drm_i915_gem_object **)A;
226 const struct drm_i915_gem_object *b =
227 *(const struct drm_i915_gem_object **)B;
6d2b8885 228
2d05fa16
RV
229 if (a->stolen->start < b->stolen->start)
230 return -1;
231 if (a->stolen->start > b->stolen->start)
232 return 1;
233 return 0;
6d2b8885
CW
234}
235
236static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
237{
36cdd013
DW
238 struct drm_i915_private *dev_priv = node_to_i915(m->private);
239 struct drm_device *dev = &dev_priv->drm;
e637d2cb 240 struct drm_i915_gem_object **objects;
6d2b8885 241 struct drm_i915_gem_object *obj;
c44ef60e 242 u64 total_obj_size, total_gtt_size;
e637d2cb
CW
243 unsigned long total, count, n;
244 int ret;
245
246 total = READ_ONCE(dev_priv->mm.object_count);
2098105e 247 objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
e637d2cb
CW
248 if (!objects)
249 return -ENOMEM;
6d2b8885
CW
250
251 ret = mutex_lock_interruptible(&dev->struct_mutex);
252 if (ret)
e637d2cb 253 goto out;
6d2b8885
CW
254
255 total_obj_size = total_gtt_size = count = 0;
f2123818
CW
256
257 spin_lock(&dev_priv->mm.obj_lock);
258 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
e637d2cb
CW
259 if (count == total)
260 break;
261
6d2b8885
CW
262 if (obj->stolen == NULL)
263 continue;
264
e637d2cb 265 objects[count++] = obj;
6d2b8885 266 total_obj_size += obj->base.size;
ca1543be 267 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
e637d2cb 268
6d2b8885 269 }
f2123818 270 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
e637d2cb
CW
271 if (count == total)
272 break;
273
6d2b8885
CW
274 if (obj->stolen == NULL)
275 continue;
276
e637d2cb 277 objects[count++] = obj;
6d2b8885 278 total_obj_size += obj->base.size;
6d2b8885 279 }
f2123818 280 spin_unlock(&dev_priv->mm.obj_lock);
e637d2cb
CW
281
282 sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
283
6d2b8885 284 seq_puts(m, "Stolen:\n");
e637d2cb 285 for (n = 0; n < count; n++) {
6d2b8885 286 seq_puts(m, " ");
e637d2cb 287 describe_obj(m, objects[n]);
6d2b8885 288 seq_putc(m, '\n');
6d2b8885 289 }
e637d2cb 290 seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
6d2b8885 291 count, total_obj_size, total_gtt_size);
e637d2cb
CW
292
293 mutex_unlock(&dev->struct_mutex);
294out:
2098105e 295 kvfree(objects);
e637d2cb 296 return ret;
6d2b8885
CW
297}
298
2db8e9d6 299struct file_stats {
6313c204 300 struct drm_i915_file_private *file_priv;
c44ef60e
MK
301 unsigned long count;
302 u64 total, unbound;
303 u64 global, shared;
304 u64 active, inactive;
2db8e9d6
CW
305};
306
307static int per_file_stats(int id, void *ptr, void *data)
308{
309 struct drm_i915_gem_object *obj = ptr;
310 struct file_stats *stats = data;
6313c204 311 struct i915_vma *vma;
2db8e9d6 312
0caf81b5
CW
313 lockdep_assert_held(&obj->base.dev->struct_mutex);
314
2db8e9d6
CW
315 stats->count++;
316 stats->total += obj->base.size;
15717de2
CW
317 if (!obj->bind_count)
318 stats->unbound += obj->base.size;
c67a17e9
CW
319 if (obj->base.name || obj->base.dma_buf)
320 stats->shared += obj->base.size;
321
894eeecc
CW
322 list_for_each_entry(vma, &obj->vma_list, obj_link) {
323 if (!drm_mm_node_allocated(&vma->node))
324 continue;
6313c204 325
3272db53 326 if (i915_vma_is_ggtt(vma)) {
894eeecc
CW
327 stats->global += vma->node.size;
328 } else {
329 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
6313c204 330
82ad6443 331 if (ppgtt->vm.file != stats->file_priv)
6313c204 332 continue;
6313c204 333 }
894eeecc 334
b0decaf7 335 if (i915_vma_is_active(vma))
894eeecc
CW
336 stats->active += vma->node.size;
337 else
338 stats->inactive += vma->node.size;
2db8e9d6
CW
339 }
340
341 return 0;
342}
343
b0da1b79
CW
/*
 * Emit one summary line for a filled-in struct file_stats (passed by
 * value).  Nothing is printed when no objects were accounted.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
493018dc
BV
356
357static void print_batch_pool_stats(struct seq_file *m,
358 struct drm_i915_private *dev_priv)
359{
360 struct drm_i915_gem_object *obj;
361 struct file_stats stats;
e2f80391 362 struct intel_engine_cs *engine;
3b3f1650 363 enum intel_engine_id id;
b4ac5afc 364 int j;
493018dc
BV
365
366 memset(&stats, 0, sizeof(stats));
367
3b3f1650 368 for_each_engine(engine, dev_priv, id) {
e2f80391 369 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
8d9d5744 370 list_for_each_entry(obj,
e2f80391 371 &engine->batch_pool.cache_list[j],
8d9d5744
CW
372 batch_pool_link)
373 per_file_stats(0, obj, &stats);
374 }
06fbca71 375 }
493018dc 376
b0da1b79 377 print_file_stats(m, "[k]batch pool", stats);
493018dc
BV
378}
379
ab82a063 380static int per_file_ctx_stats(int idx, void *ptr, void *data)
15da9565
CW
381{
382 struct i915_gem_context *ctx = ptr;
ab82a063
CW
383 struct intel_engine_cs *engine;
384 enum intel_engine_id id;
385
386 for_each_engine(engine, ctx->i915, id) {
387 struct intel_context *ce = to_intel_context(ctx, engine);
15da9565 388
ab82a063
CW
389 if (ce->state)
390 per_file_stats(0, ce->state->obj, data);
391 if (ce->ring)
392 per_file_stats(0, ce->ring->vma->obj, data);
15da9565
CW
393 }
394
395 return 0;
396}
397
398static void print_context_stats(struct seq_file *m,
399 struct drm_i915_private *dev_priv)
400{
36cdd013 401 struct drm_device *dev = &dev_priv->drm;
15da9565
CW
402 struct file_stats stats;
403 struct drm_file *file;
404
405 memset(&stats, 0, sizeof(stats));
406
36cdd013 407 mutex_lock(&dev->struct_mutex);
15da9565
CW
408 if (dev_priv->kernel_context)
409 per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
410
36cdd013 411 list_for_each_entry(file, &dev->filelist, lhead) {
15da9565
CW
412 struct drm_i915_file_private *fpriv = file->driver_priv;
413 idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
414 }
36cdd013 415 mutex_unlock(&dev->struct_mutex);
15da9565
CW
416
417 print_file_stats(m, "[k]contexts", stats);
418}
419
36cdd013 420static int i915_gem_object_info(struct seq_file *m, void *data)
73aa808f 421{
36cdd013
DW
422 struct drm_i915_private *dev_priv = node_to_i915(m->private);
423 struct drm_device *dev = &dev_priv->drm;
72e96d64 424 struct i915_ggtt *ggtt = &dev_priv->ggtt;
7393b7ee
MA
425 u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
426 u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
6299f992 427 struct drm_i915_gem_object *obj;
7393b7ee 428 unsigned int page_sizes = 0;
2db8e9d6 429 struct drm_file *file;
7393b7ee 430 char buf[80];
73aa808f
CW
431 int ret;
432
433 ret = mutex_lock_interruptible(&dev->struct_mutex);
434 if (ret)
435 return ret;
436
3ef7f228 437 seq_printf(m, "%u objects, %llu bytes\n",
6299f992
CW
438 dev_priv->mm.object_count,
439 dev_priv->mm.object_memory);
440
1544c42e
CW
441 size = count = 0;
442 mapped_size = mapped_count = 0;
443 purgeable_size = purgeable_count = 0;
7393b7ee 444 huge_size = huge_count = 0;
f2123818
CW
445
446 spin_lock(&dev_priv->mm.obj_lock);
447 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
2bd160a1
CW
448 size += obj->base.size;
449 ++count;
450
a4f5ea64 451 if (obj->mm.madv == I915_MADV_DONTNEED) {
2bd160a1
CW
452 purgeable_size += obj->base.size;
453 ++purgeable_count;
454 }
455
a4f5ea64 456 if (obj->mm.mapping) {
2bd160a1
CW
457 mapped_count++;
458 mapped_size += obj->base.size;
be19b10d 459 }
7393b7ee
MA
460
461 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
462 huge_count++;
463 huge_size += obj->base.size;
464 page_sizes |= obj->mm.page_sizes.sg;
465 }
b7abb714 466 }
c44ef60e 467 seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
6c085a72 468
2bd160a1 469 size = count = dpy_size = dpy_count = 0;
f2123818 470 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
2bd160a1
CW
471 size += obj->base.size;
472 ++count;
473
bd3d2252 474 if (obj->pin_global) {
2bd160a1
CW
475 dpy_size += obj->base.size;
476 ++dpy_count;
6299f992 477 }
2bd160a1 478
a4f5ea64 479 if (obj->mm.madv == I915_MADV_DONTNEED) {
b7abb714
CW
480 purgeable_size += obj->base.size;
481 ++purgeable_count;
482 }
2bd160a1 483
a4f5ea64 484 if (obj->mm.mapping) {
2bd160a1
CW
485 mapped_count++;
486 mapped_size += obj->base.size;
be19b10d 487 }
7393b7ee
MA
488
489 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
490 huge_count++;
491 huge_size += obj->base.size;
492 page_sizes |= obj->mm.page_sizes.sg;
493 }
6299f992 494 }
f2123818
CW
495 spin_unlock(&dev_priv->mm.obj_lock);
496
2bd160a1
CW
497 seq_printf(m, "%u bound objects, %llu bytes\n",
498 count, size);
c44ef60e 499 seq_printf(m, "%u purgeable objects, %llu bytes\n",
b7abb714 500 purgeable_count, purgeable_size);
2bd160a1
CW
501 seq_printf(m, "%u mapped objects, %llu bytes\n",
502 mapped_count, mapped_size);
7393b7ee
MA
503 seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
504 huge_count,
505 stringify_page_sizes(page_sizes, buf, sizeof(buf)),
506 huge_size);
bd3d2252 507 seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
2bd160a1 508 dpy_count, dpy_size);
6299f992 509
b7128ef1 510 seq_printf(m, "%llu [%pa] gtt total\n",
82ad6443 511 ggtt->vm.total, &ggtt->mappable_end);
7393b7ee
MA
512 seq_printf(m, "Supported page sizes: %s\n",
513 stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
514 buf, sizeof(buf)));
73aa808f 515
493018dc
BV
516 seq_putc(m, '\n');
517 print_batch_pool_stats(m, dev_priv);
1d2ac403
DV
518 mutex_unlock(&dev->struct_mutex);
519
520 mutex_lock(&dev->filelist_mutex);
15da9565 521 print_context_stats(m, dev_priv);
2db8e9d6
CW
522 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
523 struct file_stats stats;
c84455b4 524 struct drm_i915_file_private *file_priv = file->driver_priv;
e61e0f51 525 struct i915_request *request;
3ec2f427 526 struct task_struct *task;
2db8e9d6 527
0caf81b5
CW
528 mutex_lock(&dev->struct_mutex);
529
2db8e9d6 530 memset(&stats, 0, sizeof(stats));
6313c204 531 stats.file_priv = file->driver_priv;
5b5ffff0 532 spin_lock(&file->table_lock);
2db8e9d6 533 idr_for_each(&file->object_idr, per_file_stats, &stats);
5b5ffff0 534 spin_unlock(&file->table_lock);
3ec2f427
TH
535 /*
536 * Although we have a valid reference on file->pid, that does
537 * not guarantee that the task_struct who called get_pid() is
538 * still alive (e.g. get_pid(current) => fork() => exit()).
539 * Therefore, we need to protect this ->comm access using RCU.
540 */
c84455b4 541 request = list_first_entry_or_null(&file_priv->mm.request_list,
e61e0f51 542 struct i915_request,
c8659efa 543 client_link);
3ec2f427 544 rcu_read_lock();
4e0d64db
CW
545 task = pid_task(request && request->gem_context->pid ?
546 request->gem_context->pid : file->pid,
c84455b4 547 PIDTYPE_PID);
493018dc 548 print_file_stats(m, task ? task->comm : "<unknown>", stats);
3ec2f427 549 rcu_read_unlock();
0caf81b5 550
c84455b4 551 mutex_unlock(&dev->struct_mutex);
2db8e9d6 552 }
1d2ac403 553 mutex_unlock(&dev->filelist_mutex);
73aa808f
CW
554
555 return 0;
556}
557
aee56cff 558static int i915_gem_gtt_info(struct seq_file *m, void *data)
08c18323 559{
9f25d007 560 struct drm_info_node *node = m->private;
36cdd013
DW
561 struct drm_i915_private *dev_priv = node_to_i915(node);
562 struct drm_device *dev = &dev_priv->drm;
f2123818 563 struct drm_i915_gem_object **objects;
08c18323 564 struct drm_i915_gem_object *obj;
c44ef60e 565 u64 total_obj_size, total_gtt_size;
f2123818 566 unsigned long nobject, n;
08c18323
CW
567 int count, ret;
568
f2123818
CW
569 nobject = READ_ONCE(dev_priv->mm.object_count);
570 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571 if (!objects)
572 return -ENOMEM;
573
08c18323
CW
574 ret = mutex_lock_interruptible(&dev->struct_mutex);
575 if (ret)
576 return ret;
577
f2123818
CW
578 count = 0;
579 spin_lock(&dev_priv->mm.obj_lock);
580 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581 objects[count++] = obj;
582 if (count == nobject)
583 break;
584 }
585 spin_unlock(&dev_priv->mm.obj_lock);
586
587 total_obj_size = total_gtt_size = 0;
588 for (n = 0; n < count; n++) {
589 obj = objects[n];
590
267f0c90 591 seq_puts(m, " ");
08c18323 592 describe_obj(m, obj);
267f0c90 593 seq_putc(m, '\n');
08c18323 594 total_obj_size += obj->base.size;
ca1543be 595 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
08c18323
CW
596 }
597
598 mutex_unlock(&dev->struct_mutex);
599
c44ef60e 600 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
08c18323 601 count, total_obj_size, total_gtt_size);
f2123818 602 kvfree(objects);
08c18323
CW
603
604 return 0;
605}
606
493018dc
BV
607static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
608{
36cdd013
DW
609 struct drm_i915_private *dev_priv = node_to_i915(m->private);
610 struct drm_device *dev = &dev_priv->drm;
493018dc 611 struct drm_i915_gem_object *obj;
e2f80391 612 struct intel_engine_cs *engine;
3b3f1650 613 enum intel_engine_id id;
8d9d5744 614 int total = 0;
b4ac5afc 615 int ret, j;
493018dc
BV
616
617 ret = mutex_lock_interruptible(&dev->struct_mutex);
618 if (ret)
619 return ret;
620
3b3f1650 621 for_each_engine(engine, dev_priv, id) {
e2f80391 622 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
8d9d5744
CW
623 int count;
624
625 count = 0;
626 list_for_each_entry(obj,
e2f80391 627 &engine->batch_pool.cache_list[j],
8d9d5744
CW
628 batch_pool_link)
629 count++;
630 seq_printf(m, "%s cache[%d]: %d objects\n",
e2f80391 631 engine->name, j, count);
8d9d5744
CW
632
633 list_for_each_entry(obj,
e2f80391 634 &engine->batch_pool.cache_list[j],
8d9d5744
CW
635 batch_pool_link) {
636 seq_puts(m, " ");
637 describe_obj(m, obj);
638 seq_putc(m, '\n');
639 }
640
641 total += count;
06fbca71 642 }
493018dc
BV
643 }
644
8d9d5744 645 seq_printf(m, "total: %d\n", total);
493018dc
BV
646
647 mutex_unlock(&dev->struct_mutex);
648
649 return 0;
650}
651
80d89350
TU
652static void gen8_display_interrupt_info(struct seq_file *m)
653{
654 struct drm_i915_private *dev_priv = node_to_i915(m->private);
655 int pipe;
656
657 for_each_pipe(dev_priv, pipe) {
658 enum intel_display_power_domain power_domain;
659
660 power_domain = POWER_DOMAIN_PIPE(pipe);
661 if (!intel_display_power_get_if_enabled(dev_priv,
662 power_domain)) {
663 seq_printf(m, "Pipe %c power disabled\n",
664 pipe_name(pipe));
665 continue;
666 }
667 seq_printf(m, "Pipe %c IMR:\t%08x\n",
668 pipe_name(pipe),
669 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
670 seq_printf(m, "Pipe %c IIR:\t%08x\n",
671 pipe_name(pipe),
672 I915_READ(GEN8_DE_PIPE_IIR(pipe)));
673 seq_printf(m, "Pipe %c IER:\t%08x\n",
674 pipe_name(pipe),
675 I915_READ(GEN8_DE_PIPE_IER(pipe)));
676
677 intel_display_power_put(dev_priv, power_domain);
678 }
679
680 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
681 I915_READ(GEN8_DE_PORT_IMR));
682 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
683 I915_READ(GEN8_DE_PORT_IIR));
684 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
685 I915_READ(GEN8_DE_PORT_IER));
686
687 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
688 I915_READ(GEN8_DE_MISC_IMR));
689 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
690 I915_READ(GEN8_DE_MISC_IIR));
691 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
692 I915_READ(GEN8_DE_MISC_IER));
693
694 seq_printf(m, "PCU interrupt mask:\t%08x\n",
695 I915_READ(GEN8_PCU_IMR));
696 seq_printf(m, "PCU interrupt identity:\t%08x\n",
697 I915_READ(GEN8_PCU_IIR));
698 seq_printf(m, "PCU interrupt enable:\t%08x\n",
699 I915_READ(GEN8_PCU_IER));
700}
701
2017263e
BG
702static int i915_interrupt_info(struct seq_file *m, void *data)
703{
36cdd013 704 struct drm_i915_private *dev_priv = node_to_i915(m->private);
e2f80391 705 struct intel_engine_cs *engine;
3b3f1650 706 enum intel_engine_id id;
4bb05040 707 int i, pipe;
de227ef0 708
c8c8fb33 709 intel_runtime_pm_get(dev_priv);
2017263e 710
36cdd013 711 if (IS_CHERRYVIEW(dev_priv)) {
74e1ca8c
VS
712 seq_printf(m, "Master Interrupt Control:\t%08x\n",
713 I915_READ(GEN8_MASTER_IRQ));
714
715 seq_printf(m, "Display IER:\t%08x\n",
716 I915_READ(VLV_IER));
717 seq_printf(m, "Display IIR:\t%08x\n",
718 I915_READ(VLV_IIR));
719 seq_printf(m, "Display IIR_RW:\t%08x\n",
720 I915_READ(VLV_IIR_RW));
721 seq_printf(m, "Display IMR:\t%08x\n",
722 I915_READ(VLV_IMR));
9c870d03
CW
723 for_each_pipe(dev_priv, pipe) {
724 enum intel_display_power_domain power_domain;
725
726 power_domain = POWER_DOMAIN_PIPE(pipe);
727 if (!intel_display_power_get_if_enabled(dev_priv,
728 power_domain)) {
729 seq_printf(m, "Pipe %c power disabled\n",
730 pipe_name(pipe));
731 continue;
732 }
733
74e1ca8c
VS
734 seq_printf(m, "Pipe %c stat:\t%08x\n",
735 pipe_name(pipe),
736 I915_READ(PIPESTAT(pipe)));
737
9c870d03
CW
738 intel_display_power_put(dev_priv, power_domain);
739 }
740
741 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
74e1ca8c
VS
742 seq_printf(m, "Port hotplug:\t%08x\n",
743 I915_READ(PORT_HOTPLUG_EN));
744 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
745 I915_READ(VLV_DPFLIPSTAT));
746 seq_printf(m, "DPINVGTT:\t%08x\n",
747 I915_READ(DPINVGTT));
9c870d03 748 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
74e1ca8c
VS
749
750 for (i = 0; i < 4; i++) {
751 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
752 i, I915_READ(GEN8_GT_IMR(i)));
753 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
754 i, I915_READ(GEN8_GT_IIR(i)));
755 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
756 i, I915_READ(GEN8_GT_IER(i)));
757 }
758
759 seq_printf(m, "PCU interrupt mask:\t%08x\n",
760 I915_READ(GEN8_PCU_IMR));
761 seq_printf(m, "PCU interrupt identity:\t%08x\n",
762 I915_READ(GEN8_PCU_IIR));
763 seq_printf(m, "PCU interrupt enable:\t%08x\n",
764 I915_READ(GEN8_PCU_IER));
80d89350
TU
765 } else if (INTEL_GEN(dev_priv) >= 11) {
766 seq_printf(m, "Master Interrupt Control: %08x\n",
767 I915_READ(GEN11_GFX_MSTR_IRQ));
768
769 seq_printf(m, "Render/Copy Intr Enable: %08x\n",
770 I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
771 seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
772 I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
773 seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
774 I915_READ(GEN11_GUC_SG_INTR_ENABLE));
775 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
776 I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
777 seq_printf(m, "Crypto Intr Enable:\t %08x\n",
778 I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
779 seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
780 I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
781
782 seq_printf(m, "Display Interrupt Control:\t%08x\n",
783 I915_READ(GEN11_DISPLAY_INT_CTL));
784
785 gen8_display_interrupt_info(m);
36cdd013 786 } else if (INTEL_GEN(dev_priv) >= 8) {
a123f157
BW
787 seq_printf(m, "Master Interrupt Control:\t%08x\n",
788 I915_READ(GEN8_MASTER_IRQ));
789
790 for (i = 0; i < 4; i++) {
791 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
792 i, I915_READ(GEN8_GT_IMR(i)));
793 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
794 i, I915_READ(GEN8_GT_IIR(i)));
795 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
796 i, I915_READ(GEN8_GT_IER(i)));
797 }
798
80d89350 799 gen8_display_interrupt_info(m);
36cdd013 800 } else if (IS_VALLEYVIEW(dev_priv)) {
7e231dbe
JB
801 seq_printf(m, "Display IER:\t%08x\n",
802 I915_READ(VLV_IER));
803 seq_printf(m, "Display IIR:\t%08x\n",
804 I915_READ(VLV_IIR));
805 seq_printf(m, "Display IIR_RW:\t%08x\n",
806 I915_READ(VLV_IIR_RW));
807 seq_printf(m, "Display IMR:\t%08x\n",
808 I915_READ(VLV_IMR));
4f4631af
CW
809 for_each_pipe(dev_priv, pipe) {
810 enum intel_display_power_domain power_domain;
811
812 power_domain = POWER_DOMAIN_PIPE(pipe);
813 if (!intel_display_power_get_if_enabled(dev_priv,
814 power_domain)) {
815 seq_printf(m, "Pipe %c power disabled\n",
816 pipe_name(pipe));
817 continue;
818 }
819
7e231dbe
JB
820 seq_printf(m, "Pipe %c stat:\t%08x\n",
821 pipe_name(pipe),
822 I915_READ(PIPESTAT(pipe)));
4f4631af
CW
823 intel_display_power_put(dev_priv, power_domain);
824 }
7e231dbe
JB
825
826 seq_printf(m, "Master IER:\t%08x\n",
827 I915_READ(VLV_MASTER_IER));
828
829 seq_printf(m, "Render IER:\t%08x\n",
830 I915_READ(GTIER));
831 seq_printf(m, "Render IIR:\t%08x\n",
832 I915_READ(GTIIR));
833 seq_printf(m, "Render IMR:\t%08x\n",
834 I915_READ(GTIMR));
835
836 seq_printf(m, "PM IER:\t\t%08x\n",
837 I915_READ(GEN6_PMIER));
838 seq_printf(m, "PM IIR:\t\t%08x\n",
839 I915_READ(GEN6_PMIIR));
840 seq_printf(m, "PM IMR:\t\t%08x\n",
841 I915_READ(GEN6_PMIMR));
842
843 seq_printf(m, "Port hotplug:\t%08x\n",
844 I915_READ(PORT_HOTPLUG_EN));
845 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
846 I915_READ(VLV_DPFLIPSTAT));
847 seq_printf(m, "DPINVGTT:\t%08x\n",
848 I915_READ(DPINVGTT));
849
36cdd013 850 } else if (!HAS_PCH_SPLIT(dev_priv)) {
5f6a1695
ZW
851 seq_printf(m, "Interrupt enable: %08x\n",
852 I915_READ(IER));
853 seq_printf(m, "Interrupt identity: %08x\n",
854 I915_READ(IIR));
855 seq_printf(m, "Interrupt mask: %08x\n",
856 I915_READ(IMR));
055e393f 857 for_each_pipe(dev_priv, pipe)
9db4a9c7
JB
858 seq_printf(m, "Pipe %c stat: %08x\n",
859 pipe_name(pipe),
860 I915_READ(PIPESTAT(pipe)));
5f6a1695
ZW
861 } else {
862 seq_printf(m, "North Display Interrupt enable: %08x\n",
863 I915_READ(DEIER));
864 seq_printf(m, "North Display Interrupt identity: %08x\n",
865 I915_READ(DEIIR));
866 seq_printf(m, "North Display Interrupt mask: %08x\n",
867 I915_READ(DEIMR));
868 seq_printf(m, "South Display Interrupt enable: %08x\n",
869 I915_READ(SDEIER));
870 seq_printf(m, "South Display Interrupt identity: %08x\n",
871 I915_READ(SDEIIR));
872 seq_printf(m, "South Display Interrupt mask: %08x\n",
873 I915_READ(SDEIMR));
874 seq_printf(m, "Graphics Interrupt enable: %08x\n",
875 I915_READ(GTIER));
876 seq_printf(m, "Graphics Interrupt identity: %08x\n",
877 I915_READ(GTIIR));
878 seq_printf(m, "Graphics Interrupt mask: %08x\n",
879 I915_READ(GTIMR));
880 }
80d89350
TU
881
882 if (INTEL_GEN(dev_priv) >= 11) {
883 seq_printf(m, "RCS Intr Mask:\t %08x\n",
884 I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
885 seq_printf(m, "BCS Intr Mask:\t %08x\n",
886 I915_READ(GEN11_BCS_RSVD_INTR_MASK));
887 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
888 I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
889 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
890 I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
891 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
892 I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
893 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
894 I915_READ(GEN11_GUC_SG_INTR_MASK));
895 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
896 I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
897 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
898 I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
899 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
900 I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
901
902 } else if (INTEL_GEN(dev_priv) >= 6) {
d5acadfe 903 for_each_engine(engine, dev_priv, id) {
a2c7f6fd
CW
904 seq_printf(m,
905 "Graphics Interrupt mask (%s): %08x\n",
e2f80391 906 engine->name, I915_READ_IMR(engine));
9862e600 907 }
9862e600 908 }
80d89350 909
c8c8fb33 910 intel_runtime_pm_put(dev_priv);
de227ef0 911
2017263e
BG
912 return 0;
913}
914
a6172a80
CW
915static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
916{
36cdd013
DW
917 struct drm_i915_private *dev_priv = node_to_i915(m->private);
918 struct drm_device *dev = &dev_priv->drm;
de227ef0
CW
919 int i, ret;
920
921 ret = mutex_lock_interruptible(&dev->struct_mutex);
922 if (ret)
923 return ret;
a6172a80 924
a6172a80
CW
925 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
926 for (i = 0; i < dev_priv->num_fence_regs; i++) {
49ef5294 927 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
a6172a80 928
6c085a72
CW
929 seq_printf(m, "Fence %d, pin count = %d, object = ",
930 i, dev_priv->fence_regs[i].pin_count);
49ef5294 931 if (!vma)
267f0c90 932 seq_puts(m, "unused");
c2c347a9 933 else
49ef5294 934 describe_obj(m, vma->obj);
267f0c90 935 seq_putc(m, '\n');
a6172a80
CW
936 }
937
05394f39 938 mutex_unlock(&dev->struct_mutex);
a6172a80
CW
939 return 0;
940}
941
98a2f411 942#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
5a4c6f1b
CW
943static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
944 size_t count, loff_t *pos)
d5442303 945{
0e39037b 946 struct i915_gpu_state *error;
5a4c6f1b 947 ssize_t ret;
0e39037b 948 void *buf;
d5442303 949
0e39037b 950 error = file->private_data;
5a4c6f1b
CW
951 if (!error)
952 return 0;
d5442303 953
0e39037b
CW
954 /* Bounce buffer required because of kernfs __user API convenience. */
955 buf = kmalloc(count, GFP_KERNEL);
956 if (!buf)
957 return -ENOMEM;
d5442303 958
0e39037b
CW
959 ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
960 if (ret <= 0)
5a4c6f1b 961 goto out;
d5442303 962
0e39037b
CW
963 if (!copy_to_user(ubuf, buf, ret))
964 *pos += ret;
965 else
966 ret = -EFAULT;
d5442303 967
5a4c6f1b 968out:
0e39037b 969 kfree(buf);
5a4c6f1b
CW
970 return ret;
971}
edc3d884 972
5a4c6f1b
CW
973static int gpu_state_release(struct inode *inode, struct file *file)
974{
975 i915_gpu_state_put(file->private_data);
edc3d884 976 return 0;
d5442303
DV
977}
978
5a4c6f1b 979static int i915_gpu_info_open(struct inode *inode, struct file *file)
d5442303 980{
090e5fe3 981 struct drm_i915_private *i915 = inode->i_private;
5a4c6f1b 982 struct i915_gpu_state *gpu;
d5442303 983
090e5fe3
CW
984 intel_runtime_pm_get(i915);
985 gpu = i915_capture_gpu_state(i915);
986 intel_runtime_pm_put(i915);
107c595c
CW
987 if (IS_ERR(gpu))
988 return PTR_ERR(gpu);
d5442303 989
5a4c6f1b 990 file->private_data = gpu;
edc3d884
MK
991 return 0;
992}
993
5a4c6f1b
CW
994static const struct file_operations i915_gpu_info_fops = {
995 .owner = THIS_MODULE,
996 .open = i915_gpu_info_open,
997 .read = gpu_state_read,
998 .llseek = default_llseek,
999 .release = gpu_state_release,
1000};
1001
1002static ssize_t
1003i915_error_state_write(struct file *filp,
1004 const char __user *ubuf,
1005 size_t cnt,
1006 loff_t *ppos)
4dc955f7 1007{
5a4c6f1b 1008 struct i915_gpu_state *error = filp->private_data;
4dc955f7 1009
5a4c6f1b
CW
1010 if (!error)
1011 return 0;
edc3d884 1012
5a4c6f1b
CW
1013 DRM_DEBUG_DRIVER("Resetting error state\n");
1014 i915_reset_error_state(error->i915);
edc3d884 1015
5a4c6f1b
CW
1016 return cnt;
1017}
edc3d884 1018
5a4c6f1b
CW
1019static int i915_error_state_open(struct inode *inode, struct file *file)
1020{
107c595c
CW
1021 struct i915_gpu_state *error;
1022
1023 error = i915_first_error_state(inode->i_private);
1024 if (IS_ERR(error))
1025 return PTR_ERR(error);
1026
1027 file->private_data = error;
5a4c6f1b 1028 return 0;
d5442303
DV
1029}
1030
1031static const struct file_operations i915_error_state_fops = {
1032 .owner = THIS_MODULE,
1033 .open = i915_error_state_open,
5a4c6f1b 1034 .read = gpu_state_read,
d5442303
DV
1035 .write = i915_error_state_write,
1036 .llseek = default_llseek,
5a4c6f1b 1037 .release = gpu_state_release,
d5442303 1038};
98a2f411
CW
1039#endif
1040
647416f9
KC
1041static int
1042i915_next_seqno_set(void *data, u64 val)
1043{
36cdd013
DW
1044 struct drm_i915_private *dev_priv = data;
1045 struct drm_device *dev = &dev_priv->drm;
40633219
MK
1046 int ret;
1047
40633219
MK
1048 ret = mutex_lock_interruptible(&dev->struct_mutex);
1049 if (ret)
1050 return ret;
1051
65c475c6 1052 intel_runtime_pm_get(dev_priv);
73cb9701 1053 ret = i915_gem_set_global_seqno(dev, val);
65c475c6
CW
1054 intel_runtime_pm_put(dev_priv);
1055
40633219
MK
1056 mutex_unlock(&dev->struct_mutex);
1057
647416f9 1058 return ret;
40633219
MK
1059}
1060
647416f9 1061DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
9b6586ae 1062 NULL, i915_next_seqno_set,
3a3b4f98 1063 "0x%llx\n");
40633219 1064
adb4bd12 1065static int i915_frequency_info(struct seq_file *m, void *unused)
f97108d1 1066{
36cdd013 1067 struct drm_i915_private *dev_priv = node_to_i915(m->private);
562d9bae 1068 struct intel_rps *rps = &dev_priv->gt_pm.rps;
c8c8fb33
PZ
1069 int ret = 0;
1070
1071 intel_runtime_pm_get(dev_priv);
3b8d8d91 1072
36cdd013 1073 if (IS_GEN5(dev_priv)) {
3b8d8d91
JB
1074 u16 rgvswctl = I915_READ16(MEMSWCTL);
1075 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1076
1077 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1078 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1079 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1080 MEMSTAT_VID_SHIFT);
1081 seq_printf(m, "Current P-state: %d\n",
1082 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
36cdd013 1083 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
0d6fc92a 1084 u32 rpmodectl, freq_sts;
666a4537 1085
9f817501 1086 mutex_lock(&dev_priv->pcu_lock);
0d6fc92a
SAK
1087
1088 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1089 seq_printf(m, "Video Turbo Mode: %s\n",
1090 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1091 seq_printf(m, "HW control enabled: %s\n",
1092 yesno(rpmodectl & GEN6_RP_ENABLE));
1093 seq_printf(m, "SW control enabled: %s\n",
1094 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1095 GEN6_RP_MEDIA_SW_MODE));
1096
666a4537
WB
1097 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1098 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1099 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1100
1101 seq_printf(m, "actual GPU freq: %d MHz\n",
1102 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1103
1104 seq_printf(m, "current GPU freq: %d MHz\n",
562d9bae 1105 intel_gpu_freq(dev_priv, rps->cur_freq));
666a4537
WB
1106
1107 seq_printf(m, "max GPU freq: %d MHz\n",
562d9bae 1108 intel_gpu_freq(dev_priv, rps->max_freq));
666a4537
WB
1109
1110 seq_printf(m, "min GPU freq: %d MHz\n",
562d9bae 1111 intel_gpu_freq(dev_priv, rps->min_freq));
666a4537
WB
1112
1113 seq_printf(m, "idle GPU freq: %d MHz\n",
562d9bae 1114 intel_gpu_freq(dev_priv, rps->idle_freq));
666a4537
WB
1115
1116 seq_printf(m,
1117 "efficient (RPe) frequency: %d MHz\n",
562d9bae 1118 intel_gpu_freq(dev_priv, rps->efficient_freq));
9f817501 1119 mutex_unlock(&dev_priv->pcu_lock);
36cdd013 1120 } else if (INTEL_GEN(dev_priv) >= 6) {
35040562
BP
1121 u32 rp_state_limits;
1122 u32 gt_perf_status;
1123 u32 rp_state_cap;
0d8f9491 1124 u32 rpmodectl, rpinclimit, rpdeclimit;
8e8c06cd 1125 u32 rpstat, cagf, reqf;
ccab5c82
JB
1126 u32 rpupei, rpcurup, rpprevup;
1127 u32 rpdownei, rpcurdown, rpprevdown;
9dd3c605 1128 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
3b8d8d91
JB
1129 int max_freq;
1130
35040562 1131 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
cc3f90f0 1132 if (IS_GEN9_LP(dev_priv)) {
35040562
BP
1133 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1134 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1135 } else {
1136 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1137 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1138 }
1139
3b8d8d91 1140 /* RPSTAT1 is in the GT power well */
59bad947 1141 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
3b8d8d91 1142
8e8c06cd 1143 reqf = I915_READ(GEN6_RPNSWREQ);
35ceabf3 1144 if (INTEL_GEN(dev_priv) >= 9)
60260a5b
AG
1145 reqf >>= 23;
1146 else {
1147 reqf &= ~GEN6_TURBO_DISABLE;
36cdd013 1148 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
60260a5b
AG
1149 reqf >>= 24;
1150 else
1151 reqf >>= 25;
1152 }
7c59a9c1 1153 reqf = intel_gpu_freq(dev_priv, reqf);
8e8c06cd 1154
0d8f9491
CW
1155 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1156 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1157 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1158
ccab5c82 1159 rpstat = I915_READ(GEN6_RPSTAT1);
d6cda9c7
AG
1160 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1161 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1162 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1163 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1164 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1165 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
c84b2705
TU
1166 cagf = intel_gpu_freq(dev_priv,
1167 intel_get_cagf(dev_priv, rpstat));
ccab5c82 1168
59bad947 1169 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
d1ebd816 1170
6b7a6a7b
OM
1171 if (INTEL_GEN(dev_priv) >= 11) {
1172 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1173 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1174 /*
1175 * The equivalent to the PM ISR & IIR cannot be read
1176 * without affecting the current state of the system
1177 */
1178 pm_isr = 0;
1179 pm_iir = 0;
1180 } else if (INTEL_GEN(dev_priv) >= 8) {
9dd3c605
PZ
1181 pm_ier = I915_READ(GEN8_GT_IER(2));
1182 pm_imr = I915_READ(GEN8_GT_IMR(2));
1183 pm_isr = I915_READ(GEN8_GT_ISR(2));
1184 pm_iir = I915_READ(GEN8_GT_IIR(2));
6b7a6a7b
OM
1185 } else {
1186 pm_ier = I915_READ(GEN6_PMIER);
1187 pm_imr = I915_READ(GEN6_PMIMR);
1188 pm_isr = I915_READ(GEN6_PMISR);
1189 pm_iir = I915_READ(GEN6_PMIIR);
9dd3c605 1190 }
6b7a6a7b
OM
1191 pm_mask = I915_READ(GEN6_PMINTRMSK);
1192
960e5465
SAK
1193 seq_printf(m, "Video Turbo Mode: %s\n",
1194 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1195 seq_printf(m, "HW control enabled: %s\n",
1196 yesno(rpmodectl & GEN6_RP_ENABLE));
1197 seq_printf(m, "SW control enabled: %s\n",
1198 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1199 GEN6_RP_MEDIA_SW_MODE));
6b7a6a7b
OM
1200
1201 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1202 pm_ier, pm_imr, pm_mask);
1203 if (INTEL_GEN(dev_priv) <= 10)
1204 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1205 pm_isr, pm_iir);
5dd04556 1206 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
562d9bae 1207 rps->pm_intrmsk_mbz);
3b8d8d91 1208 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
3b8d8d91 1209 seq_printf(m, "Render p-state ratio: %d\n",
35ceabf3 1210 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
3b8d8d91
JB
1211 seq_printf(m, "Render p-state VID: %d\n",
1212 gt_perf_status & 0xff);
1213 seq_printf(m, "Render p-state limit: %d\n",
1214 rp_state_limits & 0xff);
0d8f9491
CW
1215 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1216 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1217 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1218 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
8e8c06cd 1219 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
f82855d3 1220 seq_printf(m, "CAGF: %dMHz\n", cagf);
d6cda9c7
AG
1221 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1222 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1223 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1224 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1225 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1226 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
60548c55
CW
1227 seq_printf(m, "Up threshold: %d%%\n",
1228 rps->power.up_threshold);
d86ed34a 1229
d6cda9c7
AG
1230 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1231 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1232 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1233 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1234 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1235 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
60548c55
CW
1236 seq_printf(m, "Down threshold: %d%%\n",
1237 rps->power.down_threshold);
3b8d8d91 1238
cc3f90f0 1239 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
35040562 1240 rp_state_cap >> 16) & 0xff;
35ceabf3 1241 max_freq *= (IS_GEN9_BC(dev_priv) ||
2b2874ef 1242 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1243 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
7c59a9c1 1244 intel_gpu_freq(dev_priv, max_freq));
3b8d8d91
JB
1245
1246 max_freq = (rp_state_cap & 0xff00) >> 8;
35ceabf3 1247 max_freq *= (IS_GEN9_BC(dev_priv) ||
2b2874ef 1248 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1249 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
7c59a9c1 1250 intel_gpu_freq(dev_priv, max_freq));
3b8d8d91 1251
cc3f90f0 1252 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
35040562 1253 rp_state_cap >> 0) & 0xff;
35ceabf3 1254 max_freq *= (IS_GEN9_BC(dev_priv) ||
2b2874ef 1255 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1256 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
7c59a9c1 1257 intel_gpu_freq(dev_priv, max_freq));
31c77388 1258 seq_printf(m, "Max overclocked frequency: %dMHz\n",
562d9bae 1259 intel_gpu_freq(dev_priv, rps->max_freq));
aed242ff 1260
d86ed34a 1261 seq_printf(m, "Current freq: %d MHz\n",
562d9bae 1262 intel_gpu_freq(dev_priv, rps->cur_freq));
d86ed34a 1263 seq_printf(m, "Actual freq: %d MHz\n", cagf);
aed242ff 1264 seq_printf(m, "Idle freq: %d MHz\n",
562d9bae 1265 intel_gpu_freq(dev_priv, rps->idle_freq));
d86ed34a 1266 seq_printf(m, "Min freq: %d MHz\n",
562d9bae 1267 intel_gpu_freq(dev_priv, rps->min_freq));
29ecd78d 1268 seq_printf(m, "Boost freq: %d MHz\n",
562d9bae 1269 intel_gpu_freq(dev_priv, rps->boost_freq));
d86ed34a 1270 seq_printf(m, "Max freq: %d MHz\n",
562d9bae 1271 intel_gpu_freq(dev_priv, rps->max_freq));
d86ed34a
CW
1272 seq_printf(m,
1273 "efficient (RPe) frequency: %d MHz\n",
562d9bae 1274 intel_gpu_freq(dev_priv, rps->efficient_freq));
3b8d8d91 1275 } else {
267f0c90 1276 seq_puts(m, "no P-state info available\n");
3b8d8d91 1277 }
f97108d1 1278
49cd97a3 1279 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1170f28c
MK
1280 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1281 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1282
c8c8fb33
PZ
1283 intel_runtime_pm_put(dev_priv);
1284 return ret;
f97108d1
JB
1285}
1286
d636951e
BW
1287static void i915_instdone_info(struct drm_i915_private *dev_priv,
1288 struct seq_file *m,
1289 struct intel_instdone *instdone)
1290{
f9e61372
BW
1291 int slice;
1292 int subslice;
1293
d636951e
BW
1294 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1295 instdone->instdone);
1296
1297 if (INTEL_GEN(dev_priv) <= 3)
1298 return;
1299
1300 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1301 instdone->slice_common);
1302
1303 if (INTEL_GEN(dev_priv) <= 6)
1304 return;
1305
f9e61372
BW
1306 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1307 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1308 slice, subslice, instdone->sampler[slice][subslice]);
1309
1310 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1311 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1312 slice, subslice, instdone->row[slice][subslice]);
d636951e
BW
1313}
1314
f654449a
CW
/*
 * i915_hangcheck_info - report the current hangcheck / reset state.
 *
 * Prints global error flags (wedged, reset in progress), hangcheck timer
 * status, then a per-engine summary: seqnos, waiters (walked under the
 * breadcrumbs rb_lock), ACTHD, last hangcheck action and, for the render
 * engine, the decoded INSTDONE snapshots.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	/* Global reset/error status first. */
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample the HW state under a runtime-PM wakeref. */
	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine));
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled),
			   yesno(engine->hangcheck.wedged));

		/* Waiter tree is protected by the breadcrumbs rb_lock. */
		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		/* INSTDONE is only captured for the render engine. */
		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1409
061d06a2
MT
1410static int i915_reset_info(struct seq_file *m, void *unused)
1411{
1412 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1413 struct i915_gpu_error *error = &dev_priv->gpu_error;
1414 struct intel_engine_cs *engine;
1415 enum intel_engine_id id;
1416
1417 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1418
1419 for_each_engine(engine, dev_priv, id) {
1420 seq_printf(m, "%s = %u\n", engine->name,
1421 i915_reset_engine_count(error, engine));
1422 }
1423
1424 return 0;
1425}
1426
4d85529d 1427static int ironlake_drpc_info(struct seq_file *m)
f97108d1 1428{
36cdd013 1429 struct drm_i915_private *dev_priv = node_to_i915(m->private);
616fdb5a
BW
1430 u32 rgvmodectl, rstdbyctl;
1431 u16 crstandvid;
616fdb5a 1432
616fdb5a
BW
1433 rgvmodectl = I915_READ(MEMMODECTL);
1434 rstdbyctl = I915_READ(RSTDBYCTL);
1435 crstandvid = I915_READ16(CRSTANDVID);
1436
742f491d 1437 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
f97108d1
JB
1438 seq_printf(m, "Boost freq: %d\n",
1439 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1440 MEMMODE_BOOST_FREQ_SHIFT);
1441 seq_printf(m, "HW control enabled: %s\n",
742f491d 1442 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
f97108d1 1443 seq_printf(m, "SW control enabled: %s\n",
742f491d 1444 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
f97108d1 1445 seq_printf(m, "Gated voltage change: %s\n",
742f491d 1446 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
f97108d1
JB
1447 seq_printf(m, "Starting frequency: P%d\n",
1448 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
7648fa99 1449 seq_printf(m, "Max P-state: P%d\n",
f97108d1 1450 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
7648fa99
JB
1451 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1452 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1453 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1454 seq_printf(m, "Render standby enabled: %s\n",
742f491d 1455 yesno(!(rstdbyctl & RCX_SW_EXIT)));
267f0c90 1456 seq_puts(m, "Current RS state: ");
88271da3
JB
1457 switch (rstdbyctl & RSX_STATUS_MASK) {
1458 case RSX_STATUS_ON:
267f0c90 1459 seq_puts(m, "on\n");
88271da3
JB
1460 break;
1461 case RSX_STATUS_RC1:
267f0c90 1462 seq_puts(m, "RC1\n");
88271da3
JB
1463 break;
1464 case RSX_STATUS_RC1E:
267f0c90 1465 seq_puts(m, "RC1E\n");
88271da3
JB
1466 break;
1467 case RSX_STATUS_RS1:
267f0c90 1468 seq_puts(m, "RS1\n");
88271da3
JB
1469 break;
1470 case RSX_STATUS_RS2:
267f0c90 1471 seq_puts(m, "RS2 (RC6)\n");
88271da3
JB
1472 break;
1473 case RSX_STATUS_RS3:
267f0c90 1474 seq_puts(m, "RC3 (RC6+)\n");
88271da3
JB
1475 break;
1476 default:
267f0c90 1477 seq_puts(m, "unknown\n");
88271da3
JB
1478 break;
1479 }
f97108d1
JB
1480
1481 return 0;
1482}
1483
f65367b5 1484static int i915_forcewake_domains(struct seq_file *m, void *data)
669ab5aa 1485{
233ebf57 1486 struct drm_i915_private *i915 = node_to_i915(m->private);
b2cff0db 1487 struct intel_uncore_forcewake_domain *fw_domain;
d2dc94bc 1488 unsigned int tmp;
b2cff0db 1489
d7a133d8
CW
1490 seq_printf(m, "user.bypass_count = %u\n",
1491 i915->uncore.user_forcewake.count);
1492
233ebf57 1493 for_each_fw_domain(fw_domain, i915, tmp)
b2cff0db 1494 seq_printf(m, "%s.wake_count = %u\n",
33c582c1 1495 intel_uncore_forcewake_domain_to_str(fw_domain->id),
233ebf57 1496 READ_ONCE(fw_domain->wake_count));
669ab5aa 1497
b2cff0db
CW
1498 return 0;
1499}
1500
1362877e
MK
1501static void print_rc6_res(struct seq_file *m,
1502 const char *title,
1503 const i915_reg_t reg)
1504{
1505 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1506
1507 seq_printf(m, "%s %u (%llu us)\n",
1508 title, I915_READ(reg),
1509 intel_rc6_residency_us(dev_priv, reg));
1510}
1511
b2cff0db
CW
1512static int vlv_drpc_info(struct seq_file *m)
1513{
36cdd013 1514 struct drm_i915_private *dev_priv = node_to_i915(m->private);
0d6fc92a 1515 u32 rcctl1, pw_status;
669ab5aa 1516
6b312cd3 1517 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
669ab5aa
D
1518 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1519
669ab5aa
D
1520 seq_printf(m, "RC6 Enabled: %s\n",
1521 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1522 GEN6_RC_CTL_EI_MODE(1))));
1523 seq_printf(m, "Render Power Well: %s\n",
6b312cd3 1524 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
669ab5aa 1525 seq_printf(m, "Media Power Well: %s\n",
6b312cd3 1526 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
669ab5aa 1527
1362877e
MK
1528 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1529 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
9cc19be5 1530
f65367b5 1531 return i915_forcewake_domains(m, NULL);
669ab5aa
D
1532}
1533
4d85529d
BW
/*
 * gen6_drpc_info - gen6+ render C-state (RC6) report.
 *
 * Decodes GEN6_GT_CORE_STATUS and GEN6_RC_CONTROL into the enabled RC
 * states and the current RC state; on gen9+ also reports the power-gating
 * enables/status, and on gen6/7 queries the pcode for the RC6 voltage IDs.
 * Ends with the RC6 residency counters and the forcewake domain summary.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* _FW read: caller structure keeps this outside forcewake handling. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 VIDs are only exposed via pcode on gen6/7. */
	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		/* RC0 with cores power-gated is reported separately. */
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1621
1622static int i915_drpc_info(struct seq_file *m, void *unused)
1623{
36cdd013 1624 struct drm_i915_private *dev_priv = node_to_i915(m->private);
cf632bd6
CW
1625 int err;
1626
1627 intel_runtime_pm_get(dev_priv);
4d85529d 1628
36cdd013 1629 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
cf632bd6 1630 err = vlv_drpc_info(m);
36cdd013 1631 else if (INTEL_GEN(dev_priv) >= 6)
cf632bd6 1632 err = gen6_drpc_info(m);
4d85529d 1633 else
cf632bd6
CW
1634 err = ironlake_drpc_info(m);
1635
1636 intel_runtime_pm_put(dev_priv);
1637
1638 return err;
4d85529d
BW
1639}
1640
9a851789
DV
1641static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1642{
36cdd013 1643 struct drm_i915_private *dev_priv = node_to_i915(m->private);
9a851789
DV
1644
1645 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1646 dev_priv->fb_tracking.busy_bits);
1647
1648 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1649 dev_priv->fb_tracking.flip_bits);
1650
1651 return 0;
1652}
1653
b5e50c3f
JB
1654static int i915_fbc_status(struct seq_file *m, void *unused)
1655{
36cdd013 1656 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3138872c 1657 struct intel_fbc *fbc = &dev_priv->fbc;
b5e50c3f 1658
ab309a6a
MW
1659 if (!HAS_FBC(dev_priv))
1660 return -ENODEV;
b5e50c3f 1661
36623ef8 1662 intel_runtime_pm_get(dev_priv);
3138872c 1663 mutex_lock(&fbc->lock);
36623ef8 1664
0e631adc 1665 if (intel_fbc_is_active(dev_priv))
267f0c90 1666 seq_puts(m, "FBC enabled\n");
2e8144a5 1667 else
3138872c
CW
1668 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1669
3fd5d1ec
VS
1670 if (intel_fbc_is_active(dev_priv)) {
1671 u32 mask;
1672
1673 if (INTEL_GEN(dev_priv) >= 8)
1674 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1675 else if (INTEL_GEN(dev_priv) >= 7)
1676 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1677 else if (INTEL_GEN(dev_priv) >= 5)
1678 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1679 else if (IS_G4X(dev_priv))
1680 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1681 else
1682 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1683 FBC_STAT_COMPRESSED);
1684
1685 seq_printf(m, "Compressing: %s\n", yesno(mask));
0fc6a9dc 1686 }
31b9df10 1687
3138872c 1688 mutex_unlock(&fbc->lock);
36623ef8
PZ
1689 intel_runtime_pm_put(dev_priv);
1690
b5e50c3f
JB
1691 return 0;
1692}
1693
4127dc43 1694static int i915_fbc_false_color_get(void *data, u64 *val)
da46f936 1695{
36cdd013 1696 struct drm_i915_private *dev_priv = data;
da46f936 1697
36cdd013 1698 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
da46f936
RV
1699 return -ENODEV;
1700
da46f936 1701 *val = dev_priv->fbc.false_color;
da46f936
RV
1702
1703 return 0;
1704}
1705
4127dc43 1706static int i915_fbc_false_color_set(void *data, u64 val)
da46f936 1707{
36cdd013 1708 struct drm_i915_private *dev_priv = data;
da46f936
RV
1709 u32 reg;
1710
36cdd013 1711 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
da46f936
RV
1712 return -ENODEV;
1713
25ad93fd 1714 mutex_lock(&dev_priv->fbc.lock);
da46f936
RV
1715
1716 reg = I915_READ(ILK_DPFC_CONTROL);
1717 dev_priv->fbc.false_color = val;
1718
1719 I915_WRITE(ILK_DPFC_CONTROL, val ?
1720 (reg | FBC_CTL_FALSE_COLOR) :
1721 (reg & ~FBC_CTL_FALSE_COLOR));
1722
25ad93fd 1723 mutex_unlock(&dev_priv->fbc.lock);
da46f936
RV
1724 return 0;
1725}
1726
4127dc43
VS
1727DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1728 i915_fbc_false_color_get, i915_fbc_false_color_set,
da46f936
RV
1729 "%llu\n");
1730
92d44621
PZ
1731static int i915_ips_status(struct seq_file *m, void *unused)
1732{
36cdd013 1733 struct drm_i915_private *dev_priv = node_to_i915(m->private);
92d44621 1734
ab309a6a
MW
1735 if (!HAS_IPS(dev_priv))
1736 return -ENODEV;
92d44621 1737
36623ef8
PZ
1738 intel_runtime_pm_get(dev_priv);
1739
0eaa53f0 1740 seq_printf(m, "Enabled by kernel parameter: %s\n",
4f044a88 1741 yesno(i915_modparams.enable_ips));
0eaa53f0 1742
36cdd013 1743 if (INTEL_GEN(dev_priv) >= 8) {
0eaa53f0
RV
1744 seq_puts(m, "Currently: unknown\n");
1745 } else {
1746 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1747 seq_puts(m, "Currently: enabled\n");
1748 else
1749 seq_puts(m, "Currently: disabled\n");
1750 }
92d44621 1751
36623ef8
PZ
1752 intel_runtime_pm_put(dev_priv);
1753
92d44621
PZ
1754 return 0;
1755}
1756
4a9bef37
JB
/*
 * Report whether display self-refresh is currently enabled.  The enable
 * bit lives in a different register depending on the platform, hence
 * the platform-check cascade; the checks must stay in this order since
 * several predicates overlap.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	/* Need both a runtime-pm wakeref and display power to read SR regs. */
	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1786
7648fa99
JB
1787static int i915_emon_status(struct seq_file *m, void *unused)
1788{
36cdd013
DW
1789 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1790 struct drm_device *dev = &dev_priv->drm;
7648fa99 1791 unsigned long temp, chipset, gfx;
de227ef0
CW
1792 int ret;
1793
36cdd013 1794 if (!IS_GEN5(dev_priv))
582be6b4
CW
1795 return -ENODEV;
1796
cab870b7
JRS
1797 intel_runtime_pm_get(dev_priv);
1798
de227ef0
CW
1799 ret = mutex_lock_interruptible(&dev->struct_mutex);
1800 if (ret)
1801 return ret;
7648fa99
JB
1802
1803 temp = i915_mch_val(dev_priv);
1804 chipset = i915_chipset_val(dev_priv);
1805 gfx = i915_gfx_val(dev_priv);
de227ef0 1806 mutex_unlock(&dev->struct_mutex);
7648fa99
JB
1807
1808 seq_printf(m, "GMCH temp: %ld\n", temp);
1809 seq_printf(m, "Chipset power: %ld\n", chipset);
1810 seq_printf(m, "GFX power: %ld\n", gfx);
1811 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1812
cab870b7
JRS
1813 intel_runtime_pm_put(dev_priv);
1814
7648fa99
JB
1815 return 0;
1816}
1817
23b2f8bb
JB
/*
 * Dump the GPU-to-ring/CPU frequency mapping table by querying the PCU
 * mailbox for every GPU frequency step.  LLC platforms only (the table
 * describes how ring/IA frequency scales with GT frequency).
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* ia_freq is an in/out parameter of the mailbox read. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		/* Replies pack CPU freq in bits 0-7, ring freq in 8-15,
		 * both in 100 MHz units; undo the SCALER division above
		 * when converting back to MHz for display. */
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
1865
44834a67
CW
1866static int i915_opregion(struct seq_file *m, void *unused)
1867{
36cdd013
DW
1868 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1869 struct drm_device *dev = &dev_priv->drm;
44834a67
CW
1870 struct intel_opregion *opregion = &dev_priv->opregion;
1871 int ret;
1872
1873 ret = mutex_lock_interruptible(&dev->struct_mutex);
1874 if (ret)
0d38f009 1875 goto out;
44834a67 1876
2455a8e4
JN
1877 if (opregion->header)
1878 seq_write(m, opregion->header, OPREGION_SIZE);
44834a67
CW
1879
1880 mutex_unlock(&dev->struct_mutex);
1881
0d38f009 1882out:
44834a67
CW
1883 return 0;
1884}
1885
ada8f955
JN
1886static int i915_vbt(struct seq_file *m, void *unused)
1887{
36cdd013 1888 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
ada8f955
JN
1889
1890 if (opregion->vbt)
1891 seq_write(m, opregion->vbt, opregion->vbt_size);
1892
1893 return 0;
1894}
1895
37811fcc
CW
/*
 * List every framebuffer known to the driver: the fbdev/fbcon one first
 * (when fbdev emulation is compiled in), then all framebuffers on the
 * device's fb list, skipping the fbdev entry to avoid printing it twice.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	/* struct_mutex is held for describe_obj() below. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	/* fb_lock protects the mode_config framebuffer list. */
	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		/* Already printed as "fbcon" above. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1945
/* Print the software bookkeeping state of a context's ringbuffer. */
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
1951
e76d3630
BW
/*
 * Walk every GEM context and print its identity (hw_id, owning task or
 * kernel/deleted marker) plus the per-engine context state object and
 * ringbuffer.  The context list and per-context state are protected by
 * struct_mutex.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_puts(m, "HW context ");
		/* Only contexts with an assigned hw_id are on hw_id_link. */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			/* Owner closed its fd; context lingers. */
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' if this context still needs an sLLC remap. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
2007
ea16a3cd
DV
2008static const char *swizzle_string(unsigned swizzle)
2009{
aee56cff 2010 switch (swizzle) {
ea16a3cd
DV
2011 case I915_BIT_6_SWIZZLE_NONE:
2012 return "none";
2013 case I915_BIT_6_SWIZZLE_9:
2014 return "bit9";
2015 case I915_BIT_6_SWIZZLE_9_10:
2016 return "bit9/bit10";
2017 case I915_BIT_6_SWIZZLE_9_11:
2018 return "bit9/bit11";
2019 case I915_BIT_6_SWIZZLE_9_10_11:
2020 return "bit9/bit10/bit11";
2021 case I915_BIT_6_SWIZZLE_9_17:
2022 return "bit9/bit17";
2023 case I915_BIT_6_SWIZZLE_9_10_17:
2024 return "bit9/bit10/bit17";
2025 case I915_BIT_6_SWIZZLE_UNKNOWN:
8a168ca7 2026 return "unknown";
ea16a3cd
DV
2027 }
2028
2029 return "bug";
2030}
2031
/*
 * Dump the detected bit-6 swizzle configuration plus the raw memory
 * controller registers it was derived from (which registers exist
 * depends on generation).
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		/* Gen3/4: DRAM channel configuration registers. */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* Gen6+: per-channel DIMM config plus arbiter mode. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2078
1c60fef5
BW
/*
 * idr_for_each() callback: print one context's PPGTT state into the
 * seq_file passed via @data.  Always returns 0 so iteration continues.
 */
static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	if (!ppgtt) {
		seq_printf(m, " no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, " default context:\n");
	else
		seq_printf(m, " context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}
2099
36cdd013
DW
2100static void gen8_ppgtt_info(struct seq_file *m,
2101 struct drm_i915_private *dev_priv)
3cf17fc5 2102{
77df6772 2103 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
3b3f1650
AG
2104 struct intel_engine_cs *engine;
2105 enum intel_engine_id id;
b4ac5afc 2106 int i;
3cf17fc5 2107
77df6772
BW
2108 if (!ppgtt)
2109 return;
2110
3b3f1650 2111 for_each_engine(engine, dev_priv, id) {
e2f80391 2112 seq_printf(m, "%s\n", engine->name);
77df6772 2113 for (i = 0; i < 4; i++) {
e2f80391 2114 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
77df6772 2115 pdp <<= 32;
e2f80391 2116 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
a2a5b15c 2117 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
77df6772
BW
2118 }
2119 }
2120}
2121
36cdd013
DW
/*
 * Print the per-engine PPGTT registers for gen6/7 plus a dump of the
 * aliasing PPGTT, if one exists.
 */
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Gen6 has a single global GFX_MODE; gen7 has one per ring. */
	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
2154
/*
 * Dump global PPGTT registers, then every open DRM client's contexts.
 * Lock order: filelist_mutex (outer, protects dev->filelist) then
 * struct_mutex (inner); runtime-pm wakeref held across the register
 * reads and dropped on all exit paths via out_rpm.
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			/* Owning task exited while we were iterating. */
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		/* Print each of this client's contexts via per_file_ctx(). */
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
2196
f5a4c67d
CW
2197static int count_irq_waiters(struct drm_i915_private *i915)
2198{
e2f80391 2199 struct intel_engine_cs *engine;
3b3f1650 2200 enum intel_engine_id id;
f5a4c67d 2201 int count = 0;
f5a4c67d 2202
3b3f1650 2203 for_each_engine(engine, i915, id)
688e6c72 2204 count += intel_engine_has_waiter(engine);
f5a4c67d
CW
2205
2206 return count;
2207}
2208
7466c291
CW
2209static const char *rps_power_to_str(unsigned int power)
2210{
2211 static const char * const strings[] = {
2212 [LOW_POWER] = "low power",
2213 [BETWEEN] = "mixed",
2214 [HIGH_POWER] = "high power",
2215 };
2216
2217 if (power >= ARRAY_SIZE(strings) || !strings[power])
2218 return "unknown";
2219
2220 return strings[power];
2221}
2222
1854d5ca
CW
/*
 * Dump RPS (render power state / frequency scaling) status: current and
 * limit frequencies, per-client boost counts, and — when the GPU is
 * busy — the up/down autotuning counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	struct drm_file *file;

	/* Only read the live hardware frequency if already awake; don't
	 * wake the device just for debugfs. */
	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
		intel_runtime_pm_put(dev_priv);
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	/* Per-client boost statistics, under filelist_mutex. */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Sample the EI counters atomically under forcewake. */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2309
63573eb7
BW
2310static int i915_llc(struct seq_file *m, void *data)
2311{
36cdd013 2312 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3accaf7e 2313 const bool edram = INTEL_GEN(dev_priv) > 8;
63573eb7 2314
36cdd013 2315 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
3accaf7e
MK
2316 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2317 intel_uncore_edram_size(dev_priv)/1024/1024);
63573eb7
BW
2318
2319 return 0;
2320}
2321
0509ead1
AS
2322static int i915_huc_load_status_info(struct seq_file *m, void *data)
2323{
2324 struct drm_i915_private *dev_priv = node_to_i915(m->private);
56ffc742 2325 struct drm_printer p;
0509ead1 2326
ab309a6a
MW
2327 if (!HAS_HUC(dev_priv))
2328 return -ENODEV;
0509ead1 2329
56ffc742
MW
2330 p = drm_seq_file_printer(m);
2331 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
0509ead1 2332
3582ad13 2333 intel_runtime_pm_get(dev_priv);
0509ead1 2334 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
3582ad13 2335 intel_runtime_pm_put(dev_priv);
0509ead1
AS
2336
2337 return 0;
2338}
2339
fdf5d357
AD
/*
 * Dump GuC firmware state, the decoded GUC_STATUS register, and the 16
 * software scratch registers.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;
	u32 tmp, i;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	/* Register reads below require the device to be awake. */
	intel_runtime_pm_get(dev_priv);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2371
5e24e4a2
MW
2372static const char *
2373stringify_guc_log_type(enum guc_log_buffer_type type)
2374{
2375 switch (type) {
2376 case GUC_ISR_LOG_BUFFER:
2377 return "ISR";
2378 case GUC_DPC_LOG_BUFFER:
2379 return "DPC";
2380 case GUC_CRASH_DUMP_LOG_BUFFER:
2381 return "CRASH";
2382 default:
2383 MISSING_CASE(type);
2384 }
2385
2386 return "";
2387}
2388
5aa1ee4b
AG
2389static void i915_guc_log_info(struct seq_file *m,
2390 struct drm_i915_private *dev_priv)
2391{
5e24e4a2
MW
2392 struct intel_guc_log *log = &dev_priv->guc.log;
2393 enum guc_log_buffer_type type;
5aa1ee4b 2394
5e24e4a2
MW
2395 if (!intel_guc_log_relay_enabled(log)) {
2396 seq_puts(m, "GuC log relay disabled\n");
2397 return;
2398 }
5aa1ee4b 2399
5e24e4a2 2400 seq_puts(m, "GuC logging stats:\n");
5aa1ee4b 2401
6a96be24 2402 seq_printf(m, "\tRelay full count: %u\n",
5e24e4a2
MW
2403 log->relay.full_count);
2404
2405 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2406 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2407 stringify_guc_log_type(type),
2408 log->stats[type].flush,
2409 log->stats[type].sampled_overflow);
2410 }
5aa1ee4b
AG
2411}
2412
8b417c26
DG
2413static void i915_guc_client_info(struct seq_file *m,
2414 struct drm_i915_private *dev_priv,
5afc8b49 2415 struct intel_guc_client *client)
8b417c26 2416{
e2f80391 2417 struct intel_engine_cs *engine;
c18468c4 2418 enum intel_engine_id id;
8b417c26 2419 uint64_t tot = 0;
8b417c26 2420
b09935a6
OM
2421 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2422 client->priority, client->stage_id, client->proc_desc_offset);
59db36cf
MW
2423 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2424 client->doorbell_id, client->doorbell_offset);
8b417c26 2425
3b3f1650 2426 for_each_engine(engine, dev_priv, id) {
c18468c4
DG
2427 u64 submissions = client->submissions[id];
2428 tot += submissions;
8b417c26 2429 seq_printf(m, "\tSubmissions: %llu %s\n",
c18468c4 2430 submissions, engine->name);
8b417c26
DG
2431 }
2432 seq_printf(m, "\tTotal: %llu\n", tot);
2433}
2434
a8b9370f
OM
2435static int i915_guc_info(struct seq_file *m, void *data)
2436{
2437 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2438 const struct intel_guc *guc = &dev_priv->guc;
a8b9370f 2439
db557993 2440 if (!USES_GUC(dev_priv))
ab309a6a
MW
2441 return -ENODEV;
2442
db557993
MW
2443 i915_guc_log_info(m, dev_priv);
2444
2445 if (!USES_GUC_SUBMISSION(dev_priv))
2446 return 0;
2447
ab309a6a 2448 GEM_BUG_ON(!guc->execbuf_client);
a8b9370f 2449
db557993 2450 seq_printf(m, "\nDoorbell map:\n");
abddffdf 2451 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
db557993 2452 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
9636f6db 2453
334636c6
CW
2454 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2455 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
e78c9175
CW
2456 if (guc->preempt_client) {
2457 seq_printf(m, "\nGuC preempt client @ %p:\n",
2458 guc->preempt_client);
2459 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2460 }
8b417c26
DG
2461
2462 /* Add more as required ... */
2463
2464 return 0;
2465}
2466
/*
 * Dump every active GuC stage descriptor, including the per-engine
 * execlist context records for the engines the execbuf client uses.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip slots not currently in use. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* Per-engine LRC records for the client's engine mask. */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2520
4c7e77fc
AD
/*
 * Hex-dump a GuC log buffer, four u32s per line.  The same show
 * function serves two debugfs entries: info_ent->data selects between
 * the load-error log and the regular log vma.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	/* No log captured/allocated: empty output, not an error. */
	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2559
4977a287 2560static int i915_guc_log_level_get(void *data, u64 *val)
685534ef 2561{
bcc36d8a 2562 struct drm_i915_private *dev_priv = data;
685534ef 2563
86aa8247 2564 if (!USES_GUC(dev_priv))
ab309a6a
MW
2565 return -ENODEV;
2566
50935ac7 2567 *val = intel_guc_log_get_level(&dev_priv->guc.log);
685534ef
SAK
2568
2569 return 0;
2570}
2571
4977a287 2572static int i915_guc_log_level_set(void *data, u64 val)
685534ef 2573{
bcc36d8a 2574 struct drm_i915_private *dev_priv = data;
685534ef 2575
86aa8247 2576 if (!USES_GUC(dev_priv))
ab309a6a
MW
2577 return -ENODEV;
2578
50935ac7 2579 return intel_guc_log_set_level(&dev_priv->guc.log, val);
685534ef
SAK
2580}
2581
4977a287
MW
/* u64 debugfs attribute wrapping the get/set pair above. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2585
4977a287
MW
2586static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2587{
2588 struct drm_i915_private *dev_priv = inode->i_private;
2589
2590 if (!USES_GUC(dev_priv))
2591 return -ENODEV;
2592
2593 file->private_data = &dev_priv->guc.log;
2594
2595 return intel_guc_log_relay_open(&dev_priv->guc.log);
2596}
2597
2598static ssize_t
2599i915_guc_log_relay_write(struct file *filp,
2600 const char __user *ubuf,
2601 size_t cnt,
2602 loff_t *ppos)
2603{
2604 struct intel_guc_log *log = filp->private_data;
2605
2606 intel_guc_log_relay_flush(log);
2607
2608 return cnt;
2609}
2610
2611static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2612{
2613 struct drm_i915_private *dev_priv = inode->i_private;
2614
2615 intel_guc_log_relay_close(&dev_priv->guc.log);
2616
2617 return 0;
2618}
2619
/* File operations for the GuC log relay debugfs entry. */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2626
5b7b3086
DP
/*
 * Read the PSR status DPCD register from the sink (eDP panel) and print
 * the decoded state name.
 */
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	u8 val;
	/* Indexed by DP_PSR_STATUS sink-state field. */
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	struct drm_connector *connector = m->private;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	int ret;

	if (!CAN_PSR(dev_priv)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	if (connector->status != connector_status_connected)
		return -ENODEV;

	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

	/* drm_dp_dpcd_readb() returns the number of bytes read (1). */
	if (ret == 1) {
		const char *str = "unknown";

		val &= DP_PSR_SINK_STATE_MASK;
		if (val < ARRAY_SIZE(sink_status))
			str = sink_status[val];
		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		return ret;
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2670
00b06296
VN
2671static void
2672psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2673{
2674 u32 val, psr_status;
b86bef20 2675
00b06296
VN
2676 if (dev_priv->psr.psr2_enabled) {
2677 static const char * const live_status[] = {
2678 "IDLE",
2679 "CAPTURE",
2680 "CAPTURE_FS",
2681 "SLEEP",
2682 "BUFON_FW",
2683 "ML_UP",
2684 "SU_STANDBY",
2685 "FAST_SLEEP",
2686 "DEEP_SLEEP",
2687 "BUF_ON",
2688 "TG_ON"
2689 };
2690 psr_status = I915_READ(EDP_PSR2_STATUS);
2691 val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
2692 EDP_PSR2_STATUS_STATE_SHIFT;
2693 if (val < ARRAY_SIZE(live_status)) {
2694 seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2695 psr_status, live_status[val]);
2696 return;
2697 }
2698 } else {
2699 static const char * const live_status[] = {
2700 "IDLE",
2701 "SRDONACK",
2702 "SRDENT",
2703 "BUFOFF",
2704 "BUFON",
2705 "AUXACK",
2706 "SRDOFFACK",
2707 "SRDENT_ON",
2708 };
2709 psr_status = I915_READ(EDP_PSR_STATUS);
2710 val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
2711 EDP_PSR_STATUS_STATE_SHIFT;
2712 if (val < ARRAY_SIZE(live_status)) {
2713 seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2714 psr_status, live_status[val]);
2715 return;
2716 }
2717 }
b86bef20 2718
00b06296 2719 seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
b86bef20
CW
2720}
2721
e91fd8c6
RV
/*
 * Top-level PSR debugfs dump.
 *
 * Prints sink support, the active PSR mode (PSR1 vs PSR2), the software
 * enable state, busy frontbuffer bits, link-standby mode, the hardware
 * enable bit, the HSW/BDW performance counter and the decoded source
 * status.  A runtime-pm reference is held across the register reads and
 * psr.lock across the software-state reads.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	bool enabled = false;
	bool sink_support;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	sink_support = dev_priv->psr.sink_support;
	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
	/* Without a PSR-capable sink nothing else is meaningful. */
	if (!sink_support)
		return 0;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "PSR mode: %s\n",
		   dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);

	/* The enable bit lives in a different control register per mode. */
	if (dev_priv->psr.psr2_enabled)
		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
	else
		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}

	psr_source_status(dev_priv, m);
	mutex_unlock(&dev_priv->psr.lock);

	/* Entry/exit timestamps are only tracked with IRQ debugging on. */
	if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   dev_priv->psr.last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n",
			   dev_priv->psr.last_exit);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;
}
2779
54fd3149
DP
/*
 * debugfs write side of i915_edp_psr_debug: apply a new PSR debug mask.
 *
 * Applying the value may require touching modeset state, so the drm
 * modeset locks are taken through a full acquire context with the
 * standard -EDEADLK backoff-and-retry protocol.  Returns -ENODEV when
 * PSR cannot be used on this device.
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

	intel_runtime_pm_get(dev_priv);

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

retry:
	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
	if (ret == -EDEADLK) {
		/* Lock inversion detected: drop everything and retry. */
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	intel_runtime_pm_put(dev_priv);

	return ret;
}
2811
2812static int
2813i915_edp_psr_debug_get(void *data, u64 *val)
2814{
2815 struct drm_i915_private *dev_priv = data;
2816
2817 if (!CAN_PSR(dev_priv))
2818 return -ENODEV;
2819
2820 *val = READ_ONCE(dev_priv->psr.debug);
2821 return 0;
2822}
2823
2824DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2825 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2826 "%llu\n");
2827
ec013e7f
JB
/*
 * Report cumulative GPU energy consumption in microjoules.
 *
 * Reads the RAPL energy-status-unit exponent from MSR_RAPL_POWER_UNIT
 * and scales the raw MCH_SECP_NRG_STTS counter with it.  Returns
 * -ENODEV on gen5 and earlier, or when the MSR read fails.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* Bits 12:8 hold the energy unit exponent: 1 count = 2^-units J. */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}
2854
/*
 * Summarize runtime power management state: GPU idleness, IRQ
 * enablement, the runtime-pm usage count (when CONFIG_PM is built in)
 * and the PCI power state.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* Informational only: the remaining state is still printed. */
	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
2879
1da51581
ID
2880static int i915_power_domain_info(struct seq_file *m, void *unused)
2881{
36cdd013 2882 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1da51581
ID
2883 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2884 int i;
2885
2886 mutex_lock(&power_domains->lock);
2887
2888 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2889 for (i = 0; i < power_domains->power_well_count; i++) {
2890 struct i915_power_well *power_well;
2891 enum intel_display_power_domain power_domain;
2892
2893 power_well = &power_domains->power_wells[i];
f28ec6f4 2894 seq_printf(m, "%-25s %d\n", power_well->desc->name,
1da51581
ID
2895 power_well->count);
2896
f28ec6f4 2897 for_each_power_domain(power_domain, power_well->desc->domains)
1da51581 2898 seq_printf(m, " %-23s %d\n",
9895ad03 2899 intel_display_power_domain_str(power_domain),
1da51581 2900 power_domains->domain_use_count[power_domain]);
1da51581
ID
2901 }
2902
2903 mutex_unlock(&power_domains->lock);
2904
2905 return 0;
2906}
2907
b7cec66d
DL
/*
 * Dump DMC/CSR firmware state: whether a payload is loaded, its path
 * and version, the DC-state transition counters, and a few raw CSR
 * registers.  A runtime-pm reference is held for the register reads.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a loaded payload only the raw registers are printed. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* The counter register offsets below are only known up to gen11. */
	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
		goto out;

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						    SKL_CSR_DC3_DC5_COUNT));
	if (!IS_GEN9_LP(dev_priv))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2948
53f5e3ca
JB
/*
 * Print one display mode on a single line, prefixed by @tabs tab
 * characters so callers can control nesting depth.
 */
static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}
2966
/*
 * Print one encoder of @intel_crtc and every connector attached to it.
 * Connected connectors additionally get the CRTC's current mode.
 */
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
2995
/*
 * Print the primary plane's framebuffer (if any) and then describe
 * every encoder currently attached to @intel_crtc.
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		/* src_x/src_y are 16.16 fixed point; print integer part. */
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
3014
3015static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
3016{
3017 struct drm_display_mode *mode = panel->fixed_mode;
3018
3019 seq_printf(m, "\tfixed mode:\n");
3020 intel_seq_print_mode(m, 2, mode);
3021}
3022
/*
 * DisplayPort-specific connector details: DPCD revision, audio
 * capability, panel info for eDP, and the downstream port layout.
 */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	/* Only embedded DP panels have a fixed panel mode to show. */
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
3037
9a148a96
LY
/*
 * DP-MST-specific connector details: audio capability is queried from
 * the MST topology manager of the primary digital port, using this
 * connector's MST port handle.
 */
static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
						   intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
3051
53f5e3ca
JB
3052static void intel_hdmi_info(struct seq_file *m,
3053 struct intel_connector *intel_connector)
3054{
3055 struct intel_encoder *intel_encoder = intel_connector->encoder;
3056 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3057
742f491d 3058 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
53f5e3ca
JB
3059}
3060
3061static void intel_lvds_info(struct seq_file *m,
3062 struct intel_connector *intel_connector)
3063{
3064 intel_panel_info(m, &intel_connector->panel);
3065}
3066
/*
 * Print everything known about one connector: status, display info,
 * type-specific details (DP/MST/LVDS/HDMI) and the full mode list.
 * Disconnected connectors only get the status line.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));

	if (connector->status == connector_status_disconnected)
		return;

	seq_printf(m, "\tname: %s\n", connector->display_info.name);
	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
		   connector->display_info.width_mm,
		   connector->display_info.height_mm);
	seq_printf(m, "\tsubpixel order: %s\n",
		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

	/* Without an encoder there is no type-specific state to decode. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
3117
3abc4e09
RF
/*
 * Map a drm_plane_type to a short tag for the plane dump.  The switch
 * intentionally has no default: case (see comment below).
 */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
3135
/*
 * Decode a plane rotation bitmask into a human-readable string.
 *
 * NOTE(review): the result lives in a static buffer, so this helper is
 * not reentrant.  That appears acceptable for debugfs dumps, but
 * concurrent callers would race — confirm before reusing elsewhere.
 */
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
3155
/*
 * Dump every plane attached to @intel_crtc: type, CRTC and source
 * rectangles, pixel format and rotation.  Source coordinates are 16.16
 * fixed point; the fraction is shown as four decimal digits via
 * (frac * 15625) >> 10, i.e. frac * 10000 / 65536.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}
3198
/*
 * Print the scaler state of @intel_crtc: user mask, active scaler id
 * and per-scaler use/mode, or a note when the platform has no scalers.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3226
53f5e3ca
JB
/*
 * Full display-state dump: every CRTC (with cursor, scaler, plane and
 * FIFO-underrun info) followed by every connector.  Runtime pm is held
 * throughout; each CRTC is inspected under its own modeset mutex and
 * the connector walk runs under mode_config.mutex.
 */
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		/* Plane/cursor/scaler state only means anything when active. */
		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
3287
1b36595f
CW
/*
 * Dump per-engine state via intel_engine_dump(), preceded by global GT
 * state (awake/epoch, active request count, CS timestamp frequency).
 * Runtime pm is held so engine registers are readable.
 */
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct drm_printer p;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s (epoch %u)\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "Global active requests: %d\n",
		   dev_priv->gt.active_requests);
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   dev_priv->info.cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
3312
79e9cd5f
LL
3313static int i915_rcs_topology(struct seq_file *m, void *unused)
3314{
3315 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3316 struct drm_printer p = drm_seq_file_printer(m);
3317
3318 intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3319
3320 return 0;
3321}
3322
c5418a8b
CW
3323static int i915_shrinker_info(struct seq_file *m, void *unused)
3324{
3325 struct drm_i915_private *i915 = node_to_i915(m->private);
3326
3327 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3328 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3329
3330 return 0;
3331}
3332
728e29d7
DV
/*
 * Dump every shared DPLL: which CRTCs use it, whether it is on, and
 * the tracked hardware state.  The MG PLL registers at the end only
 * carry meaningful values on platforms that have them.  Runs under the
 * global modeset lock so the state cannot change mid-dump.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf:    0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias:  0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
3381
1ed1ef9d 3382static int i915_wa_registers(struct seq_file *m, void *unused)
888b5995 3383{
452420d2
TU
3384 struct drm_i915_private *i915 = node_to_i915(m->private);
3385 const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
3386 struct i915_wa *wa;
3387 unsigned int i;
888b5995 3388
452420d2
TU
3389 seq_printf(m, "Workarounds applied: %u\n", wal->count);
3390 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
548764bb 3391 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
452420d2 3392 i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
888b5995
AS
3393
3394 return 0;
3395}
3396
d2d4f39b
KM
3397static int i915_ipc_status_show(struct seq_file *m, void *data)
3398{
3399 struct drm_i915_private *dev_priv = m->private;
3400
3401 seq_printf(m, "Isochronous Priority Control: %s\n",
3402 yesno(dev_priv->ipc_enabled));
3403 return 0;
3404}
3405
3406static int i915_ipc_status_open(struct inode *inode, struct file *file)
3407{
3408 struct drm_i915_private *dev_priv = inode->i_private;
3409
3410 if (!HAS_IPC(dev_priv))
3411 return -ENODEV;
3412
3413 return single_open(file, i915_ipc_status_show, dev_priv);
3414}
3415
/*
 * debugfs write handler: parse a boolean and enable/disable IPC.
 *
 * Changing IPC invalidates the current watermarks, so the BIOS-derived
 * watermark state is marked untrusted to force a recalculation on the
 * next commit.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	int ret;
	bool enable;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	intel_runtime_pm_get(dev_priv);
	if (!dev_priv->ipc_enabled && enable)
		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
	/* Force a full watermark recalculation on the next commit. */
	dev_priv->wm.distrust_bios_wm = true;
	dev_priv->ipc_enabled = enable;
	intel_enable_ipc(dev_priv);
	intel_runtime_pm_put(dev_priv);

	return len;
}
3438
/* File operations for the i915_ipc_status debugfs entry (seq_file based). */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3447
c5511e44
DL
/*
 * Dump the display data buffer (DDB) allocation of every plane on
 * every pipe, from the cached software watermark state, under the
 * modeset locks.  Gen9+ only; earlier hardware has no SKL-style DDB.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_entry *entry;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	drm_modeset_lock_all(dev);

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;

		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
			/* plane_id + 1: hardware plane numbering is 1-based. */
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
3486
/*
 * Print the DRRS (dynamic refresh rate switching) state for one CRTC:
 * the connector(s) it drives, the VBT-reported DRRS type, and — when
 * the CRTC state has DRRS — the current refresh-rate state and busy
 * frontbuffer bits, sampled under drrs->mutex.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* Only list connectors actually driven by this CRTC. */
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
				   drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3561
/*
 * Walk all CRTCs under the modeset locks and print DRRS state for each
 * active one; note when no CRTC is active at all.
 */
static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}
3585
11bed958
DA
/*
 * For every MST-capable DP source port, dump its MST topology.  MST
 * stream encoders themselves are skipped — only the primary (non-MST)
 * encoder of each digital port is inspected.
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3616
eb3394fa 3617static ssize_t i915_displayport_test_active_write(struct file *file,
36cdd013
DW
3618 const char __user *ubuf,
3619 size_t len, loff_t *offp)
eb3394fa
TP
3620{
3621 char *input_buffer;
3622 int status = 0;
eb3394fa
TP
3623 struct drm_device *dev;
3624 struct drm_connector *connector;
3f6a5e1e 3625 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3626 struct intel_dp *intel_dp;
3627 int val = 0;
3628
9aaffa34 3629 dev = ((struct seq_file *)file->private_data)->private;
eb3394fa 3630
eb3394fa
TP
3631 if (len == 0)
3632 return 0;
3633
261aeba8
GT
3634 input_buffer = memdup_user_nul(ubuf, len);
3635 if (IS_ERR(input_buffer))
3636 return PTR_ERR(input_buffer);
eb3394fa 3637
eb3394fa
TP
3638 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3639
3f6a5e1e
DV
3640 drm_connector_list_iter_begin(dev, &conn_iter);
3641 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3642 struct intel_encoder *encoder;
3643
eb3394fa
TP
3644 if (connector->connector_type !=
3645 DRM_MODE_CONNECTOR_DisplayPort)
3646 continue;
3647
a874b6a3
ML
3648 encoder = to_intel_encoder(connector->encoder);
3649 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3650 continue;
3651
3652 if (encoder && connector->status == connector_status_connected) {
3653 intel_dp = enc_to_intel_dp(&encoder->base);
eb3394fa
TP
3654 status = kstrtoint(input_buffer, 10, &val);
3655 if (status < 0)
3f6a5e1e 3656 break;
eb3394fa
TP
3657 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3658 /* To prevent erroneous activation of the compliance
3659 * testing code, only accept an actual value of 1 here
3660 */
3661 if (val == 1)
c1617abc 3662 intel_dp->compliance.test_active = 1;
eb3394fa 3663 else
c1617abc 3664 intel_dp->compliance.test_active = 0;
eb3394fa
TP
3665 }
3666 }
3f6a5e1e 3667 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3668 kfree(input_buffer);
3669 if (status < 0)
3670 return status;
3671
3672 *offp += len;
3673 return len;
3674}
3675
3676static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3677{
e4006713
AS
3678 struct drm_i915_private *dev_priv = m->private;
3679 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3680 struct drm_connector *connector;
3f6a5e1e 3681 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3682 struct intel_dp *intel_dp;
3683
3f6a5e1e
DV
3684 drm_connector_list_iter_begin(dev, &conn_iter);
3685 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3686 struct intel_encoder *encoder;
3687
eb3394fa
TP
3688 if (connector->connector_type !=
3689 DRM_MODE_CONNECTOR_DisplayPort)
3690 continue;
3691
a874b6a3
ML
3692 encoder = to_intel_encoder(connector->encoder);
3693 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3694 continue;
3695
3696 if (encoder && connector->status == connector_status_connected) {
3697 intel_dp = enc_to_intel_dp(&encoder->base);
c1617abc 3698 if (intel_dp->compliance.test_active)
eb3394fa
TP
3699 seq_puts(m, "1");
3700 else
3701 seq_puts(m, "0");
3702 } else
3703 seq_puts(m, "0");
3704 }
3f6a5e1e 3705 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3706
3707 return 0;
3708}
3709
3710static int i915_displayport_test_active_open(struct inode *inode,
36cdd013 3711 struct file *file)
eb3394fa 3712{
36cdd013 3713 return single_open(file, i915_displayport_test_active_show,
e4006713 3714 inode->i_private);
eb3394fa
TP
3715}
3716
3717static const struct file_operations i915_displayport_test_active_fops = {
3718 .owner = THIS_MODULE,
3719 .open = i915_displayport_test_active_open,
3720 .read = seq_read,
3721 .llseek = seq_lseek,
3722 .release = single_release,
3723 .write = i915_displayport_test_active_write
3724};
3725
3726static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3727{
e4006713
AS
3728 struct drm_i915_private *dev_priv = m->private;
3729 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3730 struct drm_connector *connector;
3f6a5e1e 3731 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3732 struct intel_dp *intel_dp;
3733
3f6a5e1e
DV
3734 drm_connector_list_iter_begin(dev, &conn_iter);
3735 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3736 struct intel_encoder *encoder;
3737
eb3394fa
TP
3738 if (connector->connector_type !=
3739 DRM_MODE_CONNECTOR_DisplayPort)
3740 continue;
3741
a874b6a3
ML
3742 encoder = to_intel_encoder(connector->encoder);
3743 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3744 continue;
3745
3746 if (encoder && connector->status == connector_status_connected) {
3747 intel_dp = enc_to_intel_dp(&encoder->base);
b48a5ba9
MN
3748 if (intel_dp->compliance.test_type ==
3749 DP_TEST_LINK_EDID_READ)
3750 seq_printf(m, "%lx",
3751 intel_dp->compliance.test_data.edid);
611032bf
MN
3752 else if (intel_dp->compliance.test_type ==
3753 DP_TEST_LINK_VIDEO_PATTERN) {
3754 seq_printf(m, "hdisplay: %d\n",
3755 intel_dp->compliance.test_data.hdisplay);
3756 seq_printf(m, "vdisplay: %d\n",
3757 intel_dp->compliance.test_data.vdisplay);
3758 seq_printf(m, "bpc: %u\n",
3759 intel_dp->compliance.test_data.bpc);
3760 }
eb3394fa
TP
3761 } else
3762 seq_puts(m, "0");
3763 }
3f6a5e1e 3764 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3765
3766 return 0;
3767}
e4006713 3768DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
eb3394fa
TP
3769
3770static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3771{
e4006713
AS
3772 struct drm_i915_private *dev_priv = m->private;
3773 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3774 struct drm_connector *connector;
3f6a5e1e 3775 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3776 struct intel_dp *intel_dp;
3777
3f6a5e1e
DV
3778 drm_connector_list_iter_begin(dev, &conn_iter);
3779 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3780 struct intel_encoder *encoder;
3781
eb3394fa
TP
3782 if (connector->connector_type !=
3783 DRM_MODE_CONNECTOR_DisplayPort)
3784 continue;
3785
a874b6a3
ML
3786 encoder = to_intel_encoder(connector->encoder);
3787 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3788 continue;
3789
3790 if (encoder && connector->status == connector_status_connected) {
3791 intel_dp = enc_to_intel_dp(&encoder->base);
c1617abc 3792 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
eb3394fa
TP
3793 } else
3794 seq_puts(m, "0");
3795 }
3f6a5e1e 3796 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3797
3798 return 0;
3799}
e4006713 3800DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
eb3394fa 3801
97e94b22 3802static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
369a1342 3803{
36cdd013
DW
3804 struct drm_i915_private *dev_priv = m->private;
3805 struct drm_device *dev = &dev_priv->drm;
369a1342 3806 int level;
de38b95c
VS
3807 int num_levels;
3808
36cdd013 3809 if (IS_CHERRYVIEW(dev_priv))
de38b95c 3810 num_levels = 3;
36cdd013 3811 else if (IS_VALLEYVIEW(dev_priv))
de38b95c 3812 num_levels = 1;
04548cba
VS
3813 else if (IS_G4X(dev_priv))
3814 num_levels = 3;
de38b95c 3815 else
5db94019 3816 num_levels = ilk_wm_max_level(dev_priv) + 1;
369a1342
VS
3817
3818 drm_modeset_lock_all(dev);
3819
3820 for (level = 0; level < num_levels; level++) {
3821 unsigned int latency = wm[level];
3822
97e94b22
DL
3823 /*
3824 * - WM1+ latency values in 0.5us units
de38b95c 3825 * - latencies are in us on gen9/vlv/chv
97e94b22 3826 */
04548cba
VS
3827 if (INTEL_GEN(dev_priv) >= 9 ||
3828 IS_VALLEYVIEW(dev_priv) ||
3829 IS_CHERRYVIEW(dev_priv) ||
3830 IS_G4X(dev_priv))
97e94b22
DL
3831 latency *= 10;
3832 else if (level > 0)
369a1342
VS
3833 latency *= 5;
3834
3835 seq_printf(m, "WM%d %u (%u.%u usec)\n",
97e94b22 3836 level, wm[level], latency / 10, latency % 10);
369a1342
VS
3837 }
3838
3839 drm_modeset_unlock_all(dev);
3840}
3841
3842static int pri_wm_latency_show(struct seq_file *m, void *data)
3843{
36cdd013 3844 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3845 const uint16_t *latencies;
3846
36cdd013 3847 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3848 latencies = dev_priv->wm.skl_latency;
3849 else
36cdd013 3850 latencies = dev_priv->wm.pri_latency;
369a1342 3851
97e94b22 3852 wm_latency_show(m, latencies);
369a1342
VS
3853
3854 return 0;
3855}
3856
3857static int spr_wm_latency_show(struct seq_file *m, void *data)
3858{
36cdd013 3859 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3860 const uint16_t *latencies;
3861
36cdd013 3862 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3863 latencies = dev_priv->wm.skl_latency;
3864 else
36cdd013 3865 latencies = dev_priv->wm.spr_latency;
369a1342 3866
97e94b22 3867 wm_latency_show(m, latencies);
369a1342
VS
3868
3869 return 0;
3870}
3871
3872static int cur_wm_latency_show(struct seq_file *m, void *data)
3873{
36cdd013 3874 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3875 const uint16_t *latencies;
3876
36cdd013 3877 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3878 latencies = dev_priv->wm.skl_latency;
3879 else
36cdd013 3880 latencies = dev_priv->wm.cur_latency;
369a1342 3881
97e94b22 3882 wm_latency_show(m, latencies);
369a1342
VS
3883
3884 return 0;
3885}
3886
3887static int pri_wm_latency_open(struct inode *inode, struct file *file)
3888{
36cdd013 3889 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3890
04548cba 3891 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
369a1342
VS
3892 return -ENODEV;
3893
36cdd013 3894 return single_open(file, pri_wm_latency_show, dev_priv);
369a1342
VS
3895}
3896
3897static int spr_wm_latency_open(struct inode *inode, struct file *file)
3898{
36cdd013 3899 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3900
36cdd013 3901 if (HAS_GMCH_DISPLAY(dev_priv))
369a1342
VS
3902 return -ENODEV;
3903
36cdd013 3904 return single_open(file, spr_wm_latency_show, dev_priv);
369a1342
VS
3905}
3906
3907static int cur_wm_latency_open(struct inode *inode, struct file *file)
3908{
36cdd013 3909 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3910
36cdd013 3911 if (HAS_GMCH_DISPLAY(dev_priv))
369a1342
VS
3912 return -ENODEV;
3913
36cdd013 3914 return single_open(file, cur_wm_latency_show, dev_priv);
369a1342
VS
3915}
3916
3917static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
97e94b22 3918 size_t len, loff_t *offp, uint16_t wm[8])
369a1342
VS
3919{
3920 struct seq_file *m = file->private_data;
36cdd013
DW
3921 struct drm_i915_private *dev_priv = m->private;
3922 struct drm_device *dev = &dev_priv->drm;
97e94b22 3923 uint16_t new[8] = { 0 };
de38b95c 3924 int num_levels;
369a1342
VS
3925 int level;
3926 int ret;
3927 char tmp[32];
3928
36cdd013 3929 if (IS_CHERRYVIEW(dev_priv))
de38b95c 3930 num_levels = 3;
36cdd013 3931 else if (IS_VALLEYVIEW(dev_priv))
de38b95c 3932 num_levels = 1;
04548cba
VS
3933 else if (IS_G4X(dev_priv))
3934 num_levels = 3;
de38b95c 3935 else
5db94019 3936 num_levels = ilk_wm_max_level(dev_priv) + 1;
de38b95c 3937
369a1342
VS
3938 if (len >= sizeof(tmp))
3939 return -EINVAL;
3940
3941 if (copy_from_user(tmp, ubuf, len))
3942 return -EFAULT;
3943
3944 tmp[len] = '\0';
3945
97e94b22
DL
3946 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3947 &new[0], &new[1], &new[2], &new[3],
3948 &new[4], &new[5], &new[6], &new[7]);
369a1342
VS
3949 if (ret != num_levels)
3950 return -EINVAL;
3951
3952 drm_modeset_lock_all(dev);
3953
3954 for (level = 0; level < num_levels; level++)
3955 wm[level] = new[level];
3956
3957 drm_modeset_unlock_all(dev);
3958
3959 return len;
3960}
3961
3962
3963static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3964 size_t len, loff_t *offp)
3965{
3966 struct seq_file *m = file->private_data;
36cdd013 3967 struct drm_i915_private *dev_priv = m->private;
97e94b22 3968 uint16_t *latencies;
369a1342 3969
36cdd013 3970 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3971 latencies = dev_priv->wm.skl_latency;
3972 else
36cdd013 3973 latencies = dev_priv->wm.pri_latency;
97e94b22
DL
3974
3975 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3976}
3977
3978static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3979 size_t len, loff_t *offp)
3980{
3981 struct seq_file *m = file->private_data;
36cdd013 3982 struct drm_i915_private *dev_priv = m->private;
97e94b22 3983 uint16_t *latencies;
369a1342 3984
36cdd013 3985 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3986 latencies = dev_priv->wm.skl_latency;
3987 else
36cdd013 3988 latencies = dev_priv->wm.spr_latency;
97e94b22
DL
3989
3990 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3991}
3992
3993static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3994 size_t len, loff_t *offp)
3995{
3996 struct seq_file *m = file->private_data;
36cdd013 3997 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3998 uint16_t *latencies;
3999
36cdd013 4000 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
4001 latencies = dev_priv->wm.skl_latency;
4002 else
36cdd013 4003 latencies = dev_priv->wm.cur_latency;
369a1342 4004
97e94b22 4005 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
4006}
4007
4008static const struct file_operations i915_pri_wm_latency_fops = {
4009 .owner = THIS_MODULE,
4010 .open = pri_wm_latency_open,
4011 .read = seq_read,
4012 .llseek = seq_lseek,
4013 .release = single_release,
4014 .write = pri_wm_latency_write
4015};
4016
4017static const struct file_operations i915_spr_wm_latency_fops = {
4018 .owner = THIS_MODULE,
4019 .open = spr_wm_latency_open,
4020 .read = seq_read,
4021 .llseek = seq_lseek,
4022 .release = single_release,
4023 .write = spr_wm_latency_write
4024};
4025
4026static const struct file_operations i915_cur_wm_latency_fops = {
4027 .owner = THIS_MODULE,
4028 .open = cur_wm_latency_open,
4029 .read = seq_read,
4030 .llseek = seq_lseek,
4031 .release = single_release,
4032 .write = cur_wm_latency_write
4033};
4034
647416f9
KC
4035static int
4036i915_wedged_get(void *data, u64 *val)
f3cd474b 4037{
36cdd013 4038 struct drm_i915_private *dev_priv = data;
f3cd474b 4039
d98c52cf 4040 *val = i915_terminally_wedged(&dev_priv->gpu_error);
f3cd474b 4041
647416f9 4042 return 0;
f3cd474b
CW
4043}
4044
647416f9
KC
4045static int
4046i915_wedged_set(void *data, u64 val)
f3cd474b 4047{
598b6b5a
CW
4048 struct drm_i915_private *i915 = data;
4049 struct intel_engine_cs *engine;
4050 unsigned int tmp;
d46c0517 4051
b8d24a06
MK
4052 /*
4053 * There is no safeguard against this debugfs entry colliding
4054 * with the hangcheck calling same i915_handle_error() in
4055 * parallel, causing an explosion. For now we assume that the
4056 * test harness is responsible enough not to inject gpu hangs
4057 * while it is writing to 'i915_wedged'
4058 */
4059
598b6b5a 4060 if (i915_reset_backoff(&i915->gpu_error))
b8d24a06
MK
4061 return -EAGAIN;
4062
598b6b5a
CW
4063 for_each_engine_masked(engine, i915, val, tmp) {
4064 engine->hangcheck.seqno = intel_engine_get_seqno(engine);
4065 engine->hangcheck.stalled = true;
4066 }
4067
ce800754
CW
4068 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
4069 "Manually set wedged engine mask = %llx", val);
d46c0517 4070
598b6b5a 4071 wait_on_bit(&i915->gpu_error.flags,
d3df42b7
CW
4072 I915_RESET_HANDOFF,
4073 TASK_UNINTERRUPTIBLE);
4074
647416f9 4075 return 0;
f3cd474b
CW
4076}
4077
647416f9
KC
4078DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4079 i915_wedged_get, i915_wedged_set,
3a3b4f98 4080 "%llu\n");
f3cd474b 4081
64486ae7
CW
4082static int
4083fault_irq_set(struct drm_i915_private *i915,
4084 unsigned long *irq,
4085 unsigned long val)
4086{
4087 int err;
4088
4089 err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4090 if (err)
4091 return err;
4092
4093 err = i915_gem_wait_for_idle(i915,
4094 I915_WAIT_LOCKED |
ec625fb9
CW
4095 I915_WAIT_INTERRUPTIBLE,
4096 MAX_SCHEDULE_TIMEOUT);
64486ae7
CW
4097 if (err)
4098 goto err_unlock;
4099
64486ae7
CW
4100 *irq = val;
4101 mutex_unlock(&i915->drm.struct_mutex);
4102
4103 /* Flush idle worker to disarm irq */
7c26240e 4104 drain_delayed_work(&i915->gt.idle_work);
64486ae7
CW
4105
4106 return 0;
4107
4108err_unlock:
4109 mutex_unlock(&i915->drm.struct_mutex);
4110 return err;
4111}
4112
094f9a54
CW
4113static int
4114i915_ring_missed_irq_get(void *data, u64 *val)
4115{
36cdd013 4116 struct drm_i915_private *dev_priv = data;
094f9a54
CW
4117
4118 *val = dev_priv->gpu_error.missed_irq_rings;
4119 return 0;
4120}
4121
4122static int
4123i915_ring_missed_irq_set(void *data, u64 val)
4124{
64486ae7 4125 struct drm_i915_private *i915 = data;
094f9a54 4126
64486ae7 4127 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
094f9a54
CW
4128}
4129
4130DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4131 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4132 "0x%08llx\n");
4133
4134static int
4135i915_ring_test_irq_get(void *data, u64 *val)
4136{
36cdd013 4137 struct drm_i915_private *dev_priv = data;
094f9a54
CW
4138
4139 *val = dev_priv->gpu_error.test_irq_rings;
4140
4141 return 0;
4142}
4143
4144static int
4145i915_ring_test_irq_set(void *data, u64 val)
4146{
64486ae7 4147 struct drm_i915_private *i915 = data;
094f9a54 4148
5f521722
CW
4149 /* GuC keeps the user interrupt permanently enabled for submission */
4150 if (USES_GUC_SUBMISSION(i915))
4151 return -ENODEV;
4152
4153 /*
4154 * From icl, we can no longer individually mask interrupt generation
4155 * from each engine.
4156 */
4157 if (INTEL_GEN(i915) >= 11)
4158 return -ENODEV;
4159
64486ae7 4160 val &= INTEL_INFO(i915)->ring_mask;
094f9a54 4161 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
094f9a54 4162
64486ae7 4163 return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
094f9a54
CW
4164}
4165
4166DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4167 i915_ring_test_irq_get, i915_ring_test_irq_set,
4168 "0x%08llx\n");
4169
b4a0b32d
CW
4170#define DROP_UNBOUND BIT(0)
4171#define DROP_BOUND BIT(1)
4172#define DROP_RETIRE BIT(2)
4173#define DROP_ACTIVE BIT(3)
4174#define DROP_FREED BIT(4)
4175#define DROP_SHRINK_ALL BIT(5)
4176#define DROP_IDLE BIT(6)
6b048706
CW
4177#define DROP_RESET_ACTIVE BIT(7)
4178#define DROP_RESET_SEQNO BIT(8)
fbbd37b3
CW
4179#define DROP_ALL (DROP_UNBOUND | \
4180 DROP_BOUND | \
4181 DROP_RETIRE | \
4182 DROP_ACTIVE | \
8eadc19b 4183 DROP_FREED | \
b4a0b32d 4184 DROP_SHRINK_ALL |\
6b048706
CW
4185 DROP_IDLE | \
4186 DROP_RESET_ACTIVE | \
4187 DROP_RESET_SEQNO)
647416f9
KC
4188static int
4189i915_drop_caches_get(void *data, u64 *val)
dd624afd 4190{
647416f9 4191 *val = DROP_ALL;
dd624afd 4192
647416f9 4193 return 0;
dd624afd
CW
4194}
4195
647416f9
KC
4196static int
4197i915_drop_caches_set(void *data, u64 val)
dd624afd 4198{
6b048706 4199 struct drm_i915_private *i915 = data;
00c26cf9 4200 int ret = 0;
dd624afd 4201
b4a0b32d
CW
4202 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4203 val, val & DROP_ALL);
9d3eb2c3 4204 intel_runtime_pm_get(i915);
dd624afd 4205
6b048706
CW
4206 if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
4207 i915_gem_set_wedged(i915);
4208
dd624afd
CW
4209 /* No need to check and wait for gpu resets, only libdrm auto-restarts
4210 * on ioctls on -EAGAIN. */
6b048706
CW
4211 if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
4212 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
dd624afd 4213 if (ret)
198a2a2f 4214 goto out;
dd624afd 4215
00c26cf9 4216 if (val & DROP_ACTIVE)
6b048706 4217 ret = i915_gem_wait_for_idle(i915,
00c26cf9 4218 I915_WAIT_INTERRUPTIBLE |
ec625fb9
CW
4219 I915_WAIT_LOCKED,
4220 MAX_SCHEDULE_TIMEOUT);
00c26cf9 4221
9d3eb2c3 4222 if (ret == 0 && val & DROP_RESET_SEQNO)
6b048706 4223 ret = i915_gem_set_global_seqno(&i915->drm, 1);
6b048706 4224
00c26cf9 4225 if (val & DROP_RETIRE)
6b048706 4226 i915_retire_requests(i915);
00c26cf9 4227
6b048706
CW
4228 mutex_unlock(&i915->drm.struct_mutex);
4229 }
4230
4231 if (val & DROP_RESET_ACTIVE &&
4232 i915_terminally_wedged(&i915->gpu_error)) {
4233 i915_handle_error(i915, ALL_ENGINES, 0, NULL);
4234 wait_on_bit(&i915->gpu_error.flags,
4235 I915_RESET_HANDOFF,
4236 TASK_UNINTERRUPTIBLE);
00c26cf9 4237 }
dd624afd 4238
d92a8cfc 4239 fs_reclaim_acquire(GFP_KERNEL);
21ab4e74 4240 if (val & DROP_BOUND)
6b048706 4241 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
4ad72b7f 4242
21ab4e74 4243 if (val & DROP_UNBOUND)
6b048706 4244 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
dd624afd 4245
8eadc19b 4246 if (val & DROP_SHRINK_ALL)
6b048706 4247 i915_gem_shrink_all(i915);
d92a8cfc 4248 fs_reclaim_release(GFP_KERNEL);
8eadc19b 4249
4dfacb0b
CW
4250 if (val & DROP_IDLE) {
4251 do {
6b048706
CW
4252 if (READ_ONCE(i915->gt.active_requests))
4253 flush_delayed_work(&i915->gt.retire_work);
4254 drain_delayed_work(&i915->gt.idle_work);
4255 } while (READ_ONCE(i915->gt.awake));
4dfacb0b 4256 }
b4a0b32d 4257
c9c70471 4258 if (val & DROP_FREED)
6b048706 4259 i915_gem_drain_freed_objects(i915);
fbbd37b3 4260
198a2a2f 4261out:
9d3eb2c3
CW
4262 intel_runtime_pm_put(i915);
4263
647416f9 4264 return ret;
dd624afd
CW
4265}
4266
647416f9
KC
4267DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4268 i915_drop_caches_get, i915_drop_caches_set,
4269 "0x%08llx\n");
dd624afd 4270
647416f9
KC
4271static int
4272i915_cache_sharing_get(void *data, u64 *val)
07b7ddd9 4273{
36cdd013 4274 struct drm_i915_private *dev_priv = data;
07b7ddd9 4275 u32 snpcr;
07b7ddd9 4276
36cdd013 4277 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
004777cb
DV
4278 return -ENODEV;
4279
c8c8fb33 4280 intel_runtime_pm_get(dev_priv);
22bcfc6a 4281
07b7ddd9 4282 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
c8c8fb33
PZ
4283
4284 intel_runtime_pm_put(dev_priv);
07b7ddd9 4285
647416f9 4286 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
07b7ddd9 4287
647416f9 4288 return 0;
07b7ddd9
JB
4289}
4290
647416f9
KC
4291static int
4292i915_cache_sharing_set(void *data, u64 val)
07b7ddd9 4293{
36cdd013 4294 struct drm_i915_private *dev_priv = data;
07b7ddd9 4295 u32 snpcr;
07b7ddd9 4296
36cdd013 4297 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
004777cb
DV
4298 return -ENODEV;
4299
647416f9 4300 if (val > 3)
07b7ddd9
JB
4301 return -EINVAL;
4302
c8c8fb33 4303 intel_runtime_pm_get(dev_priv);
647416f9 4304 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
07b7ddd9
JB
4305
4306 /* Update the cache sharing policy here as well */
4307 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4308 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4309 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4310 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4311
c8c8fb33 4312 intel_runtime_pm_put(dev_priv);
647416f9 4313 return 0;
07b7ddd9
JB
4314}
4315
647416f9
KC
4316DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4317 i915_cache_sharing_get, i915_cache_sharing_set,
4318 "%llu\n");
07b7ddd9 4319
36cdd013 4320static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4321 struct sseu_dev_info *sseu)
5d39525a 4322{
7aa0b14e
CW
4323#define SS_MAX 2
4324 const int ss_max = SS_MAX;
4325 u32 sig1[SS_MAX], sig2[SS_MAX];
5d39525a 4326 int ss;
5d39525a
JM
4327
4328 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4329 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4330 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4331 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4332
4333 for (ss = 0; ss < ss_max; ss++) {
4334 unsigned int eu_cnt;
4335
4336 if (sig1[ss] & CHV_SS_PG_ENABLE)
4337 /* skip disabled subslice */
4338 continue;
4339
f08a0c92 4340 sseu->slice_mask = BIT(0);
8cc76693 4341 sseu->subslice_mask[0] |= BIT(ss);
5d39525a
JM
4342 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4343 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4344 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4345 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
915490d5
ID
4346 sseu->eu_total += eu_cnt;
4347 sseu->eu_per_subslice = max_t(unsigned int,
4348 sseu->eu_per_subslice, eu_cnt);
5d39525a 4349 }
7aa0b14e 4350#undef SS_MAX
5d39525a
JM
4351}
4352
f8c3dcf9
RV
4353static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4354 struct sseu_dev_info *sseu)
4355{
c7fb3c6c 4356#define SS_MAX 6
f8c3dcf9 4357 const struct intel_device_info *info = INTEL_INFO(dev_priv);
c7fb3c6c 4358 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
f8c3dcf9 4359 int s, ss;
f8c3dcf9 4360
b3e7f866 4361 for (s = 0; s < info->sseu.max_slices; s++) {
f8c3dcf9
RV
4362 /*
4363 * FIXME: Valid SS Mask respects the spec and read
3c64ea8c 4364 * only valid bits for those registers, excluding reserved
f8c3dcf9
RV
4365 * although this seems wrong because it would leave many
4366 * subslices without ACK.
4367 */
4368 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4369 GEN10_PGCTL_VALID_SS_MASK(s);
4370 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4371 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4372 }
4373
4374 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4375 GEN9_PGCTL_SSA_EU19_ACK |
4376 GEN9_PGCTL_SSA_EU210_ACK |
4377 GEN9_PGCTL_SSA_EU311_ACK;
4378 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4379 GEN9_PGCTL_SSB_EU19_ACK |
4380 GEN9_PGCTL_SSB_EU210_ACK |
4381 GEN9_PGCTL_SSB_EU311_ACK;
4382
b3e7f866 4383 for (s = 0; s < info->sseu.max_slices; s++) {
f8c3dcf9
RV
4384 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4385 /* skip disabled slice */
4386 continue;
4387
4388 sseu->slice_mask |= BIT(s);
8cc76693 4389 sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
f8c3dcf9 4390
b3e7f866 4391 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
f8c3dcf9
RV
4392 unsigned int eu_cnt;
4393
4394 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4395 /* skip disabled subslice */
4396 continue;
4397
4398 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4399 eu_mask[ss % 2]);
4400 sseu->eu_total += eu_cnt;
4401 sseu->eu_per_subslice = max_t(unsigned int,
4402 sseu->eu_per_subslice,
4403 eu_cnt);
4404 }
4405 }
c7fb3c6c 4406#undef SS_MAX
f8c3dcf9
RV
4407}
4408
36cdd013 4409static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4410 struct sseu_dev_info *sseu)
5d39525a 4411{
c7fb3c6c 4412#define SS_MAX 3
b3e7f866 4413 const struct intel_device_info *info = INTEL_INFO(dev_priv);
c7fb3c6c 4414 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
5d39525a 4415 int s, ss;
1c046bc1 4416
b3e7f866 4417 for (s = 0; s < info->sseu.max_slices; s++) {
1c046bc1
JM
4418 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4419 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4420 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4421 }
4422
5d39525a
JM
4423 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4424 GEN9_PGCTL_SSA_EU19_ACK |
4425 GEN9_PGCTL_SSA_EU210_ACK |
4426 GEN9_PGCTL_SSA_EU311_ACK;
4427 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4428 GEN9_PGCTL_SSB_EU19_ACK |
4429 GEN9_PGCTL_SSB_EU210_ACK |
4430 GEN9_PGCTL_SSB_EU311_ACK;
4431
b3e7f866 4432 for (s = 0; s < info->sseu.max_slices; s++) {
5d39525a
JM
4433 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4434 /* skip disabled slice */
4435 continue;
4436
f08a0c92 4437 sseu->slice_mask |= BIT(s);
1c046bc1 4438
f8c3dcf9 4439 if (IS_GEN9_BC(dev_priv))
8cc76693
LL
4440 sseu->subslice_mask[s] =
4441 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
1c046bc1 4442
b3e7f866 4443 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
5d39525a
JM
4444 unsigned int eu_cnt;
4445
cc3f90f0 4446 if (IS_GEN9_LP(dev_priv)) {
57ec171e
ID
4447 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4448 /* skip disabled subslice */
4449 continue;
1c046bc1 4450
8cc76693 4451 sseu->subslice_mask[s] |= BIT(ss);
57ec171e 4452 }
1c046bc1 4453
5d39525a
JM
4454 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4455 eu_mask[ss%2]);
915490d5
ID
4456 sseu->eu_total += eu_cnt;
4457 sseu->eu_per_subslice = max_t(unsigned int,
4458 sseu->eu_per_subslice,
4459 eu_cnt);
5d39525a
JM
4460 }
4461 }
c7fb3c6c 4462#undef SS_MAX
5d39525a
JM
4463}
4464
36cdd013 4465static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4466 struct sseu_dev_info *sseu)
91bedd34 4467{
91bedd34 4468 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
36cdd013 4469 int s;
91bedd34 4470
f08a0c92 4471 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
91bedd34 4472
f08a0c92 4473 if (sseu->slice_mask) {
43b67998
ID
4474 sseu->eu_per_subslice =
4475 INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
8cc76693
LL
4476 for (s = 0; s < fls(sseu->slice_mask); s++) {
4477 sseu->subslice_mask[s] =
4478 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4479 }
57ec171e
ID
4480 sseu->eu_total = sseu->eu_per_subslice *
4481 sseu_subslice_total(sseu);
91bedd34
ŁD
4482
4483 /* subtract fused off EU(s) from enabled slice(s) */
795b38b3 4484 for (s = 0; s < fls(sseu->slice_mask); s++) {
43b67998
ID
4485 u8 subslice_7eu =
4486 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
91bedd34 4487
915490d5 4488 sseu->eu_total -= hweight8(subslice_7eu);
91bedd34
ŁD
4489 }
4490 }
4491}
4492
615d8908
ID
4493static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4494 const struct sseu_dev_info *sseu)
4495{
4496 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4497 const char *type = is_available_info ? "Available" : "Enabled";
8cc76693 4498 int s;
615d8908 4499
c67ba538
ID
4500 seq_printf(m, " %s Slice Mask: %04x\n", type,
4501 sseu->slice_mask);
615d8908 4502 seq_printf(m, " %s Slice Total: %u\n", type,
f08a0c92 4503 hweight8(sseu->slice_mask));
615d8908 4504 seq_printf(m, " %s Subslice Total: %u\n", type,
57ec171e 4505 sseu_subslice_total(sseu));
8cc76693
LL
4506 for (s = 0; s < fls(sseu->slice_mask); s++) {
4507 seq_printf(m, " %s Slice%i subslices: %u\n", type,
4508 s, hweight8(sseu->subslice_mask[s]));
4509 }
615d8908
ID
4510 seq_printf(m, " %s EU Total: %u\n", type,
4511 sseu->eu_total);
4512 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4513 sseu->eu_per_subslice);
4514
4515 if (!is_available_info)
4516 return;
4517
4518 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4519 if (HAS_POOLED_EU(dev_priv))
4520 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4521
4522 seq_printf(m, " Has Slice Power Gating: %s\n",
4523 yesno(sseu->has_slice_pg));
4524 seq_printf(m, " Has Subslice Power Gating: %s\n",
4525 yesno(sseu->has_subslice_pg));
4526 seq_printf(m, " Has EU Power Gating: %s\n",
4527 yesno(sseu->has_eu_pg));
4528}
4529
3873218f
JM
4530static int i915_sseu_status(struct seq_file *m, void *unused)
4531{
36cdd013 4532 struct drm_i915_private *dev_priv = node_to_i915(m->private);
915490d5 4533 struct sseu_dev_info sseu;
3873218f 4534
36cdd013 4535 if (INTEL_GEN(dev_priv) < 8)
3873218f
JM
4536 return -ENODEV;
4537
4538 seq_puts(m, "SSEU Device Info\n");
615d8908 4539 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
3873218f 4540
7f992aba 4541 seq_puts(m, "SSEU Device Status\n");
915490d5 4542 memset(&sseu, 0, sizeof(sseu));
8cc76693
LL
4543 sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4544 sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4545 sseu.max_eus_per_subslice =
4546 INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
238010ed
DW
4547
4548 intel_runtime_pm_get(dev_priv);
4549
36cdd013 4550 if (IS_CHERRYVIEW(dev_priv)) {
915490d5 4551 cherryview_sseu_device_status(dev_priv, &sseu);
36cdd013 4552 } else if (IS_BROADWELL(dev_priv)) {
915490d5 4553 broadwell_sseu_device_status(dev_priv, &sseu);
f8c3dcf9 4554 } else if (IS_GEN9(dev_priv)) {
915490d5 4555 gen9_sseu_device_status(dev_priv, &sseu);
f8c3dcf9
RV
4556 } else if (INTEL_GEN(dev_priv) >= 10) {
4557 gen10_sseu_device_status(dev_priv, &sseu);
7f992aba 4558 }
238010ed
DW
4559
4560 intel_runtime_pm_put(dev_priv);
4561
615d8908 4562 i915_print_sseu_info(m, false, &sseu);
7f992aba 4563
3873218f
JM
4564 return 0;
4565}
4566
6d794d42
BW
4567static int i915_forcewake_open(struct inode *inode, struct file *file)
4568{
d7a133d8 4569 struct drm_i915_private *i915 = inode->i_private;
6d794d42 4570
d7a133d8 4571 if (INTEL_GEN(i915) < 6)
6d794d42
BW
4572 return 0;
4573
d7a133d8
CW
4574 intel_runtime_pm_get(i915);
4575 intel_uncore_forcewake_user_get(i915);
6d794d42
BW
4576
4577 return 0;
4578}
4579
c43b5634 4580static int i915_forcewake_release(struct inode *inode, struct file *file)
6d794d42 4581{
d7a133d8 4582 struct drm_i915_private *i915 = inode->i_private;
6d794d42 4583
d7a133d8 4584 if (INTEL_GEN(i915) < 6)
6d794d42
BW
4585 return 0;
4586
d7a133d8
CW
4587 intel_uncore_forcewake_user_put(i915);
4588 intel_runtime_pm_put(i915);
6d794d42
BW
4589
4590 return 0;
4591}
4592
4593static const struct file_operations i915_forcewake_fops = {
4594 .owner = THIS_MODULE,
4595 .open = i915_forcewake_open,
4596 .release = i915_forcewake_release,
4597};
4598
317eaa95
L
4599static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4600{
4601 struct drm_i915_private *dev_priv = m->private;
4602 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4603
6fc5d789
LP
4604 /* Synchronize with everything first in case there's been an HPD
4605 * storm, but we haven't finished handling it in the kernel yet
4606 */
4607 synchronize_irq(dev_priv->drm.irq);
4608 flush_work(&dev_priv->hotplug.dig_port_work);
4609 flush_work(&dev_priv->hotplug.hotplug_work);
4610
317eaa95
L
4611 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4612 seq_printf(m, "Detected: %s\n",
4613 yesno(delayed_work_pending(&hotplug->reenable_work)));
4614
4615 return 0;
4616}
4617
4618static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4619 const char __user *ubuf, size_t len,
4620 loff_t *offp)
4621{
4622 struct seq_file *m = file->private_data;
4623 struct drm_i915_private *dev_priv = m->private;
4624 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4625 unsigned int new_threshold;
4626 int i;
4627 char *newline;
4628 char tmp[16];
4629
4630 if (len >= sizeof(tmp))
4631 return -EINVAL;
4632
4633 if (copy_from_user(tmp, ubuf, len))
4634 return -EFAULT;
4635
4636 tmp[len] = '\0';
4637
4638 /* Strip newline, if any */
4639 newline = strchr(tmp, '\n');
4640 if (newline)
4641 *newline = '\0';
4642
4643 if (strcmp(tmp, "reset") == 0)
4644 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4645 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4646 return -EINVAL;
4647
4648 if (new_threshold > 0)
4649 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4650 new_threshold);
4651 else
4652 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4653
4654 spin_lock_irq(&dev_priv->irq_lock);
4655 hotplug->hpd_storm_threshold = new_threshold;
4656 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4657 for_each_hpd_pin(i)
4658 hotplug->stats[i].count = 0;
4659 spin_unlock_irq(&dev_priv->irq_lock);
4660
4661 /* Re-enable hpd immediately if we were in an irq storm */
4662 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4663
4664 return len;
4665}
4666
4667static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4668{
4669 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4670}
4671
4672static const struct file_operations i915_hpd_storm_ctl_fops = {
4673 .owner = THIS_MODULE,
4674 .open = i915_hpd_storm_ctl_open,
4675 .read = seq_read,
4676 .llseek = seq_lseek,
4677 .release = single_release,
4678 .write = i915_hpd_storm_ctl_write
4679};
4680
9a64c650
LP
4681static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4682{
4683 struct drm_i915_private *dev_priv = m->private;
4684
4685 seq_printf(m, "Enabled: %s\n",
4686 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4687
4688 return 0;
4689}
4690
4691static int
4692i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4693{
4694 return single_open(file, i915_hpd_short_storm_ctl_show,
4695 inode->i_private);
4696}
4697
4698static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4699 const char __user *ubuf,
4700 size_t len, loff_t *offp)
4701{
4702 struct seq_file *m = file->private_data;
4703 struct drm_i915_private *dev_priv = m->private;
4704 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4705 char *newline;
4706 char tmp[16];
4707 int i;
4708 bool new_state;
4709
4710 if (len >= sizeof(tmp))
4711 return -EINVAL;
4712
4713 if (copy_from_user(tmp, ubuf, len))
4714 return -EFAULT;
4715
4716 tmp[len] = '\0';
4717
4718 /* Strip newline, if any */
4719 newline = strchr(tmp, '\n');
4720 if (newline)
4721 *newline = '\0';
4722
4723 /* Reset to the "default" state for this system */
4724 if (strcmp(tmp, "reset") == 0)
4725 new_state = !HAS_DP_MST(dev_priv);
4726 else if (kstrtobool(tmp, &new_state) != 0)
4727 return -EINVAL;
4728
4729 DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4730 new_state ? "En" : "Dis");
4731
4732 spin_lock_irq(&dev_priv->irq_lock);
4733 hotplug->hpd_short_storm_enabled = new_state;
4734 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4735 for_each_hpd_pin(i)
4736 hotplug->stats[i].count = 0;
4737 spin_unlock_irq(&dev_priv->irq_lock);
4738
4739 /* Re-enable hpd immediately if we were in an irq storm */
4740 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4741
4742 return len;
4743}
4744
4745static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4746 .owner = THIS_MODULE,
4747 .open = i915_hpd_short_storm_ctl_open,
4748 .read = seq_read,
4749 .llseek = seq_lseek,
4750 .release = single_release,
4751 .write = i915_hpd_short_storm_ctl_write,
4752};
4753
35954e88
R
4754static int i915_drrs_ctl_set(void *data, u64 val)
4755{
4756 struct drm_i915_private *dev_priv = data;
4757 struct drm_device *dev = &dev_priv->drm;
138bdac8 4758 struct intel_crtc *crtc;
35954e88
R
4759
4760 if (INTEL_GEN(dev_priv) < 7)
4761 return -ENODEV;
4762
138bdac8
ML
4763 for_each_intel_crtc(dev, crtc) {
4764 struct drm_connector_list_iter conn_iter;
4765 struct intel_crtc_state *crtc_state;
4766 struct drm_connector *connector;
4767 struct drm_crtc_commit *commit;
4768 int ret;
4769
4770 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
4771 if (ret)
4772 return ret;
4773
4774 crtc_state = to_intel_crtc_state(crtc->base.state);
4775
4776 if (!crtc_state->base.active ||
4777 !crtc_state->has_drrs)
4778 goto out;
35954e88 4779
138bdac8
ML
4780 commit = crtc_state->base.commit;
4781 if (commit) {
4782 ret = wait_for_completion_interruptible(&commit->hw_done);
4783 if (ret)
4784 goto out;
4785 }
4786
4787 drm_connector_list_iter_begin(dev, &conn_iter);
4788 drm_for_each_connector_iter(connector, &conn_iter) {
4789 struct intel_encoder *encoder;
4790 struct intel_dp *intel_dp;
4791
4792 if (!(crtc_state->base.connector_mask &
4793 drm_connector_mask(connector)))
4794 continue;
4795
4796 encoder = intel_attached_encoder(connector);
35954e88
R
4797 if (encoder->type != INTEL_OUTPUT_EDP)
4798 continue;
4799
4800 DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4801 val ? "en" : "dis", val);
4802
4803 intel_dp = enc_to_intel_dp(&encoder->base);
4804 if (val)
4805 intel_edp_drrs_enable(intel_dp,
138bdac8 4806 crtc_state);
35954e88
R
4807 else
4808 intel_edp_drrs_disable(intel_dp,
138bdac8 4809 crtc_state);
35954e88 4810 }
138bdac8
ML
4811 drm_connector_list_iter_end(&conn_iter);
4812
4813out:
4814 drm_modeset_unlock(&crtc->base.mutex);
4815 if (ret)
4816 return ret;
35954e88 4817 }
35954e88
R
4818
4819 return 0;
4820}
4821
4822DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4823
d52ad9cb
ML
4824static ssize_t
4825i915_fifo_underrun_reset_write(struct file *filp,
4826 const char __user *ubuf,
4827 size_t cnt, loff_t *ppos)
4828{
4829 struct drm_i915_private *dev_priv = filp->private_data;
4830 struct intel_crtc *intel_crtc;
4831 struct drm_device *dev = &dev_priv->drm;
4832 int ret;
4833 bool reset;
4834
4835 ret = kstrtobool_from_user(ubuf, cnt, &reset);
4836 if (ret)
4837 return ret;
4838
4839 if (!reset)
4840 return cnt;
4841
4842 for_each_intel_crtc(dev, intel_crtc) {
4843 struct drm_crtc_commit *commit;
4844 struct intel_crtc_state *crtc_state;
4845
4846 ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4847 if (ret)
4848 return ret;
4849
4850 crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4851 commit = crtc_state->base.commit;
4852 if (commit) {
4853 ret = wait_for_completion_interruptible(&commit->hw_done);
4854 if (!ret)
4855 ret = wait_for_completion_interruptible(&commit->flip_done);
4856 }
4857
4858 if (!ret && crtc_state->base.active) {
4859 DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4860 pipe_name(intel_crtc->pipe));
4861
4862 intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4863 }
4864
4865 drm_modeset_unlock(&intel_crtc->base.mutex);
4866
4867 if (ret)
4868 return ret;
4869 }
4870
4871 ret = intel_fbc_reset_underrun(dev_priv);
4872 if (ret)
4873 return ret;
4874
4875 return cnt;
4876}
4877
4878static const struct file_operations i915_fifo_underrun_reset_ops = {
4879 .owner = THIS_MODULE,
4880 .open = simple_open,
4881 .write = i915_fifo_underrun_reset_write,
4882 .llseek = default_llseek,
4883};
4884
06c5bf8c 4885static const struct drm_info_list i915_debugfs_list[] = {
311bd68e 4886 {"i915_capabilities", i915_capabilities, 0},
73aa808f 4887 {"i915_gem_objects", i915_gem_object_info, 0},
08c18323 4888 {"i915_gem_gtt", i915_gem_gtt_info, 0},
6d2b8885 4889 {"i915_gem_stolen", i915_gem_stolen_list_info },
a6172a80 4890 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
2017263e 4891 {"i915_gem_interrupt", i915_interrupt_info, 0},
493018dc 4892 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
8b417c26 4893 {"i915_guc_info", i915_guc_info, 0},
fdf5d357 4894 {"i915_guc_load_status", i915_guc_load_status_info, 0},
4c7e77fc 4895 {"i915_guc_log_dump", i915_guc_log_dump, 0},
ac58d2ab 4896 {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
a8b9370f 4897 {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
0509ead1 4898 {"i915_huc_load_status", i915_huc_load_status_info, 0},
adb4bd12 4899 {"i915_frequency_info", i915_frequency_info, 0},
f654449a 4900 {"i915_hangcheck_info", i915_hangcheck_info, 0},
061d06a2 4901 {"i915_reset_info", i915_reset_info, 0},
f97108d1 4902 {"i915_drpc_info", i915_drpc_info, 0},
7648fa99 4903 {"i915_emon_status", i915_emon_status, 0},
23b2f8bb 4904 {"i915_ring_freq_table", i915_ring_freq_table, 0},
9a851789 4905 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
b5e50c3f 4906 {"i915_fbc_status", i915_fbc_status, 0},
92d44621 4907 {"i915_ips_status", i915_ips_status, 0},
4a9bef37 4908 {"i915_sr_status", i915_sr_status, 0},
44834a67 4909 {"i915_opregion", i915_opregion, 0},
ada8f955 4910 {"i915_vbt", i915_vbt, 0},
37811fcc 4911 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
e76d3630 4912 {"i915_context_status", i915_context_status, 0},
f65367b5 4913 {"i915_forcewake_domains", i915_forcewake_domains, 0},
ea16a3cd 4914 {"i915_swizzle_info", i915_swizzle_info, 0},
3cf17fc5 4915 {"i915_ppgtt_info", i915_ppgtt_info, 0},
63573eb7 4916 {"i915_llc", i915_llc, 0},
e91fd8c6 4917 {"i915_edp_psr_status", i915_edp_psr_status, 0},
ec013e7f 4918 {"i915_energy_uJ", i915_energy_uJ, 0},
6455c870 4919 {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
1da51581 4920 {"i915_power_domain_info", i915_power_domain_info, 0},
b7cec66d 4921 {"i915_dmc_info", i915_dmc_info, 0},
53f5e3ca 4922 {"i915_display_info", i915_display_info, 0},
1b36595f 4923 {"i915_engine_info", i915_engine_info, 0},
79e9cd5f 4924 {"i915_rcs_topology", i915_rcs_topology, 0},
c5418a8b 4925 {"i915_shrinker_info", i915_shrinker_info, 0},
728e29d7 4926 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
11bed958 4927 {"i915_dp_mst_info", i915_dp_mst_info, 0},
1ed1ef9d 4928 {"i915_wa_registers", i915_wa_registers, 0},
c5511e44 4929 {"i915_ddb_info", i915_ddb_info, 0},
3873218f 4930 {"i915_sseu_status", i915_sseu_status, 0},
a54746e3 4931 {"i915_drrs_status", i915_drrs_status, 0},
1854d5ca 4932 {"i915_rps_boost_info", i915_rps_boost_info, 0},
2017263e 4933};
27c202ad 4934#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2017263e 4935
06c5bf8c 4936static const struct i915_debugfs_files {
34b9674c
DV
4937 const char *name;
4938 const struct file_operations *fops;
4939} i915_debugfs_files[] = {
4940 {"i915_wedged", &i915_wedged_fops},
34b9674c 4941 {"i915_cache_sharing", &i915_cache_sharing_fops},
094f9a54
CW
4942 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4943 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
34b9674c 4944 {"i915_gem_drop_caches", &i915_drop_caches_fops},
98a2f411 4945#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
34b9674c 4946 {"i915_error_state", &i915_error_state_fops},
5a4c6f1b 4947 {"i915_gpu_info", &i915_gpu_info_fops},
98a2f411 4948#endif
d52ad9cb 4949 {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
34b9674c 4950 {"i915_next_seqno", &i915_next_seqno_fops},
369a1342
VS
4951 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4952 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4953 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4127dc43 4954 {"i915_fbc_false_color", &i915_fbc_false_color_fops},
eb3394fa
TP
4955 {"i915_dp_test_data", &i915_displayport_test_data_fops},
4956 {"i915_dp_test_type", &i915_displayport_test_type_fops},
685534ef 4957 {"i915_dp_test_active", &i915_displayport_test_active_fops},
4977a287
MW
4958 {"i915_guc_log_level", &i915_guc_log_level_fops},
4959 {"i915_guc_log_relay", &i915_guc_log_relay_fops},
d2d4f39b 4960 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
9a64c650 4961 {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
35954e88 4962 {"i915_ipc_status", &i915_ipc_status_fops},
54fd3149
DP
4963 {"i915_drrs_ctl", &i915_drrs_ctl_fops},
4964 {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
34b9674c
DV
4965};
4966
1dac891c 4967int i915_debugfs_register(struct drm_i915_private *dev_priv)
2017263e 4968{
91c8a326 4969 struct drm_minor *minor = dev_priv->drm.primary;
b05eeb0f 4970 struct dentry *ent;
6cc42152 4971 int i;
f3cd474b 4972
b05eeb0f
NT
4973 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4974 minor->debugfs_root, to_i915(minor->dev),
4975 &i915_forcewake_fops);
4976 if (!ent)
4977 return -ENOMEM;
6a9c308d 4978
34b9674c 4979 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
b05eeb0f
NT
4980 ent = debugfs_create_file(i915_debugfs_files[i].name,
4981 S_IRUGO | S_IWUSR,
4982 minor->debugfs_root,
4983 to_i915(minor->dev),
34b9674c 4984 i915_debugfs_files[i].fops);
b05eeb0f
NT
4985 if (!ent)
4986 return -ENOMEM;
34b9674c 4987 }
40633219 4988
27c202ad
BG
4989 return drm_debugfs_create_files(i915_debugfs_list,
4990 I915_DEBUGFS_ENTRIES,
2017263e
BG
4991 minor->debugfs_root, minor);
4992}
4993
aa7471d2
JN
4994struct dpcd_block {
4995 /* DPCD dump start address. */
4996 unsigned int offset;
4997 /* DPCD dump end address, inclusive. If unset, .size will be used. */
4998 unsigned int end;
4999 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
5000 size_t size;
5001 /* Only valid for eDP. */
5002 bool edp;
5003};
5004
5005static const struct dpcd_block i915_dpcd_debug[] = {
5006 { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
5007 { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
5008 { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
5009 { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
5010 { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
5011 { .offset = DP_SET_POWER },
5012 { .offset = DP_EDP_DPCD_REV },
5013 { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
5014 { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
5015 { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
5016};
5017
5018static int i915_dpcd_show(struct seq_file *m, void *data)
5019{
5020 struct drm_connector *connector = m->private;
5021 struct intel_dp *intel_dp =
5022 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
5023 uint8_t buf[16];
5024 ssize_t err;
5025 int i;
5026
5c1a8875
MK
5027 if (connector->status != connector_status_connected)
5028 return -ENODEV;
5029
aa7471d2
JN
5030 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
5031 const struct dpcd_block *b = &i915_dpcd_debug[i];
5032 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
5033
5034 if (b->edp &&
5035 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
5036 continue;
5037
5038 /* low tech for now */
5039 if (WARN_ON(size > sizeof(buf)))
5040 continue;
5041
5042 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
65404c89
CW
5043 if (err < 0)
5044 seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
5045 else
5046 seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
b3f9d7d7 5047 }
aa7471d2
JN
5048
5049 return 0;
5050}
e4006713 5051DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
aa7471d2 5052
ecbd6781
DW
5053static int i915_panel_show(struct seq_file *m, void *data)
5054{
5055 struct drm_connector *connector = m->private;
5056 struct intel_dp *intel_dp =
5057 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
5058
5059 if (connector->status != connector_status_connected)
5060 return -ENODEV;
5061
5062 seq_printf(m, "Panel power up delay: %d\n",
5063 intel_dp->panel_power_up_delay);
5064 seq_printf(m, "Panel power down delay: %d\n",
5065 intel_dp->panel_power_down_delay);
5066 seq_printf(m, "Backlight on delay: %d\n",
5067 intel_dp->backlight_on_delay);
5068 seq_printf(m, "Backlight off delay: %d\n",
5069 intel_dp->backlight_off_delay);
5070
5071 return 0;
5072}
e4006713 5073DEFINE_SHOW_ATTRIBUTE(i915_panel);
ecbd6781 5074
bdc93fe0
R
5075static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
5076{
5077 struct drm_connector *connector = m->private;
5078 struct intel_connector *intel_connector = to_intel_connector(connector);
5079
5080 if (connector->status != connector_status_connected)
5081 return -ENODEV;
5082
5083 /* HDCP is supported by connector */
d3dacc70 5084 if (!intel_connector->hdcp.shim)
bdc93fe0
R
5085 return -EINVAL;
5086
5087 seq_printf(m, "%s:%d HDCP version: ", connector->name,
5088 connector->base.id);
5089 seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
5090 "None" : "HDCP1.4");
5091 seq_puts(m, "\n");
5092
5093 return 0;
5094}
5095DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
5096
aa7471d2
JN
5097/**
5098 * i915_debugfs_connector_add - add i915 specific connector debugfs files
5099 * @connector: pointer to a registered drm_connector
5100 *
5101 * Cleanup will be done by drm_connector_unregister() through a call to
5102 * drm_debugfs_connector_remove().
5103 *
5104 * Returns 0 on success, negative error codes on error.
5105 */
5106int i915_debugfs_connector_add(struct drm_connector *connector)
5107{
5108 struct dentry *root = connector->debugfs_entry;
5109
5110 /* The connector must have been registered beforehands. */
5111 if (!root)
5112 return -ENODEV;
5113
5114 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5115 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
ecbd6781
DW
5116 debugfs_create_file("i915_dpcd", S_IRUGO, root,
5117 connector, &i915_dpcd_fops);
5118
5b7b3086 5119 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
ecbd6781
DW
5120 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
5121 connector, &i915_panel_fops);
5b7b3086
DP
5122 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
5123 connector, &i915_psr_sink_status_fops);
5124 }
aa7471d2 5125
bdc93fe0
R
5126 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5127 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5128 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
5129 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
5130 connector, &i915_hdcp_sink_capability_fops);
5131 }
5132
aa7471d2
JN
5133 return 0;
5134}