]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame - drivers/gpu/drm/i915/i915_debugfs.c
drm/i915/dp: Link train Fallback on eDP only if fallback link BW can fit panel's...
[mirror_ubuntu-focal-kernel.git] / drivers / gpu / drm / i915 / i915_debugfs.c
CommitLineData
2017263e
BG
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
28
f3cd474b 29#include <linux/debugfs.h>
e637d2cb 30#include <linux/sort.h>
d92a8cfc 31#include <linux/sched/mm.h>
4e5359cd 32#include "intel_drv.h"
a2695744 33#include "intel_guc_submission.h"
2017263e 34
/* Map a debugfs info node back to the i915 device private it belongs to. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
39
/*
 * Dump the static device capabilities (gen, platform, PCH type), the
 * device-info flags, runtime info, driver caps and the current module
 * parameters into the debugfs seq_file.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(info, &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	/* Hold the param lock so the values cannot change mid-dump. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
2017263e 60
/* '*' if the object is still active on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}
65
/* 'p' if the object is pinned globally (e.g. for scanout), ' ' otherwise. */
static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_global ? 'p' : ' ';
}
70
/* One-character tiling mode: ' ' none, 'X' for X-tiled, 'Y' for Y-tiled. */
static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}
80
/* 'g' if the object has outstanding CPU mmap (GTT) userfaults, ' ' otherwise. */
static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->userfault_count ? 'g' : ' ';
}
85
/* 'M' if the object's pages are kernel-mapped (i915_gem_object_pin_map). */
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}
90
/*
 * Sum the sizes of all GGTT VMA nodes actually allocated for this object.
 * Only VMAs with a drm_mm node backing them contribute to the total.
 */
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	for_each_ggtt_vma(vma, obj) {
		if (drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}
103
7393b7ee
MA
104static const char *
105stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106{
107 size_t x = 0;
108
109 switch (page_sizes) {
110 case 0:
111 return "";
112 case I915_GTT_PAGE_SIZE_4K:
113 return "4K";
114 case I915_GTT_PAGE_SIZE_64K:
115 return "64K";
116 case I915_GTT_PAGE_SIZE_2M:
117 return "2M";
118 default:
119 if (!buf)
120 return "M";
121
122 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123 x += snprintf(buf + x, len - x, "2M, ");
124 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125 x += snprintf(buf + x, len - x, "64K, ");
126 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127 x += snprintf(buf + x, len - x, "4K, ");
128 buf[x-2] = '\0';
129
130 return buf;
131 }
132}
133
/*
 * Print a single-line (plus per-VMA detail) description of a GEM object:
 * status flags, size, cache domains, per-VMA GTT placement (including GGTT
 * view type, fences and page sizes), stolen-memory offset, the engine that
 * last wrote it and any frontbuffer tracking bits.
 *
 * Caller must hold struct_mutex (asserted below) so the VMA list is stable.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass: count pinned VMAs. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each VMA that actually has GTT space. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
221
/*
 * sort() comparator: order GEM objects by their start offset within
 * stolen memory. A and B point at elements of an array of object pointers.
 */
static int obj_rank_by_stolen(const void *A, const void *B)
{
	const struct drm_i915_gem_object *a =
		*(const struct drm_i915_gem_object **)A;
	const struct drm_i915_gem_object *b =
		*(const struct drm_i915_gem_object **)B;

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}
235
/*
 * List all GEM objects backed by stolen memory, sorted by stolen offset,
 * followed by a size summary. Object pointers are snapshotted under
 * mm.obj_lock into a pre-sized array (capped at the object count sampled
 * before allocation) so describe_obj() can run without holding the
 * spinlock; struct_mutex protects the objects themselves.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	/* Unbound objects contribute size but, by definition, no GTT space. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
298
/*
 * Accumulator for per-client GEM statistics; filled by per_file_stats()
 * and printed with print_file_stats().
 */
struct file_stats {
	struct drm_i915_file_private *file_priv; /* owner; filters ppGTT VMAs */
	unsigned long count;   /* number of objects visited */
	u64 total, unbound;    /* total size / size with no binding */
	u64 global, shared;    /* GGTT-bound size / named or dma-buf exported size */
	u64 active, inactive;  /* bound VMA size split by GPU activity */
};
306
/*
 * idr_for_each() callback: fold one GEM object into a struct file_stats.
 * ppGTT VMAs belonging to a different client than stats->file_priv are
 * skipped so shared objects are only charged to their owner's ppGTT.
 * Requires struct_mutex (asserted) for the VMA list walk.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/* Only count ppGTT bindings made by this client. */
			if (ppgtt->vm.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
343
/*
 * Emit one summary line for a file_stats accumulator; the line is skipped
 * entirely when no objects were counted. A macro (not a function) so the
 * stats struct can be passed by value from each call site.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
/*
 * Accumulate and print statistics for every object held in the per-engine
 * batch-buffer pools (kernel-owned, hence the "[k]" prefix).
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
379
/*
 * idr_for_each() callback: account the context-state and ringbuffer
 * objects of one GEM context, across all engines, into a file_stats.
 */
static int per_file_ctx_stats(int idx, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, ctx->i915, id) {
		struct intel_context *ce = to_intel_context(ctx, engine);

		if (ce->state)
			per_file_stats(0, ce->state->obj, data);
		if (ce->ring)
			per_file_stats(0, ce->ring->vma->obj, data);
	}

	return 0;
}
397
/*
 * Print aggregate statistics for all context objects: the kernel context
 * plus every context of every open DRM file. struct_mutex protects the
 * context walks; the caller already holds filelist_mutex for filelist.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
419
/*
 * Top-level i915_gem_objects debugfs entry: global object/size totals,
 * breakdowns for unbound vs bound lists (purgeable, mapped, huge-paged,
 * display-pinned), GGTT totals and supported page sizes, then per-client
 * statistics attributed to the owning task.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		/* Track objects backed by pages larger than the minimum 4K. */
		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Reset per-list counters; purgeable/mapped/huge keep accumulating. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct i915_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->gem_context->pid ?
				request->gem_context->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
557
aee56cff 558static int i915_gem_gtt_info(struct seq_file *m, void *data)
08c18323 559{
9f25d007 560 struct drm_info_node *node = m->private;
36cdd013
DW
561 struct drm_i915_private *dev_priv = node_to_i915(node);
562 struct drm_device *dev = &dev_priv->drm;
f2123818 563 struct drm_i915_gem_object **objects;
08c18323 564 struct drm_i915_gem_object *obj;
c44ef60e 565 u64 total_obj_size, total_gtt_size;
f2123818 566 unsigned long nobject, n;
08c18323
CW
567 int count, ret;
568
f2123818
CW
569 nobject = READ_ONCE(dev_priv->mm.object_count);
570 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571 if (!objects)
572 return -ENOMEM;
573
08c18323
CW
574 ret = mutex_lock_interruptible(&dev->struct_mutex);
575 if (ret)
576 return ret;
577
f2123818
CW
578 count = 0;
579 spin_lock(&dev_priv->mm.obj_lock);
580 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581 objects[count++] = obj;
582 if (count == nobject)
583 break;
584 }
585 spin_unlock(&dev_priv->mm.obj_lock);
586
587 total_obj_size = total_gtt_size = 0;
588 for (n = 0; n < count; n++) {
589 obj = objects[n];
590
267f0c90 591 seq_puts(m, " ");
08c18323 592 describe_obj(m, obj);
267f0c90 593 seq_putc(m, '\n');
08c18323 594 total_obj_size += obj->base.size;
ca1543be 595 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
08c18323
CW
596 }
597
598 mutex_unlock(&dev->struct_mutex);
599
c44ef60e 600 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
08c18323 601 count, total_obj_size, total_gtt_size);
f2123818 602 kvfree(objects);
08c18323
CW
603
604 return 0;
605}
606
/*
 * Dump the contents of each engine's batch-buffer pool: a per-cache-bucket
 * object count followed by a describe_obj() line per object, then the
 * grand total. struct_mutex protects the pool lists and describe_obj().
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First walk just counts for the header line... */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			/* ...second walk prints each object. */
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
651
/*
 * Dump the gen8+ display interrupt registers: per-pipe IMR/IIR/IER
 * (skipping pipes whose power well is down), the DE port and misc
 * register triplets and the PCU interrupt registers. Shared by the
 * gen8 and gen11 branches of i915_interrupt_info().
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		/* Reading registers of a powered-down pipe would fault/hang. */
		if (!intel_display_power_get_if_enabled(dev_priv,
							power_domain)) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
701
/*
 * Dump the interrupt registers, selecting the register layout by platform:
 * Cherryview, gen11+, gen8+, Valleyview, pre-PCH-split (gen2-4), or
 * Ironlake+ PCH split. A runtime-pm wakeref is held for the whole dump so
 * every register read hits powered-up hardware; individual display pipes
 * are additionally gated on their power wells.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}

	/* Engine interrupt masks, common tail for gen6+. */
	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
914
/*
 * List every hardware fence register: its pin count and, if currently
 * assigned to a VMA, a describe_obj() line for the backing object.
 * struct_mutex keeps the fence<->vma assignments stable during the walk.
 */
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
941
98a2f411 942#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
5a4c6f1b
CW
/*
 * gpu_state_read - .read callback for a captured GPU error state
 *
 * Formats the capture held in file->private_data into a temporary
 * error-state string buffer seeked to *pos, then copies up to @count
 * bytes to userspace.  Returns bytes copied, 0 when no capture exists,
 * or a negative errno.
 */
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	if (!error)
		return 0;

	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	/*
	 * buf_init already positioned the buffer at *pos, so copy from its
	 * start (tmp = 0) and advance the real file offset by hand below.
	 */
	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}
edc3d884 972
5a4c6f1b
CW
973static int gpu_state_release(struct inode *inode, struct file *file)
974{
975 i915_gpu_state_put(file->private_data);
edc3d884 976 return 0;
d5442303
DV
977}
978
5a4c6f1b 979static int i915_gpu_info_open(struct inode *inode, struct file *file)
d5442303 980{
090e5fe3 981 struct drm_i915_private *i915 = inode->i_private;
5a4c6f1b 982 struct i915_gpu_state *gpu;
d5442303 983
090e5fe3
CW
984 intel_runtime_pm_get(i915);
985 gpu = i915_capture_gpu_state(i915);
986 intel_runtime_pm_put(i915);
5a4c6f1b
CW
987 if (!gpu)
988 return -ENOMEM;
d5442303 989
5a4c6f1b 990 file->private_data = gpu;
edc3d884
MK
991 return 0;
992}
993
5a4c6f1b
CW
/* debugfs "i915_gpu_info": capture-on-open snapshot of the GPU state */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
1001
/*
 * Writing anything to the error-state file discards the currently
 * recorded error capture so a new hang can be recorded.  The payload
 * itself is ignored; the full count is reported as consumed.
 */
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}
edc3d884 1018
5a4c6f1b
CW
1019static int i915_error_state_open(struct inode *inode, struct file *file)
1020{
1021 file->private_data = i915_first_error_state(inode->i_private);
1022 return 0;
d5442303
DV
1023}
1024
/* debugfs "i915_error_state": read the last capture, write to clear it */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
98a2f411
CW
1033#endif
1034
647416f9
KC
/*
 * i915_next_seqno_set - debugfs hook to force the global seqno
 * @data: drm_i915_private
 * @val: new seqno value
 *
 * Advances the global seqno to @val under struct_mutex; the device is
 * runtime-resumed around the call since retiring outstanding requests
 * may need to touch hardware.
 */
static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);
	ret = i915_gem_set_global_seqno(dev, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/* write-only attribute: reading the next seqno is not supported (NULL get) */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");
40633219 1058
/*
 * i915_frequency_info - dump GPU frequency / RPS state
 *
 * Three generation-specific paths:
 *  - GEN5 (Ironlake): decode MEMSWCTL/MEMSTAT_ILK P-state fields.
 *  - VLV/CHV: read the punit frequency status under pcu_lock.
 *  - GEN6+: dump the full RPS register set; RPSTAT1 and friends live in
 *    the GT power well, so they are read inside a forcewake bracket.
 * The device is held runtime-resumed for the whole dump.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		/* punit access is serialised by pcu_lock */
		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		/* Requested frequency field moved per generation */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		/* RPN/RP1/RP0 fields swap places on GEN9_LP (Broxton) */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
1280
d636951e
BW
/*
 * i915_instdone_info - pretty-print an intel_instdone snapshot
 *
 * Emits only the fields that exist on the running generation: base
 * INSTDONE always, SC_INSTDONE from gen4, and per-slice/subslice
 * sampler/row registers from gen7 onwards.
 */
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}
1308
f654449a
CW
/*
 * i915_hangcheck_info - report hangcheck / GPU-hang bookkeeping
 *
 * First dumps global error flags (wedged, reset in progress, waiters),
 * then — with the device runtime-resumed — samples per-engine ACTHD,
 * seqno and the render engine's instdone, and finally prints per-engine
 * hangcheck state including the list of breadcrumb waiters (walked
 * under the breadcrumbs rb_lock).
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sampling ACTHD/seqno/instdone touches hardware */
	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine));
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled),
			   yesno(engine->hangcheck.wedged));

		/* walk the waiter rbtree under its lock */
		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		/* instdone is only captured for the render engine */
		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1403
061d06a2
MT
1404static int i915_reset_info(struct seq_file *m, void *unused)
1405{
1406 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1407 struct i915_gpu_error *error = &dev_priv->gpu_error;
1408 struct intel_engine_cs *engine;
1409 enum intel_engine_id id;
1410
1411 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1412
1413 for_each_engine(engine, dev_priv, id) {
1414 seq_printf(m, "%s = %u\n", engine->name,
1415 i915_reset_engine_count(error, engine));
1416 }
1417
1418 return 0;
1419}
1420
/*
 * ironlake_drpc_info - decode Ironlake render-standby (DRPC) state
 *
 * Reads MEMMODECTL/RSTDBYCTL/CRSTANDVID once and pretty-prints the
 * boost, P-state and render-standby fields, ending with the current
 * RSX state decoded from RSTDBYCTL.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1477
/*
 * i915_forcewake_domains - list forcewake reference counts
 *
 * Prints the user-forcewake bypass count followed by the wake count of
 * every forcewake domain.  Counts are sampled with READ_ONCE; they may
 * be stale by the time they are printed (debug output only).
 */
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   i915->uncore.user_forcewake.count);

	for_each_fw_domain(fw_domain, i915, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}
1494
1362877e
MK
1495static void print_rc6_res(struct seq_file *m,
1496 const char *title,
1497 const i915_reg_t reg)
1498{
1499 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1500
1501 seq_printf(m, "%s %u (%llu us)\n",
1502 title, I915_READ(reg),
1503 intel_rc6_residency_us(dev_priv, reg));
1504}
1505
b2cff0db
CW
/*
 * vlv_drpc_info - Valleyview/Cherryview RC6 and power-well state
 *
 * Decodes GEN6_RC_CONTROL and the GT power-well status register, dumps
 * the render/media RC6 residency counters, and chains to the forcewake
 * domain listing.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}
1527
4d85529d
BW
/*
 * gen6_drpc_info - RC6/power-gating state for gen6+
 *
 * Samples GEN6_GT_CORE_STATUS (raw, so the access is traced by hand),
 * RC control and — on gen9+ — the power-gating registers.  The RC6 VID
 * values come from a pcode mailbox read, which only exists up to gen7
 * and must be serialised by pcu_lock.  Ends with residency counters and
 * the forcewake domain listing.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* _FW read bypasses forcewake bookkeeping; trace it manually */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1615
1616static int i915_drpc_info(struct seq_file *m, void *unused)
1617{
36cdd013 1618 struct drm_i915_private *dev_priv = node_to_i915(m->private);
cf632bd6
CW
1619 int err;
1620
1621 intel_runtime_pm_get(dev_priv);
4d85529d 1622
36cdd013 1623 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
cf632bd6 1624 err = vlv_drpc_info(m);
36cdd013 1625 else if (INTEL_GEN(dev_priv) >= 6)
cf632bd6 1626 err = gen6_drpc_info(m);
4d85529d 1627 else
cf632bd6
CW
1628 err = ironlake_drpc_info(m);
1629
1630 intel_runtime_pm_put(dev_priv);
1631
1632 return err;
4d85529d
BW
1633}
1634
9a851789
DV
1635static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1636{
36cdd013 1637 struct drm_i915_private *dev_priv = node_to_i915(m->private);
9a851789
DV
1638
1639 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1640 dev_priv->fb_tracking.busy_bits);
1641
1642 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1643 dev_priv->fb_tracking.flip_bits);
1644
1645 return 0;
1646}
1647
b5e50c3f
JB
/*
 * i915_fbc_status - report framebuffer-compression state
 *
 * With the device awake and fbc->lock held, reports whether FBC is
 * active (or why not) and, when active, whether the hardware is
 * currently compressing — read from the generation-specific status
 * register.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		/* compressed-segment field location varies by generation */
		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
1687
4127dc43 1688static int i915_fbc_false_color_get(void *data, u64 *val)
da46f936 1689{
36cdd013 1690 struct drm_i915_private *dev_priv = data;
da46f936 1691
36cdd013 1692 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
da46f936
RV
1693 return -ENODEV;
1694
da46f936 1695 *val = dev_priv->fbc.false_color;
da46f936
RV
1696
1697 return 0;
1698}
1699
/*
 * i915_fbc_false_color_set - toggle FBC false-color debug mode
 *
 * Read-modify-writes the FALSE_COLOR bit in ILK_DPFC_CONTROL under
 * fbc.lock and mirrors the setting in dev_priv->fbc.false_color.
 */
static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

/* debugfs attribute wiring for the false-color get/set pair */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1724
92d44621
PZ
/*
 * i915_ips_status - report Intermediate Pixel Storage state
 *
 * Prints the module-parameter setting and, where readable (pre-gen8),
 * the live IPS_CTL enable bit.  On gen8+ the control register is not
 * directly readable, so the current state is reported as unknown.
 */
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
1750
4a9bef37
JB
/*
 * i915_sr_status - report whether panel self-refresh is enabled
 *
 * Reads the platform-specific self-refresh enable bit; gen9+ has no
 * global SR status so nothing is inspected there.  Display power and a
 * runtime-pm reference are held around the register reads.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1780
7648fa99
JB
/*
 * debugfs: report Ironlake (gen5) energy-monitor readings — GMCH
 * temperature plus chipset and GFX power draw.
 */
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long temp, chipset, gfx;
	int ret;

	/* The MCH sampling interface only exists on gen5. */
	if (!IS_GEN5(dev_priv))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* Sample all three values under the same lock hold. */
	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
1807
23b2f8bb
JB
/*
 * debugfs: print the GPU-to-CPU/ring frequency mapping table as read
 * from the PCU, one row per GPU frequency step.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	int gpu_freq, ia_freq;
	int ret;

	/* The ring/IA frequency table is only meaningful with an LLC. */
	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* ia_freq is in/out: pcode returns packed CPU/ring freqs. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	/* ret is 0 here on success (set by the interruptible lock). */
	intel_runtime_pm_put(dev_priv);
	return ret;
}
1855
44834a67
CW
1856static int i915_opregion(struct seq_file *m, void *unused)
1857{
36cdd013
DW
1858 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1859 struct drm_device *dev = &dev_priv->drm;
44834a67
CW
1860 struct intel_opregion *opregion = &dev_priv->opregion;
1861 int ret;
1862
1863 ret = mutex_lock_interruptible(&dev->struct_mutex);
1864 if (ret)
0d38f009 1865 goto out;
44834a67 1866
2455a8e4
JN
1867 if (opregion->header)
1868 seq_write(m, opregion->header, OPREGION_SIZE);
44834a67
CW
1869
1870 mutex_unlock(&dev->struct_mutex);
1871
0d38f009 1872out:
44834a67
CW
1873 return 0;
1874}
1875
ada8f955
JN
1876static int i915_vbt(struct seq_file *m, void *unused)
1877{
36cdd013 1878 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
ada8f955
JN
1879
1880 if (opregion->vbt)
1881 seq_write(m, opregion->vbt, opregion->vbt_size);
1882
1883 return 0;
1884}
1885
37811fcc
CW
/*
 * debugfs: list every framebuffer known to the device — the fbdev/fbcon
 * one first (when fbdev emulation is compiled in), then all others.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	/* fb_lock protects the mode_config framebuffer list below. */
	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		/* The fbdev framebuffer was already printed above. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1935
7e37f889 1936static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
c9fe99bd 1937{
ef5032a0
CW
1938 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1939 ring->space, ring->head, ring->tail, ring->emit);
c9fe99bd
OM
1940}
1941
e76d3630
BW
/*
 * debugfs: dump every HW context — owner process, hw_id pinning, and
 * per-engine state object / ring buffer.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_puts(m, "HW context ");
		/* Only contexts currently holding a hw_id are on this list. */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			/* get_pid_task takes a task reference; drop it below. */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' = remap_slice set, 'r' = clear. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1997
ea16a3cd
DV
1998static const char *swizzle_string(unsigned swizzle)
1999{
aee56cff 2000 switch (swizzle) {
ea16a3cd
DV
2001 case I915_BIT_6_SWIZZLE_NONE:
2002 return "none";
2003 case I915_BIT_6_SWIZZLE_9:
2004 return "bit9";
2005 case I915_BIT_6_SWIZZLE_9_10:
2006 return "bit9/bit10";
2007 case I915_BIT_6_SWIZZLE_9_11:
2008 return "bit9/bit11";
2009 case I915_BIT_6_SWIZZLE_9_10_11:
2010 return "bit9/bit10/bit11";
2011 case I915_BIT_6_SWIZZLE_9_17:
2012 return "bit9/bit17";
2013 case I915_BIT_6_SWIZZLE_9_10_17:
2014 return "bit9/bit10/bit17";
2015 case I915_BIT_6_SWIZZLE_UNKNOWN:
8a168ca7 2016 return "unknown";
ea16a3cd
DV
2017 }
2018
2019 return "bug";
2020}
2021
/*
 * debugfs: report the detected bit-6 swizzle modes and the raw memory
 * controller registers they were derived from.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	/* gen3/4: DRAM channel config registers. */
	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6+: per-channel DIMM config plus arbitration control. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2068
1c60fef5
BW
/*
 * idr_for_each() callback: dump the per-context PPGTT for one context
 * owned by the file being walked.  @data is the seq_file (passed through
 * the idr callback's void pointer).
 */
static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	if (!ppgtt) {
		/* Context runs on the global GTT / aliasing PPGTT instead. */
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}
2089
36cdd013
DW
/*
 * Print each engine's four page-directory-pointer (PDP) registers for
 * the aliasing PPGTT on gen8+.  No-op if no aliasing PPGTT exists.
 */
static void gen8_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i;

	if (!ppgtt)
		return;

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		for (i = 0; i < 4; i++) {
			/* Each 64-bit PDP is split across two dword registers;
			 * the two reads are not atomic as a pair. */
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
2111
36cdd013
DW
/*
 * Print gen6/7 PPGTT state: global GFX_MODE, per-engine page-directory
 * registers, and a dump of the aliasing PPGTT if present.
 */
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		/* gen7 moved GFX_MODE to a per-ring register. */
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
2144
/*
 * debugfs: dump PPGTT state for the device and then per open DRM file.
 * Lock order: filelist_mutex -> struct_mutex; runtime PM held around
 * the register reads and released on the unwind path.
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			/* Owner exited while we were walking the list. */
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		/* The seq_file is smuggled through the idr callback's
		 * void-pointer data argument. */
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
2186
f5a4c67d
CW
2187static int count_irq_waiters(struct drm_i915_private *i915)
2188{
e2f80391 2189 struct intel_engine_cs *engine;
3b3f1650 2190 enum intel_engine_id id;
f5a4c67d 2191 int count = 0;
f5a4c67d 2192
3b3f1650 2193 for_each_engine(engine, i915, id)
688e6c72 2194 count += intel_engine_has_waiter(engine);
f5a4c67d
CW
2195
2196 return count;
2197}
2198
7466c291
CW
2199static const char *rps_power_to_str(unsigned int power)
2200{
2201 static const char * const strings[] = {
2202 [LOW_POWER] = "low power",
2203 [BETWEEN] = "mixed",
2204 [HIGH_POWER] = "high power",
2205 };
2206
2207 if (power >= ARRAY_SIZE(strings) || !strings[power])
2208 return "unknown";
2209
2210 return strings[power];
2211}
2212
1854d5ca
CW
/*
 * debugfs: RPS (render power/frequency scaling) overview — current and
 * requested frequencies, limits, per-client boost counts, and the
 * autotuning up/down utilisation window when RPS is active.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	struct drm_file *file;

	/* Only read the actual frequency if the device is already awake;
	 * don't wake it just for this report. */
	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
		intel_runtime_pm_put(dev_priv);
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		/* pid_task() is RCU-protected; no task reference is taken. */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Raw _FW reads require an explicit forcewake bracket. */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2299
63573eb7
BW
2300static int i915_llc(struct seq_file *m, void *data)
2301{
36cdd013 2302 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3accaf7e 2303 const bool edram = INTEL_GEN(dev_priv) > 8;
63573eb7 2304
36cdd013 2305 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
3accaf7e
MK
2306 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2307 intel_uncore_edram_size(dev_priv)/1024/1024);
63573eb7
BW
2308
2309 return 0;
2310}
2311
0509ead1
AS
2312static int i915_huc_load_status_info(struct seq_file *m, void *data)
2313{
2314 struct drm_i915_private *dev_priv = node_to_i915(m->private);
56ffc742 2315 struct drm_printer p;
0509ead1 2316
ab309a6a
MW
2317 if (!HAS_HUC(dev_priv))
2318 return -ENODEV;
0509ead1 2319
56ffc742
MW
2320 p = drm_seq_file_printer(m);
2321 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
0509ead1 2322
3582ad13 2323 intel_runtime_pm_get(dev_priv);
0509ead1 2324 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
3582ad13 2325 intel_runtime_pm_put(dev_priv);
0509ead1
AS
2326
2327 return 0;
2328}
2329
fdf5d357
AD
/*
 * debugfs: GuC firmware descriptor, decoded GUC_STATUS fields, and the
 * sixteen SOFT_SCRATCH registers.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;
	u32 tmp, i;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	/* Register reads below need the device awake. */
	intel_runtime_pm_get(dev_priv);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2361
5e24e4a2
MW
2362static const char *
2363stringify_guc_log_type(enum guc_log_buffer_type type)
2364{
2365 switch (type) {
2366 case GUC_ISR_LOG_BUFFER:
2367 return "ISR";
2368 case GUC_DPC_LOG_BUFFER:
2369 return "DPC";
2370 case GUC_CRASH_DUMP_LOG_BUFFER:
2371 return "CRASH";
2372 default:
2373 MISSING_CASE(type);
2374 }
2375
2376 return "";
2377}
2378
5aa1ee4b
AG
2379static void i915_guc_log_info(struct seq_file *m,
2380 struct drm_i915_private *dev_priv)
2381{
5e24e4a2
MW
2382 struct intel_guc_log *log = &dev_priv->guc.log;
2383 enum guc_log_buffer_type type;
5aa1ee4b 2384
5e24e4a2
MW
2385 if (!intel_guc_log_relay_enabled(log)) {
2386 seq_puts(m, "GuC log relay disabled\n");
2387 return;
2388 }
5aa1ee4b 2389
5e24e4a2 2390 seq_puts(m, "GuC logging stats:\n");
5aa1ee4b 2391
6a96be24 2392 seq_printf(m, "\tRelay full count: %u\n",
5e24e4a2
MW
2393 log->relay.full_count);
2394
2395 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2396 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2397 stringify_guc_log_type(type),
2398 log->stats[type].flush,
2399 log->stats[type].sampled_overflow);
2400 }
5aa1ee4b
AG
2401}
2402
8b417c26
DG
/*
 * Print one GuC client's identity (priority, stage id, doorbell) and its
 * per-engine submission counts plus the overall total.
 */
static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct intel_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint64_t tot = 0;

	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
		   client->priority, client->stage_id, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
		   client->doorbell_id, client->doorbell_offset);

	for_each_engine(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];
		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
			   submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}
2424
a8b9370f
OM
/*
 * debugfs: top-level GuC report — logging stats always; doorbell map and
 * execbuf/preempt client details only when GuC submission is in use.
 */
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	i915_guc_log_info(m, dev_priv);

	/* Without GuC submission there are no clients to describe. */
	if (!USES_GUC_SUBMISSION(dev_priv))
		return 0;

	/* Submission enabled implies an execbuf client exists. */
	GEM_BUG_ON(!guc->execbuf_client);

	seq_printf(m, "\nDoorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
	if (guc->preempt_client) {
		seq_printf(m, "\nGuC preempt client @ %p:\n",
			   guc->preempt_client);
		i915_guc_client_info(m, dev_priv, guc->preempt_client);
	}

	/* Add more as required ... */

	return 0;
}
2456
/*
 * debugfs: walk the GuC stage-descriptor pool and dump every active
 * descriptor, including the per-engine execlist context entries for the
 * engines used by the execbuf client.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip slots the GuC is not currently using. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* Per-engine LRC details, restricted to the client's engines. */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2510
4c7e77fc
AD
/*
 * debugfs: hex-dump a GuC log buffer, four dwords per line.  The same
 * handler serves two files: node->info_ent->data selects the load-error
 * log (non-NULL) versus the regular runtime log.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	/* No log captured yet — emit nothing rather than fail. */
	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	/* Steps 4 dwords at a time; presumably obj->base.size is a
	 * multiple of 16 bytes (page-backed) — TODO confirm. */
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2549
4977a287 2550static int i915_guc_log_level_get(void *data, u64 *val)
685534ef 2551{
bcc36d8a 2552 struct drm_i915_private *dev_priv = data;
685534ef 2553
86aa8247 2554 if (!USES_GUC(dev_priv))
ab309a6a
MW
2555 return -ENODEV;
2556
50935ac7 2557 *val = intel_guc_log_get_level(&dev_priv->guc.log);
685534ef
SAK
2558
2559 return 0;
2560}
2561
4977a287 2562static int i915_guc_log_level_set(void *data, u64 val)
685534ef 2563{
bcc36d8a 2564 struct drm_i915_private *dev_priv = data;
685534ef 2565
86aa8247 2566 if (!USES_GUC(dev_priv))
ab309a6a
MW
2567 return -ENODEV;
2568
50935ac7 2569 return intel_guc_log_set_level(&dev_priv->guc.log, val);
685534ef
SAK
2570}
2571
4977a287
MW
2572DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2573 i915_guc_log_level_get, i915_guc_log_level_set,
685534ef
SAK
2574 "%lld\n");
2575
4977a287
MW
2576static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2577{
2578 struct drm_i915_private *dev_priv = inode->i_private;
2579
2580 if (!USES_GUC(dev_priv))
2581 return -ENODEV;
2582
2583 file->private_data = &dev_priv->guc.log;
2584
2585 return intel_guc_log_relay_open(&dev_priv->guc.log);
2586}
2587
2588static ssize_t
2589i915_guc_log_relay_write(struct file *filp,
2590 const char __user *ubuf,
2591 size_t cnt,
2592 loff_t *ppos)
2593{
2594 struct intel_guc_log *log = filp->private_data;
2595
2596 intel_guc_log_relay_flush(log);
2597
2598 return cnt;
2599}
2600
2601static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2602{
2603 struct drm_i915_private *dev_priv = inode->i_private;
2604
2605 intel_guc_log_relay_close(&dev_priv->guc.log);
2606
2607 return 0;
2608}
2609
/*
 * debugfs file ops: open enables the GuC log relay, any write flushes
 * it, and the final release closes the relay again.
 */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2616
5b7b3086
DP
/*
 * debugfs (per-connector): read the sink's PSR state over DPCD and print
 * it with a human-readable decoding of the state field.
 */
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	u8 val;
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	struct drm_connector *connector = m->private;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	int ret;

	if (!CAN_PSR(dev_priv)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	/* No sink to query unless the connector is actually connected. */
	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* drm_dp_dpcd_readb returns the number of bytes read (1) on success. */
	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

	if (ret == 1) {
		const char *str = "unknown";

		val &= DP_PSR_SINK_STATE_MASK;
		if (val < ARRAY_SIZE(sink_status))
			str = sink_status[val];
		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		return ret;
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2660
00b06296
VN
/*
 * Print the source-side PSR hardware state, decoding the state field of
 * EDP_PSR2_STATUS or EDP_PSR_STATUS depending on which mode is enabled.
 * Unknown state values fall through to the "unknown" line at the end.
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, psr_status;

	if (dev_priv->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		psr_status = I915_READ(EDP_PSR2_STATUS);
		val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
			EDP_PSR2_STATUS_STATE_SHIFT;
		if (val < ARRAY_SIZE(live_status)) {
			seq_printf(m, "Source PSR status: 0x%x [%s]\n",
				   psr_status, live_status[val]);
			return;
		}
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		psr_status = I915_READ(EDP_PSR_STATUS);
		val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
			EDP_PSR_STATUS_STATE_SHIFT;
		if (val < ARRAY_SIZE(live_status)) {
			seq_printf(m, "Source PSR status: 0x%x [%s]\n",
				   psr_status, live_status[val]);
			return;
		}
	}

	seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
}
2711
e91fd8c6
RV
/*
 * debugfs: overall eDP PSR status — sink support, software enable state,
 * hardware enable bit, optional HSW/BDW performance counter, and the
 * source status decode.  PSR state is sampled under psr.lock.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	bool enabled = false;
	bool sink_support;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	sink_support = dev_priv->psr.sink_support;
	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
	/* Without sink support the rest of the report is moot. */
	if (!sink_support)
		return 0;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "PSR mode: %s\n",
		   dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);

	/* Hardware enable bit lives in a different register per PSR mode. */
	if (dev_priv->psr.psr2_enabled)
		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
	else
		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}

	psr_source_status(dev_priv, m);
	mutex_unlock(&dev_priv->psr.lock);

	/* IRQ debug timestamps are only printed when debug mode asks. */
	if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   dev_priv->psr.last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n",
			   dev_priv->psr.last_exit);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;
}
2769
/*
 * debugfs write handler: set the PSR debug mode to @val.
 *
 * Applying the mode may require a modeset, so this takes modeset locks
 * via an acquire context and retries on -EDEADLK (the standard
 * drm_modeset_backoff dance). Returns 0 on success or a negative errno.
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

	intel_runtime_pm_get(dev_priv);

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

retry:
	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
	if (ret == -EDEADLK) {
		/* Lock contention: back off (drops all held locks) and retry */
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	intel_runtime_pm_put(dev_priv);

	return ret;
}
2801
/*
 * debugfs read handler: report the current PSR debug mode.
 * READ_ONCE pairs with writers that update psr.debug without this lock.
 */
static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	*val = READ_ONCE(dev_priv->psr.debug);
	return 0;
}

/* i915_edp_psr_debug: simple u64 get/set attribute, printed in decimal */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2817
/*
 * debugfs entry: report the GPU energy counter in microjoules.
 *
 * Reads the RAPL energy-status-unit exponent from MSR_RAPL_POWER_UNIT
 * (bits 12:8) and scales the raw MCH_SECP_NRG_STTS counter accordingly.
 * Returns -ENODEV pre-gen6 or when the MSR read fails.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* Energy unit exponent: raw counter is in units of 1/2^units J */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}
2844
/*
 * debugfs entry: summarize runtime power management state — GT idleness,
 * IRQ enable state, the PM core usage count (when CONFIG_PM is built in)
 * and the PCI device power state. Deliberately keeps printing even when
 * runtime PM is unsupported, so the remaining fields stay visible.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
2869
/*
 * debugfs entry: dump every power well with its reference count, and
 * under each well the per-domain use counts. Holds power_domains->lock
 * so counts are sampled consistently.
 */
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->desc->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->desc->domains)
			seq_printf(m, "  %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
2897
/*
 * debugfs entry: report DMC/CSR firmware state — load status, path,
 * version, and (on platforms/firmware versions that expose them) the
 * DC-state transition counters, plus the raw program/SSP/HTP registers.
 *
 * When no payload is loaded we still jump to "out" so the raw registers
 * are always printed before the runtime-pm reference is dropped.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* DC counters are only valid on KBL, and on SKL/BXT with new-enough firmware */
	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2939
53f5e3ca
JB
2940static void intel_seq_print_mode(struct seq_file *m, int tabs,
2941 struct drm_display_mode *mode)
2942{
2943 int i;
2944
2945 for (i = 0; i < tabs; i++)
2946 seq_putc(m, '\t');
2947
2948 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2949 mode->base.id, mode->name,
2950 mode->vrefresh, mode->clock,
2951 mode->hdisplay, mode->hsync_start,
2952 mode->hsync_end, mode->htotal,
2953 mode->vdisplay, mode->vsync_start,
2954 mode->vsync_end, mode->vtotal,
2955 mode->type, mode->flags);
2956}
2957
/*
 * Print one encoder attached to @intel_crtc, then each connector driven
 * by that encoder. Connected connectors additionally get the CRTC's
 * current mode dumped at two tab stops of indentation.
 */
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			/* The mode shown is the CRTC's, shared by its connectors */
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
2986
/*
 * Print the primary-plane framebuffer of @intel_crtc (id, source
 * position, size), or note that the primary plane is disabled, then
 * recurse into every encoder on the CRTC.
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		/* src_x/src_y are 16.16 fixed point; >> 16 drops the fraction */
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
3005
3006static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
3007{
3008 struct drm_display_mode *mode = panel->fixed_mode;
3009
3010 seq_printf(m, "\tfixed mode:\n");
3011 intel_seq_print_mode(m, 2, mode);
3012}
3013
/*
 * Print DisplayPort-specific connector details: DPCD revision, sink
 * audio capability, the fixed panel mode for eDP, and the DP branch
 * device (downstream port) debug info.
 */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
3028
/*
 * Print DP-MST connector details: queries the MST topology manager of
 * the primary digital port for this connector's audio capability.
 */
static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
						   intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
3042
53f5e3ca
JB
3043static void intel_hdmi_info(struct seq_file *m,
3044 struct intel_connector *intel_connector)
3045{
3046 struct intel_encoder *intel_encoder = intel_connector->encoder;
3047 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3048
742f491d 3049 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
53f5e3ca
JB
3050}
3051
/* Print LVDS-specific connector details: just the fixed panel mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
3057
/*
 * Print everything known about one connector: identity and status,
 * physical/EDID-derived info when connected, type-specific details
 * (DP, DP-MST, LVDS, HDMI) and finally the probed mode list.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	/* No encoder attached: type-specific dumps would dereference NULL */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
3107
/* Map a drm_plane_type to a short three-letter tag for dumps. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
3125
/*
 * Format a plane rotation bitmask as a human-readable string.
 *
 * NOTE(review): returns a pointer to a static buffer, so concurrent
 * debugfs readers could race on it — presumably acceptable for a
 * debug-only dump, but worth confirming.
 */
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
3145
/*
 * Print one line per plane on @intel_crtc: type, CRTC position/size,
 * source position/size, pixel format and rotation.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		/*
		 * src_* are 16.16 fixed point; (frac * 15625) >> 10 is
		 * frac * 10000 / 65536, i.e. four decimal digits.
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}
3188
/*
 * Print the pipe scaler state for @intel_crtc: user bitmask, active
 * scaler id, and per-scaler use/mode, or a note when the platform has
 * no scalers.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3216
/*
 * debugfs entry: full display-state dump. Walks every CRTC (under its
 * own modeset lock) printing pipe config, cursor, scaler and plane
 * state, then every connector (under the mode_config mutex plus the
 * connector-list iterator). Holds a runtime-pm reference throughout
 * since the per-CRTC dumps read hardware registers.
 */
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
3277
/*
 * debugfs entry: global GT state (awake/epoch, active request count,
 * CS timestamp frequency) followed by a full intel_engine_dump() of
 * every engine, routed through a seq_file-backed drm_printer.
 */
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct drm_printer p;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s (epoch %u)\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "Global active requests: %d\n",
		   dev_priv->gt.active_requests);
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   dev_priv->info.cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
3302
79e9cd5f
LL
3303static int i915_rcs_topology(struct seq_file *m, void *unused)
3304{
3305 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3306 struct drm_printer p = drm_seq_file_printer(m);
3307
3308 intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3309
3310 return 0;
3311}
3312
c5418a8b
CW
3313static int i915_shrinker_info(struct seq_file *m, void *unused)
3314{
3315 struct drm_i915_private *i915 = node_to_i915(m->private);
3316
3317 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3318 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3319
3320 return 0;
3321}
3322
/*
 * debugfs entry: dump every shared DPLL — name, id, which CRTCs use it,
 * whether it is on, and the complete tracked hardware state (the mg_*
 * registers apply to ICL+ MG PHY PLLs). All modeset locks are held so
 * the state cannot change while it is printed.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf:    0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias:  0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
3371
/*
 * debugfs entry: list every register workaround recorded at init time —
 * address, value written, and the mask of bits it touches.
 */
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
	int i;

	seq_printf(m, "Workarounds applied: %d\n", wa->count);
	for (i = 0; i < wa->count; ++i)
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
			   wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);

	return 0;
}
3384
d2d4f39b
KM
3385static int i915_ipc_status_show(struct seq_file *m, void *data)
3386{
3387 struct drm_i915_private *dev_priv = m->private;
3388
3389 seq_printf(m, "Isochronous Priority Control: %s\n",
3390 yesno(dev_priv->ipc_enabled));
3391 return 0;
3392}
3393
/*
 * Open handler for the IPC status node; rejects platforms without IPC
 * and binds the seq_file to the device via single_open().
 */
static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}
3403
/*
 * Write handler: parse a boolean from userspace and enable/disable IPC.
 * Forces watermark recomputation (distrust_bios_wm) since IPC affects
 * the watermark programming.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	int ret;
	bool enable;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	intel_runtime_pm_get(dev_priv);
	if (!dev_priv->ipc_enabled && enable)
		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
	/* IPC changes invalidate the BIOS-derived watermarks */
	dev_priv->wm.distrust_bios_wm = true;
	dev_priv->ipc_enabled = enable;
	intel_enable_ipc(dev_priv);
	intel_runtime_pm_put(dev_priv);

	return len;
}
3426
/* File operations for the read/write i915_ipc_status debugfs node. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3435
/*
 * debugfs entry (gen9+): dump the Display Data Buffer allocation —
 * start, end and size of the DDB slice assigned to every plane and the
 * cursor on each pipe. Modeset locks are held so the allocation is
 * stable while printed.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_universal_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			/* Planes are reported 1-based to match the HW naming */
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
3473
/*
 * Print DRRS (Display Refresh Rate Switching) state for one CRTC: the
 * connectors on it, the VBT-declared DRRS type, and — when the current
 * CRTC state supports DRRS — the live refresh-rate state under
 * drrs->mutex. Note the early returns inside the locked section both
 * drop the mutex before returning.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* Only list connectors currently driven by this CRTC */
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
				   drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3548
/*
 * debugfs entry: dump DRRS status for every active CRTC, or note that
 * none is active. Takes all modeset locks so CRTC states are stable.
 */
static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}
3572
/*
 * debugfs entry: dump the MST topology of every MST-capable DP source
 * port. Skips non-DP connectors, MST stream encoders (only primary
 * ports own a topology manager), and ports without MST capability.
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3603
/*
 * Write handler for DP compliance testing: userspace writes an integer;
 * a value of exactly 1 arms the compliance test_active flag on every
 * connected SST DisplayPort connector, any other value disarms it.
 * Returns the number of bytes consumed or a negative errno.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* Copy and NUL-terminate the user buffer in one step */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		/* MST stream encoders don't carry the compliance state */
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3662
3663static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3664{
e4006713
AS
3665 struct drm_i915_private *dev_priv = m->private;
3666 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3667 struct drm_connector *connector;
3f6a5e1e 3668 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3669 struct intel_dp *intel_dp;
3670
3f6a5e1e
DV
3671 drm_connector_list_iter_begin(dev, &conn_iter);
3672 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3673 struct intel_encoder *encoder;
3674
eb3394fa
TP
3675 if (connector->connector_type !=
3676 DRM_MODE_CONNECTOR_DisplayPort)
3677 continue;
3678
a874b6a3
ML
3679 encoder = to_intel_encoder(connector->encoder);
3680 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3681 continue;
3682
3683 if (encoder && connector->status == connector_status_connected) {
3684 intel_dp = enc_to_intel_dp(&encoder->base);
c1617abc 3685 if (intel_dp->compliance.test_active)
eb3394fa
TP
3686 seq_puts(m, "1");
3687 else
3688 seq_puts(m, "0");
3689 } else
3690 seq_puts(m, "0");
3691 }
3f6a5e1e 3692 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3693
3694 return 0;
3695}
3696
3697static int i915_displayport_test_active_open(struct inode *inode,
36cdd013 3698 struct file *file)
eb3394fa 3699{
36cdd013 3700 return single_open(file, i915_displayport_test_active_show,
e4006713 3701 inode->i_private);
eb3394fa
TP
3702}
3703
3704static const struct file_operations i915_displayport_test_active_fops = {
3705 .owner = THIS_MODULE,
3706 .open = i915_displayport_test_active_open,
3707 .read = seq_read,
3708 .llseek = seq_lseek,
3709 .release = single_release,
3710 .write = i915_displayport_test_active_write
3711};
3712
3713static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3714{
e4006713
AS
3715 struct drm_i915_private *dev_priv = m->private;
3716 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3717 struct drm_connector *connector;
3f6a5e1e 3718 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3719 struct intel_dp *intel_dp;
3720
3f6a5e1e
DV
3721 drm_connector_list_iter_begin(dev, &conn_iter);
3722 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3723 struct intel_encoder *encoder;
3724
eb3394fa
TP
3725 if (connector->connector_type !=
3726 DRM_MODE_CONNECTOR_DisplayPort)
3727 continue;
3728
a874b6a3
ML
3729 encoder = to_intel_encoder(connector->encoder);
3730 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3731 continue;
3732
3733 if (encoder && connector->status == connector_status_connected) {
3734 intel_dp = enc_to_intel_dp(&encoder->base);
b48a5ba9
MN
3735 if (intel_dp->compliance.test_type ==
3736 DP_TEST_LINK_EDID_READ)
3737 seq_printf(m, "%lx",
3738 intel_dp->compliance.test_data.edid);
611032bf
MN
3739 else if (intel_dp->compliance.test_type ==
3740 DP_TEST_LINK_VIDEO_PATTERN) {
3741 seq_printf(m, "hdisplay: %d\n",
3742 intel_dp->compliance.test_data.hdisplay);
3743 seq_printf(m, "vdisplay: %d\n",
3744 intel_dp->compliance.test_data.vdisplay);
3745 seq_printf(m, "bpc: %u\n",
3746 intel_dp->compliance.test_data.bpc);
3747 }
eb3394fa
TP
3748 } else
3749 seq_puts(m, "0");
3750 }
3f6a5e1e 3751 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3752
3753 return 0;
3754}
e4006713 3755DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
eb3394fa
TP
3756
3757static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3758{
e4006713
AS
3759 struct drm_i915_private *dev_priv = m->private;
3760 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3761 struct drm_connector *connector;
3f6a5e1e 3762 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3763 struct intel_dp *intel_dp;
3764
3f6a5e1e
DV
3765 drm_connector_list_iter_begin(dev, &conn_iter);
3766 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3767 struct intel_encoder *encoder;
3768
eb3394fa
TP
3769 if (connector->connector_type !=
3770 DRM_MODE_CONNECTOR_DisplayPort)
3771 continue;
3772
a874b6a3
ML
3773 encoder = to_intel_encoder(connector->encoder);
3774 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3775 continue;
3776
3777 if (encoder && connector->status == connector_status_connected) {
3778 intel_dp = enc_to_intel_dp(&encoder->base);
c1617abc 3779 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
eb3394fa
TP
3780 } else
3781 seq_puts(m, "0");
3782 }
3f6a5e1e 3783 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3784
3785 return 0;
3786}
e4006713 3787DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
eb3394fa 3788
97e94b22 3789static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
369a1342 3790{
36cdd013
DW
3791 struct drm_i915_private *dev_priv = m->private;
3792 struct drm_device *dev = &dev_priv->drm;
369a1342 3793 int level;
de38b95c
VS
3794 int num_levels;
3795
36cdd013 3796 if (IS_CHERRYVIEW(dev_priv))
de38b95c 3797 num_levels = 3;
36cdd013 3798 else if (IS_VALLEYVIEW(dev_priv))
de38b95c 3799 num_levels = 1;
04548cba
VS
3800 else if (IS_G4X(dev_priv))
3801 num_levels = 3;
de38b95c 3802 else
5db94019 3803 num_levels = ilk_wm_max_level(dev_priv) + 1;
369a1342
VS
3804
3805 drm_modeset_lock_all(dev);
3806
3807 for (level = 0; level < num_levels; level++) {
3808 unsigned int latency = wm[level];
3809
97e94b22
DL
3810 /*
3811 * - WM1+ latency values in 0.5us units
de38b95c 3812 * - latencies are in us on gen9/vlv/chv
97e94b22 3813 */
04548cba
VS
3814 if (INTEL_GEN(dev_priv) >= 9 ||
3815 IS_VALLEYVIEW(dev_priv) ||
3816 IS_CHERRYVIEW(dev_priv) ||
3817 IS_G4X(dev_priv))
97e94b22
DL
3818 latency *= 10;
3819 else if (level > 0)
369a1342
VS
3820 latency *= 5;
3821
3822 seq_printf(m, "WM%d %u (%u.%u usec)\n",
97e94b22 3823 level, wm[level], latency / 10, latency % 10);
369a1342
VS
3824 }
3825
3826 drm_modeset_unlock_all(dev);
3827}
3828
3829static int pri_wm_latency_show(struct seq_file *m, void *data)
3830{
36cdd013 3831 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3832 const uint16_t *latencies;
3833
36cdd013 3834 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3835 latencies = dev_priv->wm.skl_latency;
3836 else
36cdd013 3837 latencies = dev_priv->wm.pri_latency;
369a1342 3838
97e94b22 3839 wm_latency_show(m, latencies);
369a1342
VS
3840
3841 return 0;
3842}
3843
3844static int spr_wm_latency_show(struct seq_file *m, void *data)
3845{
36cdd013 3846 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3847 const uint16_t *latencies;
3848
36cdd013 3849 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3850 latencies = dev_priv->wm.skl_latency;
3851 else
36cdd013 3852 latencies = dev_priv->wm.spr_latency;
369a1342 3853
97e94b22 3854 wm_latency_show(m, latencies);
369a1342
VS
3855
3856 return 0;
3857}
3858
3859static int cur_wm_latency_show(struct seq_file *m, void *data)
3860{
36cdd013 3861 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3862 const uint16_t *latencies;
3863
36cdd013 3864 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3865 latencies = dev_priv->wm.skl_latency;
3866 else
36cdd013 3867 latencies = dev_priv->wm.cur_latency;
369a1342 3868
97e94b22 3869 wm_latency_show(m, latencies);
369a1342
VS
3870
3871 return 0;
3872}
3873
3874static int pri_wm_latency_open(struct inode *inode, struct file *file)
3875{
36cdd013 3876 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3877
04548cba 3878 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
369a1342
VS
3879 return -ENODEV;
3880
36cdd013 3881 return single_open(file, pri_wm_latency_show, dev_priv);
369a1342
VS
3882}
3883
3884static int spr_wm_latency_open(struct inode *inode, struct file *file)
3885{
36cdd013 3886 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3887
36cdd013 3888 if (HAS_GMCH_DISPLAY(dev_priv))
369a1342
VS
3889 return -ENODEV;
3890
36cdd013 3891 return single_open(file, spr_wm_latency_show, dev_priv);
369a1342
VS
3892}
3893
3894static int cur_wm_latency_open(struct inode *inode, struct file *file)
3895{
36cdd013 3896 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3897
36cdd013 3898 if (HAS_GMCH_DISPLAY(dev_priv))
369a1342
VS
3899 return -ENODEV;
3900
36cdd013 3901 return single_open(file, cur_wm_latency_show, dev_priv);
369a1342
VS
3902}
3903
3904static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
97e94b22 3905 size_t len, loff_t *offp, uint16_t wm[8])
369a1342
VS
3906{
3907 struct seq_file *m = file->private_data;
36cdd013
DW
3908 struct drm_i915_private *dev_priv = m->private;
3909 struct drm_device *dev = &dev_priv->drm;
97e94b22 3910 uint16_t new[8] = { 0 };
de38b95c 3911 int num_levels;
369a1342
VS
3912 int level;
3913 int ret;
3914 char tmp[32];
3915
36cdd013 3916 if (IS_CHERRYVIEW(dev_priv))
de38b95c 3917 num_levels = 3;
36cdd013 3918 else if (IS_VALLEYVIEW(dev_priv))
de38b95c 3919 num_levels = 1;
04548cba
VS
3920 else if (IS_G4X(dev_priv))
3921 num_levels = 3;
de38b95c 3922 else
5db94019 3923 num_levels = ilk_wm_max_level(dev_priv) + 1;
de38b95c 3924
369a1342
VS
3925 if (len >= sizeof(tmp))
3926 return -EINVAL;
3927
3928 if (copy_from_user(tmp, ubuf, len))
3929 return -EFAULT;
3930
3931 tmp[len] = '\0';
3932
97e94b22
DL
3933 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3934 &new[0], &new[1], &new[2], &new[3],
3935 &new[4], &new[5], &new[6], &new[7]);
369a1342
VS
3936 if (ret != num_levels)
3937 return -EINVAL;
3938
3939 drm_modeset_lock_all(dev);
3940
3941 for (level = 0; level < num_levels; level++)
3942 wm[level] = new[level];
3943
3944 drm_modeset_unlock_all(dev);
3945
3946 return len;
3947}
3948
3949
3950static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3951 size_t len, loff_t *offp)
3952{
3953 struct seq_file *m = file->private_data;
36cdd013 3954 struct drm_i915_private *dev_priv = m->private;
97e94b22 3955 uint16_t *latencies;
369a1342 3956
36cdd013 3957 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3958 latencies = dev_priv->wm.skl_latency;
3959 else
36cdd013 3960 latencies = dev_priv->wm.pri_latency;
97e94b22
DL
3961
3962 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3963}
3964
3965static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3966 size_t len, loff_t *offp)
3967{
3968 struct seq_file *m = file->private_data;
36cdd013 3969 struct drm_i915_private *dev_priv = m->private;
97e94b22 3970 uint16_t *latencies;
369a1342 3971
36cdd013 3972 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3973 latencies = dev_priv->wm.skl_latency;
3974 else
36cdd013 3975 latencies = dev_priv->wm.spr_latency;
97e94b22
DL
3976
3977 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3978}
3979
3980static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3981 size_t len, loff_t *offp)
3982{
3983 struct seq_file *m = file->private_data;
36cdd013 3984 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3985 uint16_t *latencies;
3986
36cdd013 3987 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3988 latencies = dev_priv->wm.skl_latency;
3989 else
36cdd013 3990 latencies = dev_priv->wm.cur_latency;
369a1342 3991
97e94b22 3992 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3993}
3994
3995static const struct file_operations i915_pri_wm_latency_fops = {
3996 .owner = THIS_MODULE,
3997 .open = pri_wm_latency_open,
3998 .read = seq_read,
3999 .llseek = seq_lseek,
4000 .release = single_release,
4001 .write = pri_wm_latency_write
4002};
4003
4004static const struct file_operations i915_spr_wm_latency_fops = {
4005 .owner = THIS_MODULE,
4006 .open = spr_wm_latency_open,
4007 .read = seq_read,
4008 .llseek = seq_lseek,
4009 .release = single_release,
4010 .write = spr_wm_latency_write
4011};
4012
4013static const struct file_operations i915_cur_wm_latency_fops = {
4014 .owner = THIS_MODULE,
4015 .open = cur_wm_latency_open,
4016 .read = seq_read,
4017 .llseek = seq_lseek,
4018 .release = single_release,
4019 .write = cur_wm_latency_write
4020};
4021
647416f9
KC
4022static int
4023i915_wedged_get(void *data, u64 *val)
f3cd474b 4024{
36cdd013 4025 struct drm_i915_private *dev_priv = data;
f3cd474b 4026
d98c52cf 4027 *val = i915_terminally_wedged(&dev_priv->gpu_error);
f3cd474b 4028
647416f9 4029 return 0;
f3cd474b
CW
4030}
4031
647416f9
KC
4032static int
4033i915_wedged_set(void *data, u64 val)
f3cd474b 4034{
598b6b5a
CW
4035 struct drm_i915_private *i915 = data;
4036 struct intel_engine_cs *engine;
4037 unsigned int tmp;
d46c0517 4038
b8d24a06
MK
4039 /*
4040 * There is no safeguard against this debugfs entry colliding
4041 * with the hangcheck calling same i915_handle_error() in
4042 * parallel, causing an explosion. For now we assume that the
4043 * test harness is responsible enough not to inject gpu hangs
4044 * while it is writing to 'i915_wedged'
4045 */
4046
598b6b5a 4047 if (i915_reset_backoff(&i915->gpu_error))
b8d24a06
MK
4048 return -EAGAIN;
4049
598b6b5a
CW
4050 for_each_engine_masked(engine, i915, val, tmp) {
4051 engine->hangcheck.seqno = intel_engine_get_seqno(engine);
4052 engine->hangcheck.stalled = true;
4053 }
4054
ce800754
CW
4055 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
4056 "Manually set wedged engine mask = %llx", val);
d46c0517 4057
598b6b5a 4058 wait_on_bit(&i915->gpu_error.flags,
d3df42b7
CW
4059 I915_RESET_HANDOFF,
4060 TASK_UNINTERRUPTIBLE);
4061
647416f9 4062 return 0;
f3cd474b
CW
4063}
4064
647416f9
KC
4065DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4066 i915_wedged_get, i915_wedged_set,
3a3b4f98 4067 "%llu\n");
f3cd474b 4068
64486ae7
CW
4069static int
4070fault_irq_set(struct drm_i915_private *i915,
4071 unsigned long *irq,
4072 unsigned long val)
4073{
4074 int err;
4075
4076 err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4077 if (err)
4078 return err;
4079
4080 err = i915_gem_wait_for_idle(i915,
4081 I915_WAIT_LOCKED |
ec625fb9
CW
4082 I915_WAIT_INTERRUPTIBLE,
4083 MAX_SCHEDULE_TIMEOUT);
64486ae7
CW
4084 if (err)
4085 goto err_unlock;
4086
64486ae7
CW
4087 *irq = val;
4088 mutex_unlock(&i915->drm.struct_mutex);
4089
4090 /* Flush idle worker to disarm irq */
7c26240e 4091 drain_delayed_work(&i915->gt.idle_work);
64486ae7
CW
4092
4093 return 0;
4094
4095err_unlock:
4096 mutex_unlock(&i915->drm.struct_mutex);
4097 return err;
4098}
4099
094f9a54
CW
4100static int
4101i915_ring_missed_irq_get(void *data, u64 *val)
4102{
36cdd013 4103 struct drm_i915_private *dev_priv = data;
094f9a54
CW
4104
4105 *val = dev_priv->gpu_error.missed_irq_rings;
4106 return 0;
4107}
4108
4109static int
4110i915_ring_missed_irq_set(void *data, u64 val)
4111{
64486ae7 4112 struct drm_i915_private *i915 = data;
094f9a54 4113
64486ae7 4114 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
094f9a54
CW
4115}
4116
4117DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4118 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4119 "0x%08llx\n");
4120
4121static int
4122i915_ring_test_irq_get(void *data, u64 *val)
4123{
36cdd013 4124 struct drm_i915_private *dev_priv = data;
094f9a54
CW
4125
4126 *val = dev_priv->gpu_error.test_irq_rings;
4127
4128 return 0;
4129}
4130
4131static int
4132i915_ring_test_irq_set(void *data, u64 val)
4133{
64486ae7 4134 struct drm_i915_private *i915 = data;
094f9a54 4135
5f521722
CW
4136 /* GuC keeps the user interrupt permanently enabled for submission */
4137 if (USES_GUC_SUBMISSION(i915))
4138 return -ENODEV;
4139
4140 /*
4141 * From icl, we can no longer individually mask interrupt generation
4142 * from each engine.
4143 */
4144 if (INTEL_GEN(i915) >= 11)
4145 return -ENODEV;
4146
64486ae7 4147 val &= INTEL_INFO(i915)->ring_mask;
094f9a54 4148 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
094f9a54 4149
64486ae7 4150 return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
094f9a54
CW
4151}
4152
4153DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4154 i915_ring_test_irq_get, i915_ring_test_irq_set,
4155 "0x%08llx\n");
4156
b4a0b32d
CW
4157#define DROP_UNBOUND BIT(0)
4158#define DROP_BOUND BIT(1)
4159#define DROP_RETIRE BIT(2)
4160#define DROP_ACTIVE BIT(3)
4161#define DROP_FREED BIT(4)
4162#define DROP_SHRINK_ALL BIT(5)
4163#define DROP_IDLE BIT(6)
6b048706
CW
4164#define DROP_RESET_ACTIVE BIT(7)
4165#define DROP_RESET_SEQNO BIT(8)
fbbd37b3
CW
4166#define DROP_ALL (DROP_UNBOUND | \
4167 DROP_BOUND | \
4168 DROP_RETIRE | \
4169 DROP_ACTIVE | \
8eadc19b 4170 DROP_FREED | \
b4a0b32d 4171 DROP_SHRINK_ALL |\
6b048706
CW
4172 DROP_IDLE | \
4173 DROP_RESET_ACTIVE | \
4174 DROP_RESET_SEQNO)
647416f9
KC
4175static int
4176i915_drop_caches_get(void *data, u64 *val)
dd624afd 4177{
647416f9 4178 *val = DROP_ALL;
dd624afd 4179
647416f9 4180 return 0;
dd624afd
CW
4181}
4182
647416f9
KC
4183static int
4184i915_drop_caches_set(void *data, u64 val)
dd624afd 4185{
6b048706 4186 struct drm_i915_private *i915 = data;
00c26cf9 4187 int ret = 0;
dd624afd 4188
b4a0b32d
CW
4189 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4190 val, val & DROP_ALL);
dd624afd 4191
6b048706
CW
4192 if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
4193 i915_gem_set_wedged(i915);
4194
dd624afd
CW
4195 /* No need to check and wait for gpu resets, only libdrm auto-restarts
4196 * on ioctls on -EAGAIN. */
6b048706
CW
4197 if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
4198 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
dd624afd 4199 if (ret)
00c26cf9 4200 return ret;
dd624afd 4201
00c26cf9 4202 if (val & DROP_ACTIVE)
6b048706 4203 ret = i915_gem_wait_for_idle(i915,
00c26cf9 4204 I915_WAIT_INTERRUPTIBLE |
ec625fb9
CW
4205 I915_WAIT_LOCKED,
4206 MAX_SCHEDULE_TIMEOUT);
00c26cf9 4207
88a83f3c 4208 if (ret == 0 && val & DROP_RESET_SEQNO) {
6b048706
CW
4209 intel_runtime_pm_get(i915);
4210 ret = i915_gem_set_global_seqno(&i915->drm, 1);
4211 intel_runtime_pm_put(i915);
4212 }
4213
00c26cf9 4214 if (val & DROP_RETIRE)
6b048706 4215 i915_retire_requests(i915);
00c26cf9 4216
6b048706
CW
4217 mutex_unlock(&i915->drm.struct_mutex);
4218 }
4219
4220 if (val & DROP_RESET_ACTIVE &&
4221 i915_terminally_wedged(&i915->gpu_error)) {
4222 i915_handle_error(i915, ALL_ENGINES, 0, NULL);
4223 wait_on_bit(&i915->gpu_error.flags,
4224 I915_RESET_HANDOFF,
4225 TASK_UNINTERRUPTIBLE);
00c26cf9 4226 }
dd624afd 4227
d92a8cfc 4228 fs_reclaim_acquire(GFP_KERNEL);
21ab4e74 4229 if (val & DROP_BOUND)
6b048706 4230 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
4ad72b7f 4231
21ab4e74 4232 if (val & DROP_UNBOUND)
6b048706 4233 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
dd624afd 4234
8eadc19b 4235 if (val & DROP_SHRINK_ALL)
6b048706 4236 i915_gem_shrink_all(i915);
d92a8cfc 4237 fs_reclaim_release(GFP_KERNEL);
8eadc19b 4238
4dfacb0b
CW
4239 if (val & DROP_IDLE) {
4240 do {
6b048706
CW
4241 if (READ_ONCE(i915->gt.active_requests))
4242 flush_delayed_work(&i915->gt.retire_work);
4243 drain_delayed_work(&i915->gt.idle_work);
4244 } while (READ_ONCE(i915->gt.awake));
4dfacb0b 4245 }
b4a0b32d 4246
c9c70471 4247 if (val & DROP_FREED)
6b048706 4248 i915_gem_drain_freed_objects(i915);
fbbd37b3 4249
647416f9 4250 return ret;
dd624afd
CW
4251}
4252
647416f9
KC
4253DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4254 i915_drop_caches_get, i915_drop_caches_set,
4255 "0x%08llx\n");
dd624afd 4256
647416f9
KC
4257static int
4258i915_cache_sharing_get(void *data, u64 *val)
07b7ddd9 4259{
36cdd013 4260 struct drm_i915_private *dev_priv = data;
07b7ddd9 4261 u32 snpcr;
07b7ddd9 4262
36cdd013 4263 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
004777cb
DV
4264 return -ENODEV;
4265
c8c8fb33 4266 intel_runtime_pm_get(dev_priv);
22bcfc6a 4267
07b7ddd9 4268 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
c8c8fb33
PZ
4269
4270 intel_runtime_pm_put(dev_priv);
07b7ddd9 4271
647416f9 4272 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
07b7ddd9 4273
647416f9 4274 return 0;
07b7ddd9
JB
4275}
4276
647416f9
KC
4277static int
4278i915_cache_sharing_set(void *data, u64 val)
07b7ddd9 4279{
36cdd013 4280 struct drm_i915_private *dev_priv = data;
07b7ddd9 4281 u32 snpcr;
07b7ddd9 4282
36cdd013 4283 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
004777cb
DV
4284 return -ENODEV;
4285
647416f9 4286 if (val > 3)
07b7ddd9
JB
4287 return -EINVAL;
4288
c8c8fb33 4289 intel_runtime_pm_get(dev_priv);
647416f9 4290 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
07b7ddd9
JB
4291
4292 /* Update the cache sharing policy here as well */
4293 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4294 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4295 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4296 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4297
c8c8fb33 4298 intel_runtime_pm_put(dev_priv);
647416f9 4299 return 0;
07b7ddd9
JB
4300}
4301
647416f9
KC
4302DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4303 i915_cache_sharing_get, i915_cache_sharing_set,
4304 "%llu\n");
07b7ddd9 4305
36cdd013 4306static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4307 struct sseu_dev_info *sseu)
5d39525a 4308{
7aa0b14e
CW
4309#define SS_MAX 2
4310 const int ss_max = SS_MAX;
4311 u32 sig1[SS_MAX], sig2[SS_MAX];
5d39525a 4312 int ss;
5d39525a
JM
4313
4314 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4315 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4316 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4317 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4318
4319 for (ss = 0; ss < ss_max; ss++) {
4320 unsigned int eu_cnt;
4321
4322 if (sig1[ss] & CHV_SS_PG_ENABLE)
4323 /* skip disabled subslice */
4324 continue;
4325
f08a0c92 4326 sseu->slice_mask = BIT(0);
8cc76693 4327 sseu->subslice_mask[0] |= BIT(ss);
5d39525a
JM
4328 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4329 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4330 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4331 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
915490d5
ID
4332 sseu->eu_total += eu_cnt;
4333 sseu->eu_per_subslice = max_t(unsigned int,
4334 sseu->eu_per_subslice, eu_cnt);
5d39525a 4335 }
7aa0b14e 4336#undef SS_MAX
5d39525a
JM
4337}
4338
f8c3dcf9
RV
4339static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4340 struct sseu_dev_info *sseu)
4341{
c7fb3c6c 4342#define SS_MAX 6
f8c3dcf9 4343 const struct intel_device_info *info = INTEL_INFO(dev_priv);
c7fb3c6c 4344 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
f8c3dcf9 4345 int s, ss;
f8c3dcf9 4346
b3e7f866 4347 for (s = 0; s < info->sseu.max_slices; s++) {
f8c3dcf9
RV
4348 /*
4349 * FIXME: Valid SS Mask respects the spec and read
4350 * only valid bits for those registers, excluding reserverd
4351 * although this seems wrong because it would leave many
4352 * subslices without ACK.
4353 */
4354 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4355 GEN10_PGCTL_VALID_SS_MASK(s);
4356 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4357 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4358 }
4359
4360 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4361 GEN9_PGCTL_SSA_EU19_ACK |
4362 GEN9_PGCTL_SSA_EU210_ACK |
4363 GEN9_PGCTL_SSA_EU311_ACK;
4364 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4365 GEN9_PGCTL_SSB_EU19_ACK |
4366 GEN9_PGCTL_SSB_EU210_ACK |
4367 GEN9_PGCTL_SSB_EU311_ACK;
4368
b3e7f866 4369 for (s = 0; s < info->sseu.max_slices; s++) {
f8c3dcf9
RV
4370 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4371 /* skip disabled slice */
4372 continue;
4373
4374 sseu->slice_mask |= BIT(s);
8cc76693 4375 sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
f8c3dcf9 4376
b3e7f866 4377 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
f8c3dcf9
RV
4378 unsigned int eu_cnt;
4379
4380 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4381 /* skip disabled subslice */
4382 continue;
4383
4384 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4385 eu_mask[ss % 2]);
4386 sseu->eu_total += eu_cnt;
4387 sseu->eu_per_subslice = max_t(unsigned int,
4388 sseu->eu_per_subslice,
4389 eu_cnt);
4390 }
4391 }
c7fb3c6c 4392#undef SS_MAX
f8c3dcf9
RV
4393}
4394
36cdd013 4395static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4396 struct sseu_dev_info *sseu)
5d39525a 4397{
c7fb3c6c 4398#define SS_MAX 3
b3e7f866 4399 const struct intel_device_info *info = INTEL_INFO(dev_priv);
c7fb3c6c 4400 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
5d39525a 4401 int s, ss;
1c046bc1 4402
b3e7f866 4403 for (s = 0; s < info->sseu.max_slices; s++) {
1c046bc1
JM
4404 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4405 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4406 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4407 }
4408
5d39525a
JM
4409 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4410 GEN9_PGCTL_SSA_EU19_ACK |
4411 GEN9_PGCTL_SSA_EU210_ACK |
4412 GEN9_PGCTL_SSA_EU311_ACK;
4413 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4414 GEN9_PGCTL_SSB_EU19_ACK |
4415 GEN9_PGCTL_SSB_EU210_ACK |
4416 GEN9_PGCTL_SSB_EU311_ACK;
4417
b3e7f866 4418 for (s = 0; s < info->sseu.max_slices; s++) {
5d39525a
JM
4419 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4420 /* skip disabled slice */
4421 continue;
4422
f08a0c92 4423 sseu->slice_mask |= BIT(s);
1c046bc1 4424
f8c3dcf9 4425 if (IS_GEN9_BC(dev_priv))
8cc76693
LL
4426 sseu->subslice_mask[s] =
4427 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
1c046bc1 4428
b3e7f866 4429 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
5d39525a
JM
4430 unsigned int eu_cnt;
4431
cc3f90f0 4432 if (IS_GEN9_LP(dev_priv)) {
57ec171e
ID
4433 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4434 /* skip disabled subslice */
4435 continue;
1c046bc1 4436
8cc76693 4437 sseu->subslice_mask[s] |= BIT(ss);
57ec171e 4438 }
1c046bc1 4439
5d39525a
JM
4440 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4441 eu_mask[ss%2]);
915490d5
ID
4442 sseu->eu_total += eu_cnt;
4443 sseu->eu_per_subslice = max_t(unsigned int,
4444 sseu->eu_per_subslice,
4445 eu_cnt);
5d39525a
JM
4446 }
4447 }
c7fb3c6c 4448#undef SS_MAX
5d39525a
JM
4449}
4450
36cdd013 4451static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4452 struct sseu_dev_info *sseu)
91bedd34 4453{
91bedd34 4454 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
36cdd013 4455 int s;
91bedd34 4456
f08a0c92 4457 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
91bedd34 4458
f08a0c92 4459 if (sseu->slice_mask) {
43b67998
ID
4460 sseu->eu_per_subslice =
4461 INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
8cc76693
LL
4462 for (s = 0; s < fls(sseu->slice_mask); s++) {
4463 sseu->subslice_mask[s] =
4464 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4465 }
57ec171e
ID
4466 sseu->eu_total = sseu->eu_per_subslice *
4467 sseu_subslice_total(sseu);
91bedd34
ŁD
4468
4469 /* subtract fused off EU(s) from enabled slice(s) */
795b38b3 4470 for (s = 0; s < fls(sseu->slice_mask); s++) {
43b67998
ID
4471 u8 subslice_7eu =
4472 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
91bedd34 4473
915490d5 4474 sseu->eu_total -= hweight8(subslice_7eu);
91bedd34
ŁD
4475 }
4476 }
4477}
4478
615d8908
ID
4479static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4480 const struct sseu_dev_info *sseu)
4481{
4482 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4483 const char *type = is_available_info ? "Available" : "Enabled";
8cc76693 4484 int s;
615d8908 4485
c67ba538
ID
4486 seq_printf(m, " %s Slice Mask: %04x\n", type,
4487 sseu->slice_mask);
615d8908 4488 seq_printf(m, " %s Slice Total: %u\n", type,
f08a0c92 4489 hweight8(sseu->slice_mask));
615d8908 4490 seq_printf(m, " %s Subslice Total: %u\n", type,
57ec171e 4491 sseu_subslice_total(sseu));
8cc76693
LL
4492 for (s = 0; s < fls(sseu->slice_mask); s++) {
4493 seq_printf(m, " %s Slice%i subslices: %u\n", type,
4494 s, hweight8(sseu->subslice_mask[s]));
4495 }
615d8908
ID
4496 seq_printf(m, " %s EU Total: %u\n", type,
4497 sseu->eu_total);
4498 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4499 sseu->eu_per_subslice);
4500
4501 if (!is_available_info)
4502 return;
4503
4504 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4505 if (HAS_POOLED_EU(dev_priv))
4506 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4507
4508 seq_printf(m, " Has Slice Power Gating: %s\n",
4509 yesno(sseu->has_slice_pg));
4510 seq_printf(m, " Has Subslice Power Gating: %s\n",
4511 yesno(sseu->has_subslice_pg));
4512 seq_printf(m, " Has EU Power Gating: %s\n",
4513 yesno(sseu->has_eu_pg));
4514}
4515
3873218f
JM
4516static int i915_sseu_status(struct seq_file *m, void *unused)
4517{
36cdd013 4518 struct drm_i915_private *dev_priv = node_to_i915(m->private);
915490d5 4519 struct sseu_dev_info sseu;
3873218f 4520
36cdd013 4521 if (INTEL_GEN(dev_priv) < 8)
3873218f
JM
4522 return -ENODEV;
4523
4524 seq_puts(m, "SSEU Device Info\n");
615d8908 4525 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
3873218f 4526
7f992aba 4527 seq_puts(m, "SSEU Device Status\n");
915490d5 4528 memset(&sseu, 0, sizeof(sseu));
8cc76693
LL
4529 sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4530 sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4531 sseu.max_eus_per_subslice =
4532 INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
238010ed
DW
4533
4534 intel_runtime_pm_get(dev_priv);
4535
36cdd013 4536 if (IS_CHERRYVIEW(dev_priv)) {
915490d5 4537 cherryview_sseu_device_status(dev_priv, &sseu);
36cdd013 4538 } else if (IS_BROADWELL(dev_priv)) {
915490d5 4539 broadwell_sseu_device_status(dev_priv, &sseu);
f8c3dcf9 4540 } else if (IS_GEN9(dev_priv)) {
915490d5 4541 gen9_sseu_device_status(dev_priv, &sseu);
f8c3dcf9
RV
4542 } else if (INTEL_GEN(dev_priv) >= 10) {
4543 gen10_sseu_device_status(dev_priv, &sseu);
7f992aba 4544 }
238010ed
DW
4545
4546 intel_runtime_pm_put(dev_priv);
4547
615d8908 4548 i915_print_sseu_info(m, false, &sseu);
7f992aba 4549
3873218f
JM
4550 return 0;
4551}
4552
6d794d42
BW
4553static int i915_forcewake_open(struct inode *inode, struct file *file)
4554{
d7a133d8 4555 struct drm_i915_private *i915 = inode->i_private;
6d794d42 4556
d7a133d8 4557 if (INTEL_GEN(i915) < 6)
6d794d42
BW
4558 return 0;
4559
d7a133d8
CW
4560 intel_runtime_pm_get(i915);
4561 intel_uncore_forcewake_user_get(i915);
6d794d42
BW
4562
4563 return 0;
4564}
4565
c43b5634 4566static int i915_forcewake_release(struct inode *inode, struct file *file)
6d794d42 4567{
d7a133d8 4568 struct drm_i915_private *i915 = inode->i_private;
6d794d42 4569
d7a133d8 4570 if (INTEL_GEN(i915) < 6)
6d794d42
BW
4571 return 0;
4572
d7a133d8
CW
4573 intel_uncore_forcewake_user_put(i915);
4574 intel_runtime_pm_put(i915);
6d794d42
BW
4575
4576 return 0;
4577}
4578
4579static const struct file_operations i915_forcewake_fops = {
4580 .owner = THIS_MODULE,
4581 .open = i915_forcewake_open,
4582 .release = i915_forcewake_release,
4583};
4584
317eaa95
L
4585static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4586{
4587 struct drm_i915_private *dev_priv = m->private;
4588 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4589
4590 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4591 seq_printf(m, "Detected: %s\n",
4592 yesno(delayed_work_pending(&hotplug->reenable_work)));
4593
4594 return 0;
4595}
4596
4597static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4598 const char __user *ubuf, size_t len,
4599 loff_t *offp)
4600{
4601 struct seq_file *m = file->private_data;
4602 struct drm_i915_private *dev_priv = m->private;
4603 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4604 unsigned int new_threshold;
4605 int i;
4606 char *newline;
4607 char tmp[16];
4608
4609 if (len >= sizeof(tmp))
4610 return -EINVAL;
4611
4612 if (copy_from_user(tmp, ubuf, len))
4613 return -EFAULT;
4614
4615 tmp[len] = '\0';
4616
4617 /* Strip newline, if any */
4618 newline = strchr(tmp, '\n');
4619 if (newline)
4620 *newline = '\0';
4621
4622 if (strcmp(tmp, "reset") == 0)
4623 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4624 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4625 return -EINVAL;
4626
4627 if (new_threshold > 0)
4628 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4629 new_threshold);
4630 else
4631 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4632
4633 spin_lock_irq(&dev_priv->irq_lock);
4634 hotplug->hpd_storm_threshold = new_threshold;
4635 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4636 for_each_hpd_pin(i)
4637 hotplug->stats[i].count = 0;
4638 spin_unlock_irq(&dev_priv->irq_lock);
4639
4640 /* Re-enable hpd immediately if we were in an irq storm */
4641 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4642
4643 return len;
4644}
4645
/* Route reads of the debugfs file through i915_hpd_storm_ctl_show(). */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4650
/* seq_file-backed, read/write debugfs ops for the HPD storm control. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4659
35954e88
R
/*
 * Debugfs knob to manually enable (val != 0) or disable (val == 0) DRRS
 * on every active eDP output whose current config supports it. Holds
 * all modeset locks so the CRTC state cannot change underneath us.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	struct intel_encoder *encoder;
	struct intel_dp *intel_dp;

	/* NOTE(review): gen7 cutoff presumably matches DRRS HW support — confirm. */
	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		/* Only active CRTCs configured for DRRS are touched. */
		if (!intel_crtc->base.state->active ||
		    !intel_crtc->config->has_drrs)
			continue;

		for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
			/* DRRS is an eDP-only feature. */
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
					 val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      intel_crtc->config);
			else
				intel_edp_drrs_disable(intel_dp,
						       intel_crtc->config);
		}
	}
	drm_modeset_unlock_all(dev);

	return 0;
}

/* Write-only attribute: writes funnel into i915_drrs_ctl_set(). */
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4699
d52ad9cb
ML
/*
 * Writing a truthy value to i915_fifo_underrun_reset re-arms FIFO
 * underrun reporting on every CRTC (and resets FBC underrun state),
 * after waiting for any in-flight commit on that CRTC to complete.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* Writing "false" is accepted but does nothing. */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		/* Serialize against a concurrent modeset on this CRTC. */
		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			/*
			 * Wait for the pending commit to fully land (HW
			 * programming done, then flip done) before re-arming.
			 */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		/* Always drop the lock before acting on a wait error. */
		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4753
/* Write-only debugfs ops for re-arming FIFO underrun reporting. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4760
/*
 * Read-only debugfs entries, registered in bulk via
 * drm_debugfs_create_files() from i915_debugfs_register().
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2017263e 4811
/*
 * Writable debugfs entries, each backed by its own file_operations;
 * created individually in i915_debugfs_register().
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4841
1dac891c 4842int i915_debugfs_register(struct drm_i915_private *dev_priv)
2017263e 4843{
91c8a326 4844 struct drm_minor *minor = dev_priv->drm.primary;
b05eeb0f 4845 struct dentry *ent;
6cc42152 4846 int i;
f3cd474b 4847
b05eeb0f
NT
4848 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4849 minor->debugfs_root, to_i915(minor->dev),
4850 &i915_forcewake_fops);
4851 if (!ent)
4852 return -ENOMEM;
6a9c308d 4853
34b9674c 4854 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
b05eeb0f
NT
4855 ent = debugfs_create_file(i915_debugfs_files[i].name,
4856 S_IRUGO | S_IWUSR,
4857 minor->debugfs_root,
4858 to_i915(minor->dev),
34b9674c 4859 i915_debugfs_files[i].fops);
b05eeb0f
NT
4860 if (!ent)
4861 return -ENOMEM;
34b9674c 4862 }
40633219 4863
27c202ad
BG
4864 return drm_debugfs_create_files(i915_debugfs_list,
4865 I915_DEBUGFS_ENTRIES,
2017263e
BG
4866 minor->debugfs_root, minor);
4867}
4868
aa7471d2
JN
/* Describes one range of DPCD registers dumped by i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4879
/* DPCD register ranges exposed through the i915_dpcd debugfs file. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4892
4893static int i915_dpcd_show(struct seq_file *m, void *data)
4894{
4895 struct drm_connector *connector = m->private;
4896 struct intel_dp *intel_dp =
4897 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4898 uint8_t buf[16];
4899 ssize_t err;
4900 int i;
4901
5c1a8875
MK
4902 if (connector->status != connector_status_connected)
4903 return -ENODEV;
4904
aa7471d2
JN
4905 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4906 const struct dpcd_block *b = &i915_dpcd_debug[i];
4907 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4908
4909 if (b->edp &&
4910 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4911 continue;
4912
4913 /* low tech for now */
4914 if (WARN_ON(size > sizeof(buf)))
4915 continue;
4916
4917 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4918 if (err <= 0) {
4919 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4920 size, b->offset, err);
4921 continue;
4922 }
4923
4924 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
b3f9d7d7 4925 }
aa7471d2
JN
4926
4927 return 0;
4928}
e4006713 4929DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
aa7471d2 4930
ecbd6781
DW
4931static int i915_panel_show(struct seq_file *m, void *data)
4932{
4933 struct drm_connector *connector = m->private;
4934 struct intel_dp *intel_dp =
4935 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4936
4937 if (connector->status != connector_status_connected)
4938 return -ENODEV;
4939
4940 seq_printf(m, "Panel power up delay: %d\n",
4941 intel_dp->panel_power_up_delay);
4942 seq_printf(m, "Panel power down delay: %d\n",
4943 intel_dp->panel_power_down_delay);
4944 seq_printf(m, "Backlight on delay: %d\n",
4945 intel_dp->backlight_on_delay);
4946 seq_printf(m, "Backlight off delay: %d\n",
4947 intel_dp->backlight_off_delay);
4948
4949 return 0;
4950}
e4006713 4951DEFINE_SHOW_ATTRIBUTE(i915_panel);
ecbd6781 4952
aa7471d2
JN
4953/**
4954 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4955 * @connector: pointer to a registered drm_connector
4956 *
4957 * Cleanup will be done by drm_connector_unregister() through a call to
4958 * drm_debugfs_connector_remove().
4959 *
4960 * Returns 0 on success, negative error codes on error.
4961 */
4962int i915_debugfs_connector_add(struct drm_connector *connector)
4963{
4964 struct dentry *root = connector->debugfs_entry;
4965
4966 /* The connector must have been registered beforehands. */
4967 if (!root)
4968 return -ENODEV;
4969
4970 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4971 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
ecbd6781
DW
4972 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4973 connector, &i915_dpcd_fops);
4974
5b7b3086 4975 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
ecbd6781
DW
4976 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4977 connector, &i915_panel_fops);
5b7b3086
DP
4978 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4979 connector, &i915_psr_sink_status_fops);
4980 }
aa7471d2
JN
4981
4982 return 0;
4983}