]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame - drivers/gpu/drm/i915/i915_debugfs.c
drm/i915/gtt: remove px_page
[mirror_ubuntu-focal-kernel.git] / drivers / gpu / drm / i915 / i915_debugfs.c
CommitLineData
2017263e
BG
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
f3cd474b 29#include <linux/debugfs.h>
e637d2cb 30#include <linux/sort.h>
d92a8cfc 31#include <linux/sched/mm.h>
4e5359cd 32#include "intel_drv.h"
a2695744 33#include "intel_guc_submission.h"
2017263e 34
36cdd013
DW
35static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36{
37 return to_i915(node->minor->dev);
38}
39
70d39fe4
CW
/*
 * debugfs "i915_capabilities": dump the static device capabilities,
 * runtime-probed info, driver caps and the current module parameters.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(info, &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	/* Hold the param lock so the values cannot change mid-dump. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
2017263e 60
/* '*' when the object is still busy on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
65
a7363de7 66static char get_pin_flag(struct drm_i915_gem_object *obj)
be12a86b 67{
bd3d2252 68 return obj->pin_global ? 'p' : ' ';
be12a86b
TU
69}
70
a7363de7 71static char get_tiling_flag(struct drm_i915_gem_object *obj)
a6172a80 72{
3e510a8e 73 switch (i915_gem_object_get_tiling(obj)) {
0206e353 74 default:
be12a86b
TU
75 case I915_TILING_NONE: return ' ';
76 case I915_TILING_X: return 'X';
77 case I915_TILING_Y: return 'Y';
0206e353 78 }
a6172a80
CW
79}
80
a7363de7 81static char get_global_flag(struct drm_i915_gem_object *obj)
be12a86b 82{
a65adaf8 83 return obj->userfault_count ? 'g' : ' ';
be12a86b
TU
84}
85
a7363de7 86static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
1d693bcc 87{
a4f5ea64 88 return obj->mm.mapping ? 'M' : ' ';
1d693bcc
BW
89}
90
ca1543be
TU
91static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92{
93 u64 size = 0;
94 struct i915_vma *vma;
95
e2189dd0
CW
96 for_each_ggtt_vma(vma, obj) {
97 if (drm_mm_node_allocated(&vma->node))
ca1543be
TU
98 size += vma->node.size;
99 }
100
101 return size;
102}
103
7393b7ee
MA
104static const char *
105stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106{
107 size_t x = 0;
108
109 switch (page_sizes) {
110 case 0:
111 return "";
112 case I915_GTT_PAGE_SIZE_4K:
113 return "4K";
114 case I915_GTT_PAGE_SIZE_64K:
115 return "64K";
116 case I915_GTT_PAGE_SIZE_2M:
117 return "2M";
118 default:
119 if (!buf)
120 return "M";
121
122 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123 x += snprintf(buf + x, len - x, "2M, ");
124 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125 x += snprintf(buf + x, len - x, "64K, ");
126 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127 x += snprintf(buf + x, len - x, "4K, ");
128 buf[x-2] = '\0';
129
130 return buf;
131 }
132}
133
37811fcc
CW
/*
 * Print a one-line (plus per-VMA detail) description of a GEM object:
 * status flags, size, cache domains, every allocated VMA with its GGTT
 * view/fence, stolen placement, last-write engine and frontbuffer bits.
 * Caller must hold struct_mutex (asserted below).
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass: count pinned VMAs across all address spaces. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each VMA that has address space bound. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
221
e637d2cb 222static int obj_rank_by_stolen(const void *A, const void *B)
6d2b8885 223{
e637d2cb
CW
224 const struct drm_i915_gem_object *a =
225 *(const struct drm_i915_gem_object **)A;
226 const struct drm_i915_gem_object *b =
227 *(const struct drm_i915_gem_object **)B;
6d2b8885 228
2d05fa16
RV
229 if (a->stolen->start < b->stolen->start)
230 return -1;
231 if (a->stolen->start > b->stolen->start)
232 return 1;
233 return 0;
6d2b8885
CW
234}
235
/*
 * debugfs "i915_gem_stolen": list all objects placed in stolen memory,
 * sorted by their stolen start offset, plus totals.
 *
 * The object count is snapshotted without locking, so the walk below
 * caps at 'total' entries even if more objects appear meanwhile.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	/* Gather stolen objects from both the bound and unbound lists. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		/* Unbound objects contribute no GTT size by definition. */
		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
298
/*
 * Accumulator for per-client (or kernel-internal) object statistics,
 * filled in by per_file_stats() and printed via print_file_stats().
 */
struct file_stats {
	struct drm_i915_file_private *file_priv; /* owner to match ppgtt VMAs against */
	unsigned long count;      /* number of objects seen */
	u64 total, unbound;       /* bytes total / bytes with no GTT binding */
	u64 global, shared;       /* bytes in the GGTT / flinked or dma-buf exported */
	u64 active, inactive;     /* bytes in VMAs busy on the GPU / idle */
};
306
/*
 * idr_for_each() callback: fold one object (@ptr) into the running
 * struct file_stats (@data). Always returns 0 so iteration continues.
 * Caller must hold struct_mutex (asserted below).
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/*
			 * Only count ppgtt VMAs belonging to this client;
			 * other clients' bindings of a shared object are
			 * reported under their own stats.
			 */
			if (ppgtt->vm.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
343
b0da1b79
CW
/*
 * Emit one summary line for a struct file_stats; silent when no objects
 * were counted. A macro (not a function) so it works with the by-value
 * 'stats' structs used at the call sites.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
493018dc
BV
356
/*
 * Aggregate and print statistics for every object held in the engines'
 * batch-buffer pools (kernel-owned, hence the "[k]" prefix).
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				/* id 0: these are not client objects */
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
379
/*
 * idr_for_each() callback: fold one GEM context's per-engine state and
 * ring objects into the file_stats accumulator passed via @data.
 */
static int per_file_ctx_stats(int idx, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, ctx->i915, id) {
		struct intel_context *ce = to_intel_context(ctx, engine);

		if (ce->state)
			per_file_stats(0, ce->state->obj, data);
		if (ce->ring)
			per_file_stats(0, ce->ring->vma->obj, data);
	}

	return 0;
}
397
/*
 * Aggregate and print statistics for all context objects: the kernel
 * context plus every context of every open client.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	/* Caller holds filelist_mutex, making this walk safe. */
	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
419
/*
 * debugfs "i915_gem_objects": global object-memory summary (bound,
 * unbound, purgeable, mapped, huge-paged, display-pinned), followed by
 * per-client statistics keyed by process name.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	/* First pass: objects with no GTT binding. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Second pass: bound objects (purgeable/mapped totals continue). */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/* Per-client stats: walk the open-file list under filelist_mutex. */
	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct i915_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->gem_context->pid ?
				request->gem_context->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
557
aee56cff 558static int i915_gem_gtt_info(struct seq_file *m, void *data)
08c18323 559{
9f25d007 560 struct drm_info_node *node = m->private;
36cdd013
DW
561 struct drm_i915_private *dev_priv = node_to_i915(node);
562 struct drm_device *dev = &dev_priv->drm;
f2123818 563 struct drm_i915_gem_object **objects;
08c18323 564 struct drm_i915_gem_object *obj;
c44ef60e 565 u64 total_obj_size, total_gtt_size;
f2123818 566 unsigned long nobject, n;
08c18323
CW
567 int count, ret;
568
f2123818
CW
569 nobject = READ_ONCE(dev_priv->mm.object_count);
570 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571 if (!objects)
572 return -ENOMEM;
573
08c18323
CW
574 ret = mutex_lock_interruptible(&dev->struct_mutex);
575 if (ret)
576 return ret;
577
f2123818
CW
578 count = 0;
579 spin_lock(&dev_priv->mm.obj_lock);
580 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581 objects[count++] = obj;
582 if (count == nobject)
583 break;
584 }
585 spin_unlock(&dev_priv->mm.obj_lock);
586
587 total_obj_size = total_gtt_size = 0;
588 for (n = 0; n < count; n++) {
589 obj = objects[n];
590
267f0c90 591 seq_puts(m, " ");
08c18323 592 describe_obj(m, obj);
267f0c90 593 seq_putc(m, '\n');
08c18323 594 total_obj_size += obj->base.size;
ca1543be 595 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
08c18323
CW
596 }
597
598 mutex_unlock(&dev->struct_mutex);
599
c44ef60e 600 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
08c18323 601 count, total_obj_size, total_gtt_size);
f2123818 602 kvfree(objects);
08c18323
CW
603
604 return 0;
605}
606
493018dc
BV
/*
 * debugfs "i915_gem_batch_pool": per-engine, per-bucket listing of all
 * cached batch-pool objects, with a grand total at the end.
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* Count first so the header precedes the listing. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
651
80d89350
TU
/*
 * Dump the gen8+ display-engine interrupt registers (per-pipe IMR/IIR/
 * IER, port, misc and PCU). Shared by the gen8 and gen11 paths of
 * i915_interrupt_info().
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		/*
		 * Reading registers of a powered-down pipe would fault;
		 * take a power reference only if the domain is already up.
		 */
		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv,
							power_domain)) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
701
2017263e
BG
/*
 * debugfs "i915_interrupt_info": dump the interrupt registers for the
 * running platform. The big if/else ladder picks the register layout:
 * CHV, gen11+, gen8+, VLV, pre-PCH-split, then the Ironlake+ PCH split.
 * A runtime-pm reference is held for the duration so MMIO is safe.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/* Skip powered-down pipes; reading them would fault. */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}

	/* Engine interrupt masks: gen11 has dedicated registers,
	 * gen6..gen10 expose a per-engine IMR. */
	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
914
a6172a80
CW
/*
 * debugfs "i915_gem_fence_regs": show each hardware fence register,
 * its pin count, and the object (if any) it currently covers.
 */
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
941
98a2f411 942#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
5a4c6f1b
CW
943static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
944 size_t count, loff_t *pos)
d5442303 945{
5a4c6f1b
CW
946 struct i915_gpu_state *error = file->private_data;
947 struct drm_i915_error_state_buf str;
948 ssize_t ret;
949 loff_t tmp;
d5442303 950
5a4c6f1b
CW
951 if (!error)
952 return 0;
d5442303 953
5a4c6f1b
CW
954 ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
955 if (ret)
956 return ret;
d5442303 957
5a4c6f1b
CW
958 ret = i915_error_state_to_str(&str, error);
959 if (ret)
960 goto out;
d5442303 961
5a4c6f1b
CW
962 tmp = 0;
963 ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
964 if (ret < 0)
965 goto out;
d5442303 966
5a4c6f1b
CW
967 *pos = str.start + ret;
968out:
969 i915_error_state_buf_release(&str);
970 return ret;
971}
edc3d884 972
5a4c6f1b
CW
973static int gpu_state_release(struct inode *inode, struct file *file)
974{
975 i915_gpu_state_put(file->private_data);
edc3d884 976 return 0;
d5442303
DV
977}
978
5a4c6f1b 979static int i915_gpu_info_open(struct inode *inode, struct file *file)
d5442303 980{
090e5fe3 981 struct drm_i915_private *i915 = inode->i_private;
5a4c6f1b 982 struct i915_gpu_state *gpu;
d5442303 983
090e5fe3
CW
984 intel_runtime_pm_get(i915);
985 gpu = i915_capture_gpu_state(i915);
986 intel_runtime_pm_put(i915);
5a4c6f1b
CW
987 if (!gpu)
988 return -ENOMEM;
d5442303 989
5a4c6f1b 990 file->private_data = gpu;
edc3d884
MK
991 return 0;
992}
993
5a4c6f1b
CW
994static const struct file_operations i915_gpu_info_fops = {
995 .owner = THIS_MODULE,
996 .open = i915_gpu_info_open,
997 .read = gpu_state_read,
998 .llseek = default_llseek,
999 .release = gpu_state_release,
1000};
1001
1002static ssize_t
1003i915_error_state_write(struct file *filp,
1004 const char __user *ubuf,
1005 size_t cnt,
1006 loff_t *ppos)
4dc955f7 1007{
5a4c6f1b 1008 struct i915_gpu_state *error = filp->private_data;
4dc955f7 1009
5a4c6f1b
CW
1010 if (!error)
1011 return 0;
edc3d884 1012
5a4c6f1b
CW
1013 DRM_DEBUG_DRIVER("Resetting error state\n");
1014 i915_reset_error_state(error->i915);
edc3d884 1015
5a4c6f1b
CW
1016 return cnt;
1017}
edc3d884 1018
5a4c6f1b
CW
1019static int i915_error_state_open(struct inode *inode, struct file *file)
1020{
1021 file->private_data = i915_first_error_state(inode->i_private);
1022 return 0;
d5442303
DV
1023}
1024
1025static const struct file_operations i915_error_state_fops = {
1026 .owner = THIS_MODULE,
1027 .open = i915_error_state_open,
5a4c6f1b 1028 .read = gpu_state_read,
d5442303
DV
1029 .write = i915_error_state_write,
1030 .llseek = default_llseek,
5a4c6f1b 1031 .release = gpu_state_release,
d5442303 1032};
98a2f411
CW
1033#endif
1034
647416f9
KC
1035static int
1036i915_next_seqno_set(void *data, u64 val)
1037{
36cdd013
DW
1038 struct drm_i915_private *dev_priv = data;
1039 struct drm_device *dev = &dev_priv->drm;
40633219
MK
1040 int ret;
1041
40633219
MK
1042 ret = mutex_lock_interruptible(&dev->struct_mutex);
1043 if (ret)
1044 return ret;
1045
65c475c6 1046 intel_runtime_pm_get(dev_priv);
73cb9701 1047 ret = i915_gem_set_global_seqno(dev, val);
65c475c6
CW
1048 intel_runtime_pm_put(dev_priv);
1049
40633219
MK
1050 mutex_unlock(&dev->struct_mutex);
1051
647416f9 1052 return ret;
40633219
MK
1053}
1054
647416f9 1055DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
9b6586ae 1056 NULL, i915_next_seqno_set,
3a3b4f98 1057 "0x%llx\n");
40633219 1058
adb4bd12 1059static int i915_frequency_info(struct seq_file *m, void *unused)
f97108d1 1060{
36cdd013 1061 struct drm_i915_private *dev_priv = node_to_i915(m->private);
562d9bae 1062 struct intel_rps *rps = &dev_priv->gt_pm.rps;
c8c8fb33
PZ
1063 int ret = 0;
1064
1065 intel_runtime_pm_get(dev_priv);
3b8d8d91 1066
36cdd013 1067 if (IS_GEN5(dev_priv)) {
3b8d8d91
JB
1068 u16 rgvswctl = I915_READ16(MEMSWCTL);
1069 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1070
1071 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1072 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1073 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1074 MEMSTAT_VID_SHIFT);
1075 seq_printf(m, "Current P-state: %d\n",
1076 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
36cdd013 1077 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
0d6fc92a 1078 u32 rpmodectl, freq_sts;
666a4537 1079
9f817501 1080 mutex_lock(&dev_priv->pcu_lock);
0d6fc92a
SAK
1081
1082 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1083 seq_printf(m, "Video Turbo Mode: %s\n",
1084 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1085 seq_printf(m, "HW control enabled: %s\n",
1086 yesno(rpmodectl & GEN6_RP_ENABLE));
1087 seq_printf(m, "SW control enabled: %s\n",
1088 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1089 GEN6_RP_MEDIA_SW_MODE));
1090
666a4537
WB
1091 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1092 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1093 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1094
1095 seq_printf(m, "actual GPU freq: %d MHz\n",
1096 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1097
1098 seq_printf(m, "current GPU freq: %d MHz\n",
562d9bae 1099 intel_gpu_freq(dev_priv, rps->cur_freq));
666a4537
WB
1100
1101 seq_printf(m, "max GPU freq: %d MHz\n",
562d9bae 1102 intel_gpu_freq(dev_priv, rps->max_freq));
666a4537
WB
1103
1104 seq_printf(m, "min GPU freq: %d MHz\n",
562d9bae 1105 intel_gpu_freq(dev_priv, rps->min_freq));
666a4537
WB
1106
1107 seq_printf(m, "idle GPU freq: %d MHz\n",
562d9bae 1108 intel_gpu_freq(dev_priv, rps->idle_freq));
666a4537
WB
1109
1110 seq_printf(m,
1111 "efficient (RPe) frequency: %d MHz\n",
562d9bae 1112 intel_gpu_freq(dev_priv, rps->efficient_freq));
9f817501 1113 mutex_unlock(&dev_priv->pcu_lock);
36cdd013 1114 } else if (INTEL_GEN(dev_priv) >= 6) {
35040562
BP
1115 u32 rp_state_limits;
1116 u32 gt_perf_status;
1117 u32 rp_state_cap;
0d8f9491 1118 u32 rpmodectl, rpinclimit, rpdeclimit;
8e8c06cd 1119 u32 rpstat, cagf, reqf;
ccab5c82
JB
1120 u32 rpupei, rpcurup, rpprevup;
1121 u32 rpdownei, rpcurdown, rpprevdown;
9dd3c605 1122 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
3b8d8d91
JB
1123 int max_freq;
1124
35040562 1125 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
cc3f90f0 1126 if (IS_GEN9_LP(dev_priv)) {
35040562
BP
1127 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1128 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1129 } else {
1130 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1131 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1132 }
1133
3b8d8d91 1134 /* RPSTAT1 is in the GT power well */
59bad947 1135 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
3b8d8d91 1136
8e8c06cd 1137 reqf = I915_READ(GEN6_RPNSWREQ);
35ceabf3 1138 if (INTEL_GEN(dev_priv) >= 9)
60260a5b
AG
1139 reqf >>= 23;
1140 else {
1141 reqf &= ~GEN6_TURBO_DISABLE;
36cdd013 1142 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
60260a5b
AG
1143 reqf >>= 24;
1144 else
1145 reqf >>= 25;
1146 }
7c59a9c1 1147 reqf = intel_gpu_freq(dev_priv, reqf);
8e8c06cd 1148
0d8f9491
CW
1149 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1150 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1151 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1152
ccab5c82 1153 rpstat = I915_READ(GEN6_RPSTAT1);
d6cda9c7
AG
1154 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1155 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1156 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1157 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1158 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1159 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
c84b2705
TU
1160 cagf = intel_gpu_freq(dev_priv,
1161 intel_get_cagf(dev_priv, rpstat));
ccab5c82 1162
59bad947 1163 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
d1ebd816 1164
6b7a6a7b
OM
1165 if (INTEL_GEN(dev_priv) >= 11) {
1166 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1167 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1168 /*
1169 * The equivalent to the PM ISR & IIR cannot be read
1170 * without affecting the current state of the system
1171 */
1172 pm_isr = 0;
1173 pm_iir = 0;
1174 } else if (INTEL_GEN(dev_priv) >= 8) {
9dd3c605
PZ
1175 pm_ier = I915_READ(GEN8_GT_IER(2));
1176 pm_imr = I915_READ(GEN8_GT_IMR(2));
1177 pm_isr = I915_READ(GEN8_GT_ISR(2));
1178 pm_iir = I915_READ(GEN8_GT_IIR(2));
6b7a6a7b
OM
1179 } else {
1180 pm_ier = I915_READ(GEN6_PMIER);
1181 pm_imr = I915_READ(GEN6_PMIMR);
1182 pm_isr = I915_READ(GEN6_PMISR);
1183 pm_iir = I915_READ(GEN6_PMIIR);
9dd3c605 1184 }
6b7a6a7b
OM
1185 pm_mask = I915_READ(GEN6_PMINTRMSK);
1186
960e5465
SAK
1187 seq_printf(m, "Video Turbo Mode: %s\n",
1188 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1189 seq_printf(m, "HW control enabled: %s\n",
1190 yesno(rpmodectl & GEN6_RP_ENABLE));
1191 seq_printf(m, "SW control enabled: %s\n",
1192 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1193 GEN6_RP_MEDIA_SW_MODE));
6b7a6a7b
OM
1194
1195 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1196 pm_ier, pm_imr, pm_mask);
1197 if (INTEL_GEN(dev_priv) <= 10)
1198 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1199 pm_isr, pm_iir);
5dd04556 1200 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
562d9bae 1201 rps->pm_intrmsk_mbz);
3b8d8d91 1202 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
3b8d8d91 1203 seq_printf(m, "Render p-state ratio: %d\n",
35ceabf3 1204 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
3b8d8d91
JB
1205 seq_printf(m, "Render p-state VID: %d\n",
1206 gt_perf_status & 0xff);
1207 seq_printf(m, "Render p-state limit: %d\n",
1208 rp_state_limits & 0xff);
0d8f9491
CW
1209 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1210 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1211 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1212 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
8e8c06cd 1213 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
f82855d3 1214 seq_printf(m, "CAGF: %dMHz\n", cagf);
d6cda9c7
AG
1215 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1216 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1217 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1218 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1219 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1220 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
562d9bae 1221 seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
d86ed34a 1222
d6cda9c7
AG
1223 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1224 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1225 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1226 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1227 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1228 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
562d9bae 1229 seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
3b8d8d91 1230
cc3f90f0 1231 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
35040562 1232 rp_state_cap >> 16) & 0xff;
35ceabf3 1233 max_freq *= (IS_GEN9_BC(dev_priv) ||
2b2874ef 1234 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1235 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
7c59a9c1 1236 intel_gpu_freq(dev_priv, max_freq));
3b8d8d91
JB
1237
1238 max_freq = (rp_state_cap & 0xff00) >> 8;
35ceabf3 1239 max_freq *= (IS_GEN9_BC(dev_priv) ||
2b2874ef 1240 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1241 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
7c59a9c1 1242 intel_gpu_freq(dev_priv, max_freq));
3b8d8d91 1243
cc3f90f0 1244 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
35040562 1245 rp_state_cap >> 0) & 0xff;
35ceabf3 1246 max_freq *= (IS_GEN9_BC(dev_priv) ||
2b2874ef 1247 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1248 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
7c59a9c1 1249 intel_gpu_freq(dev_priv, max_freq));
31c77388 1250 seq_printf(m, "Max overclocked frequency: %dMHz\n",
562d9bae 1251 intel_gpu_freq(dev_priv, rps->max_freq));
aed242ff 1252
d86ed34a 1253 seq_printf(m, "Current freq: %d MHz\n",
562d9bae 1254 intel_gpu_freq(dev_priv, rps->cur_freq));
d86ed34a 1255 seq_printf(m, "Actual freq: %d MHz\n", cagf);
aed242ff 1256 seq_printf(m, "Idle freq: %d MHz\n",
562d9bae 1257 intel_gpu_freq(dev_priv, rps->idle_freq));
d86ed34a 1258 seq_printf(m, "Min freq: %d MHz\n",
562d9bae 1259 intel_gpu_freq(dev_priv, rps->min_freq));
29ecd78d 1260 seq_printf(m, "Boost freq: %d MHz\n",
562d9bae 1261 intel_gpu_freq(dev_priv, rps->boost_freq));
d86ed34a 1262 seq_printf(m, "Max freq: %d MHz\n",
562d9bae 1263 intel_gpu_freq(dev_priv, rps->max_freq));
d86ed34a
CW
1264 seq_printf(m,
1265 "efficient (RPe) frequency: %d MHz\n",
562d9bae 1266 intel_gpu_freq(dev_priv, rps->efficient_freq));
3b8d8d91 1267 } else {
267f0c90 1268 seq_puts(m, "no P-state info available\n");
3b8d8d91 1269 }
f97108d1 1270
49cd97a3 1271 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1170f28c
MK
1272 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1273 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1274
c8c8fb33
PZ
1275 intel_runtime_pm_put(dev_priv);
1276 return ret;
f97108d1
JB
1277}
1278
d636951e
BW
1279static void i915_instdone_info(struct drm_i915_private *dev_priv,
1280 struct seq_file *m,
1281 struct intel_instdone *instdone)
1282{
f9e61372
BW
1283 int slice;
1284 int subslice;
1285
d636951e
BW
1286 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1287 instdone->instdone);
1288
1289 if (INTEL_GEN(dev_priv) <= 3)
1290 return;
1291
1292 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1293 instdone->slice_common);
1294
1295 if (INTEL_GEN(dev_priv) <= 6)
1296 return;
1297
f9e61372
BW
1298 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1299 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1300 slice, subslice, instdone->sampler[slice][subslice]);
1301
1302 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1303 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1304 slice, subslice, instdone->row[slice][subslice]);
d636951e
BW
1305}
1306
f654449a
CW
1307static int i915_hangcheck_info(struct seq_file *m, void *unused)
1308{
36cdd013 1309 struct drm_i915_private *dev_priv = node_to_i915(m->private);
e2f80391 1310 struct intel_engine_cs *engine;
666796da
TU
1311 u64 acthd[I915_NUM_ENGINES];
1312 u32 seqno[I915_NUM_ENGINES];
d636951e 1313 struct intel_instdone instdone;
c3232b18 1314 enum intel_engine_id id;
f654449a 1315
8af29b0c 1316 if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
8c185eca
CW
1317 seq_puts(m, "Wedged\n");
1318 if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1319 seq_puts(m, "Reset in progress: struct_mutex backoff\n");
1320 if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
1321 seq_puts(m, "Reset in progress: reset handoff to waiter\n");
8af29b0c 1322 if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
8c185eca 1323 seq_puts(m, "Waiter holding struct mutex\n");
8af29b0c 1324 if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
8c185eca 1325 seq_puts(m, "struct_mutex blocked for reset\n");
8af29b0c 1326
4f044a88 1327 if (!i915_modparams.enable_hangcheck) {
8c185eca 1328 seq_puts(m, "Hangcheck disabled\n");
f654449a
CW
1329 return 0;
1330 }
1331
ebbc7546
MK
1332 intel_runtime_pm_get(dev_priv);
1333
3b3f1650 1334 for_each_engine(engine, dev_priv, id) {
7e37f889 1335 acthd[id] = intel_engine_get_active_head(engine);
1b7744e7 1336 seqno[id] = intel_engine_get_seqno(engine);
ebbc7546
MK
1337 }
1338
3b3f1650 1339 intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
61642ff0 1340
ebbc7546
MK
1341 intel_runtime_pm_put(dev_priv);
1342
8352aea3
CW
1343 if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1344 seq_printf(m, "Hangcheck active, timer fires in %dms\n",
f654449a
CW
1345 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1346 jiffies));
8352aea3
CW
1347 else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1348 seq_puts(m, "Hangcheck active, work pending\n");
1349 else
1350 seq_puts(m, "Hangcheck inactive\n");
f654449a 1351
f73b5674
CW
1352 seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1353
3b3f1650 1354 for_each_engine(engine, dev_priv, id) {
33f53719
CW
1355 struct intel_breadcrumbs *b = &engine->breadcrumbs;
1356 struct rb_node *rb;
1357
e2f80391 1358 seq_printf(m, "%s:\n", engine->name);
52d7f16e 1359 seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
cb399eab 1360 engine->hangcheck.seqno, seqno[id],
52d7f16e 1361 intel_engine_last_submit(engine));
1fd00c0f 1362 seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
83348ba8
CW
1363 yesno(intel_engine_has_waiter(engine)),
1364 yesno(test_bit(engine->id,
3fe3b030 1365 &dev_priv->gpu_error.missed_irq_rings)),
1fd00c0f
CW
1366 yesno(engine->hangcheck.stalled),
1367 yesno(engine->hangcheck.wedged));
3fe3b030 1368
61d3dc70 1369 spin_lock_irq(&b->rb_lock);
33f53719 1370 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
f802cf7e 1371 struct intel_wait *w = rb_entry(rb, typeof(*w), node);
33f53719
CW
1372
1373 seq_printf(m, "\t%s [%d] waiting for %x\n",
1374 w->tsk->comm, w->tsk->pid, w->seqno);
1375 }
61d3dc70 1376 spin_unlock_irq(&b->rb_lock);
33f53719 1377
f654449a 1378 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
e2f80391 1379 (long long)engine->hangcheck.acthd,
c3232b18 1380 (long long)acthd[id]);
3fe3b030
MK
1381 seq_printf(m, "\taction = %s(%d) %d ms ago\n",
1382 hangcheck_action_to_str(engine->hangcheck.action),
1383 engine->hangcheck.action,
1384 jiffies_to_msecs(jiffies -
1385 engine->hangcheck.action_timestamp));
61642ff0 1386
e2f80391 1387 if (engine->id == RCS) {
d636951e 1388 seq_puts(m, "\tinstdone read =\n");
61642ff0 1389
d636951e 1390 i915_instdone_info(dev_priv, m, &instdone);
61642ff0 1391
d636951e 1392 seq_puts(m, "\tinstdone accu =\n");
61642ff0 1393
d636951e
BW
1394 i915_instdone_info(dev_priv, m,
1395 &engine->hangcheck.instdone);
61642ff0 1396 }
f654449a
CW
1397 }
1398
1399 return 0;
1400}
1401
061d06a2
MT
1402static int i915_reset_info(struct seq_file *m, void *unused)
1403{
1404 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1405 struct i915_gpu_error *error = &dev_priv->gpu_error;
1406 struct intel_engine_cs *engine;
1407 enum intel_engine_id id;
1408
1409 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1410
1411 for_each_engine(engine, dev_priv, id) {
1412 seq_printf(m, "%s = %u\n", engine->name,
1413 i915_reset_engine_count(error, engine));
1414 }
1415
1416 return 0;
1417}
1418
4d85529d 1419static int ironlake_drpc_info(struct seq_file *m)
f97108d1 1420{
36cdd013 1421 struct drm_i915_private *dev_priv = node_to_i915(m->private);
616fdb5a
BW
1422 u32 rgvmodectl, rstdbyctl;
1423 u16 crstandvid;
616fdb5a 1424
616fdb5a
BW
1425 rgvmodectl = I915_READ(MEMMODECTL);
1426 rstdbyctl = I915_READ(RSTDBYCTL);
1427 crstandvid = I915_READ16(CRSTANDVID);
1428
742f491d 1429 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
f97108d1
JB
1430 seq_printf(m, "Boost freq: %d\n",
1431 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1432 MEMMODE_BOOST_FREQ_SHIFT);
1433 seq_printf(m, "HW control enabled: %s\n",
742f491d 1434 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
f97108d1 1435 seq_printf(m, "SW control enabled: %s\n",
742f491d 1436 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
f97108d1 1437 seq_printf(m, "Gated voltage change: %s\n",
742f491d 1438 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
f97108d1
JB
1439 seq_printf(m, "Starting frequency: P%d\n",
1440 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
7648fa99 1441 seq_printf(m, "Max P-state: P%d\n",
f97108d1 1442 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
7648fa99
JB
1443 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1444 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1445 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1446 seq_printf(m, "Render standby enabled: %s\n",
742f491d 1447 yesno(!(rstdbyctl & RCX_SW_EXIT)));
267f0c90 1448 seq_puts(m, "Current RS state: ");
88271da3
JB
1449 switch (rstdbyctl & RSX_STATUS_MASK) {
1450 case RSX_STATUS_ON:
267f0c90 1451 seq_puts(m, "on\n");
88271da3
JB
1452 break;
1453 case RSX_STATUS_RC1:
267f0c90 1454 seq_puts(m, "RC1\n");
88271da3
JB
1455 break;
1456 case RSX_STATUS_RC1E:
267f0c90 1457 seq_puts(m, "RC1E\n");
88271da3
JB
1458 break;
1459 case RSX_STATUS_RS1:
267f0c90 1460 seq_puts(m, "RS1\n");
88271da3
JB
1461 break;
1462 case RSX_STATUS_RS2:
267f0c90 1463 seq_puts(m, "RS2 (RC6)\n");
88271da3
JB
1464 break;
1465 case RSX_STATUS_RS3:
267f0c90 1466 seq_puts(m, "RC3 (RC6+)\n");
88271da3
JB
1467 break;
1468 default:
267f0c90 1469 seq_puts(m, "unknown\n");
88271da3
JB
1470 break;
1471 }
f97108d1
JB
1472
1473 return 0;
1474}
1475
f65367b5 1476static int i915_forcewake_domains(struct seq_file *m, void *data)
669ab5aa 1477{
233ebf57 1478 struct drm_i915_private *i915 = node_to_i915(m->private);
b2cff0db 1479 struct intel_uncore_forcewake_domain *fw_domain;
d2dc94bc 1480 unsigned int tmp;
b2cff0db 1481
d7a133d8
CW
1482 seq_printf(m, "user.bypass_count = %u\n",
1483 i915->uncore.user_forcewake.count);
1484
233ebf57 1485 for_each_fw_domain(fw_domain, i915, tmp)
b2cff0db 1486 seq_printf(m, "%s.wake_count = %u\n",
33c582c1 1487 intel_uncore_forcewake_domain_to_str(fw_domain->id),
233ebf57 1488 READ_ONCE(fw_domain->wake_count));
669ab5aa 1489
b2cff0db
CW
1490 return 0;
1491}
1492
1362877e
MK
1493static void print_rc6_res(struct seq_file *m,
1494 const char *title,
1495 const i915_reg_t reg)
1496{
1497 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1498
1499 seq_printf(m, "%s %u (%llu us)\n",
1500 title, I915_READ(reg),
1501 intel_rc6_residency_us(dev_priv, reg));
1502}
1503
b2cff0db
CW
1504static int vlv_drpc_info(struct seq_file *m)
1505{
36cdd013 1506 struct drm_i915_private *dev_priv = node_to_i915(m->private);
0d6fc92a 1507 u32 rcctl1, pw_status;
669ab5aa 1508
6b312cd3 1509 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
669ab5aa
D
1510 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1511
669ab5aa
D
1512 seq_printf(m, "RC6 Enabled: %s\n",
1513 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1514 GEN6_RC_CTL_EI_MODE(1))));
1515 seq_printf(m, "Render Power Well: %s\n",
6b312cd3 1516 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
669ab5aa 1517 seq_printf(m, "Media Power Well: %s\n",
6b312cd3 1518 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
669ab5aa 1519
1362877e
MK
1520 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1521 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
9cc19be5 1522
f65367b5 1523 return i915_forcewake_domains(m, NULL);
669ab5aa
D
1524}
1525
4d85529d
BW
1526static int gen6_drpc_info(struct seq_file *m)
1527{
36cdd013 1528 struct drm_i915_private *dev_priv = node_to_i915(m->private);
960e5465 1529 u32 gt_core_status, rcctl1, rc6vids = 0;
f2dd7578 1530 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
4d85529d 1531
75aa3f63 1532 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
ed71f1b4 1533 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
4d85529d 1534
4d85529d 1535 rcctl1 = I915_READ(GEN6_RC_CONTROL);
36cdd013 1536 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1537 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1538 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1539 }
cf632bd6 1540
51cc9ade
ID
1541 if (INTEL_GEN(dev_priv) <= 7) {
1542 mutex_lock(&dev_priv->pcu_lock);
1543 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1544 &rc6vids);
1545 mutex_unlock(&dev_priv->pcu_lock);
1546 }
4d85529d 1547
fff24e21 1548 seq_printf(m, "RC1e Enabled: %s\n",
4d85529d
BW
1549 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1550 seq_printf(m, "RC6 Enabled: %s\n",
1551 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
36cdd013 1552 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1553 seq_printf(m, "Render Well Gating Enabled: %s\n",
1554 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1555 seq_printf(m, "Media Well Gating Enabled: %s\n",
1556 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1557 }
4d85529d
BW
1558 seq_printf(m, "Deep RC6 Enabled: %s\n",
1559 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1560 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1561 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
267f0c90 1562 seq_puts(m, "Current RC state: ");
4d85529d
BW
1563 switch (gt_core_status & GEN6_RCn_MASK) {
1564 case GEN6_RC0:
1565 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
267f0c90 1566 seq_puts(m, "Core Power Down\n");
4d85529d 1567 else
267f0c90 1568 seq_puts(m, "on\n");
4d85529d
BW
1569 break;
1570 case GEN6_RC3:
267f0c90 1571 seq_puts(m, "RC3\n");
4d85529d
BW
1572 break;
1573 case GEN6_RC6:
267f0c90 1574 seq_puts(m, "RC6\n");
4d85529d
BW
1575 break;
1576 case GEN6_RC7:
267f0c90 1577 seq_puts(m, "RC7\n");
4d85529d
BW
1578 break;
1579 default:
267f0c90 1580 seq_puts(m, "Unknown\n");
4d85529d
BW
1581 break;
1582 }
1583
1584 seq_printf(m, "Core Power Down: %s\n",
1585 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
36cdd013 1586 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1587 seq_printf(m, "Render Power Well: %s\n",
1588 (gen9_powergate_status &
1589 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1590 seq_printf(m, "Media Power Well: %s\n",
1591 (gen9_powergate_status &
1592 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1593 }
cce66a28
BW
1594
1595 /* Not exactly sure what this is */
1362877e
MK
1596 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1597 GEN6_GT_GFX_RC6_LOCKED);
1598 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1599 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1600 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
cce66a28 1601
51cc9ade
ID
1602 if (INTEL_GEN(dev_priv) <= 7) {
1603 seq_printf(m, "RC6 voltage: %dmV\n",
1604 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1605 seq_printf(m, "RC6+ voltage: %dmV\n",
1606 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1607 seq_printf(m, "RC6++ voltage: %dmV\n",
1608 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1609 }
1610
f2dd7578 1611 return i915_forcewake_domains(m, NULL);
4d85529d
BW
1612}
1613
1614static int i915_drpc_info(struct seq_file *m, void *unused)
1615{
36cdd013 1616 struct drm_i915_private *dev_priv = node_to_i915(m->private);
cf632bd6
CW
1617 int err;
1618
1619 intel_runtime_pm_get(dev_priv);
4d85529d 1620
36cdd013 1621 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
cf632bd6 1622 err = vlv_drpc_info(m);
36cdd013 1623 else if (INTEL_GEN(dev_priv) >= 6)
cf632bd6 1624 err = gen6_drpc_info(m);
4d85529d 1625 else
cf632bd6
CW
1626 err = ironlake_drpc_info(m);
1627
1628 intel_runtime_pm_put(dev_priv);
1629
1630 return err;
4d85529d
BW
1631}
1632
9a851789
DV
1633static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1634{
36cdd013 1635 struct drm_i915_private *dev_priv = node_to_i915(m->private);
9a851789
DV
1636
1637 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1638 dev_priv->fb_tracking.busy_bits);
1639
1640 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1641 dev_priv->fb_tracking.flip_bits);
1642
1643 return 0;
1644}
1645
b5e50c3f
JB
1646static int i915_fbc_status(struct seq_file *m, void *unused)
1647{
36cdd013 1648 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3138872c 1649 struct intel_fbc *fbc = &dev_priv->fbc;
b5e50c3f 1650
ab309a6a
MW
1651 if (!HAS_FBC(dev_priv))
1652 return -ENODEV;
b5e50c3f 1653
36623ef8 1654 intel_runtime_pm_get(dev_priv);
3138872c 1655 mutex_lock(&fbc->lock);
36623ef8 1656
0e631adc 1657 if (intel_fbc_is_active(dev_priv))
267f0c90 1658 seq_puts(m, "FBC enabled\n");
2e8144a5 1659 else
3138872c
CW
1660 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1661
3fd5d1ec
VS
1662 if (intel_fbc_is_active(dev_priv)) {
1663 u32 mask;
1664
1665 if (INTEL_GEN(dev_priv) >= 8)
1666 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1667 else if (INTEL_GEN(dev_priv) >= 7)
1668 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1669 else if (INTEL_GEN(dev_priv) >= 5)
1670 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1671 else if (IS_G4X(dev_priv))
1672 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1673 else
1674 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1675 FBC_STAT_COMPRESSED);
1676
1677 seq_printf(m, "Compressing: %s\n", yesno(mask));
0fc6a9dc 1678 }
31b9df10 1679
3138872c 1680 mutex_unlock(&fbc->lock);
36623ef8
PZ
1681 intel_runtime_pm_put(dev_priv);
1682
b5e50c3f
JB
1683 return 0;
1684}
1685
4127dc43 1686static int i915_fbc_false_color_get(void *data, u64 *val)
da46f936 1687{
36cdd013 1688 struct drm_i915_private *dev_priv = data;
da46f936 1689
36cdd013 1690 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
da46f936
RV
1691 return -ENODEV;
1692
da46f936 1693 *val = dev_priv->fbc.false_color;
da46f936
RV
1694
1695 return 0;
1696}
1697
4127dc43 1698static int i915_fbc_false_color_set(void *data, u64 val)
da46f936 1699{
36cdd013 1700 struct drm_i915_private *dev_priv = data;
da46f936
RV
1701 u32 reg;
1702
36cdd013 1703 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
da46f936
RV
1704 return -ENODEV;
1705
25ad93fd 1706 mutex_lock(&dev_priv->fbc.lock);
da46f936
RV
1707
1708 reg = I915_READ(ILK_DPFC_CONTROL);
1709 dev_priv->fbc.false_color = val;
1710
1711 I915_WRITE(ILK_DPFC_CONTROL, val ?
1712 (reg | FBC_CTL_FALSE_COLOR) :
1713 (reg & ~FBC_CTL_FALSE_COLOR));
1714
25ad93fd 1715 mutex_unlock(&dev_priv->fbc.lock);
da46f936
RV
1716 return 0;
1717}
1718
4127dc43
VS
1719DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1720 i915_fbc_false_color_get, i915_fbc_false_color_set,
da46f936
RV
1721 "%llu\n");
1722
92d44621
PZ
1723static int i915_ips_status(struct seq_file *m, void *unused)
1724{
36cdd013 1725 struct drm_i915_private *dev_priv = node_to_i915(m->private);
92d44621 1726
ab309a6a
MW
1727 if (!HAS_IPS(dev_priv))
1728 return -ENODEV;
92d44621 1729
36623ef8
PZ
1730 intel_runtime_pm_get(dev_priv);
1731
0eaa53f0 1732 seq_printf(m, "Enabled by kernel parameter: %s\n",
4f044a88 1733 yesno(i915_modparams.enable_ips));
0eaa53f0 1734
36cdd013 1735 if (INTEL_GEN(dev_priv) >= 8) {
0eaa53f0
RV
1736 seq_puts(m, "Currently: unknown\n");
1737 } else {
1738 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1739 seq_puts(m, "Currently: enabled\n");
1740 else
1741 seq_puts(m, "Currently: disabled\n");
1742 }
92d44621 1743
36623ef8
PZ
1744 intel_runtime_pm_put(dev_priv);
1745
92d44621
PZ
1746 return 0;
1747}
1748
4a9bef37
JB
1749static int i915_sr_status(struct seq_file *m, void *unused)
1750{
36cdd013 1751 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4a9bef37
JB
1752 bool sr_enabled = false;
1753
36623ef8 1754 intel_runtime_pm_get(dev_priv);
9c870d03 1755 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
36623ef8 1756
7342a72c
CW
1757 if (INTEL_GEN(dev_priv) >= 9)
1758 /* no global SR status; inspect per-plane WM */;
1759 else if (HAS_PCH_SPLIT(dev_priv))
5ba2aaaa 1760 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
c0f86832 1761 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
36cdd013 1762 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
4a9bef37 1763 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
36cdd013 1764 else if (IS_I915GM(dev_priv))
4a9bef37 1765 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
36cdd013 1766 else if (IS_PINEVIEW(dev_priv))
4a9bef37 1767 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
36cdd013 1768 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
77b64555 1769 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4a9bef37 1770
9c870d03 1771 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
36623ef8
PZ
1772 intel_runtime_pm_put(dev_priv);
1773
08c4d7fc 1774 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
4a9bef37
JB
1775
1776 return 0;
1777}
1778
7648fa99
JB
1779static int i915_emon_status(struct seq_file *m, void *unused)
1780{
36cdd013
DW
1781 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1782 struct drm_device *dev = &dev_priv->drm;
7648fa99 1783 unsigned long temp, chipset, gfx;
de227ef0
CW
1784 int ret;
1785
36cdd013 1786 if (!IS_GEN5(dev_priv))
582be6b4
CW
1787 return -ENODEV;
1788
de227ef0
CW
1789 ret = mutex_lock_interruptible(&dev->struct_mutex);
1790 if (ret)
1791 return ret;
7648fa99
JB
1792
1793 temp = i915_mch_val(dev_priv);
1794 chipset = i915_chipset_val(dev_priv);
1795 gfx = i915_gfx_val(dev_priv);
de227ef0 1796 mutex_unlock(&dev->struct_mutex);
7648fa99
JB
1797
1798 seq_printf(m, "GMCH temp: %ld\n", temp);
1799 seq_printf(m, "Chipset power: %ld\n", chipset);
1800 seq_printf(m, "GFX power: %ld\n", gfx);
1801 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1802
1803 return 0;
1804}
1805
23b2f8bb
JB
1806static int i915_ring_freq_table(struct seq_file *m, void *unused)
1807{
36cdd013 1808 struct drm_i915_private *dev_priv = node_to_i915(m->private);
562d9bae 1809 struct intel_rps *rps = &dev_priv->gt_pm.rps;
f936ec34 1810 unsigned int max_gpu_freq, min_gpu_freq;
d586b5f4
CW
1811 int gpu_freq, ia_freq;
1812 int ret;
23b2f8bb 1813
ab309a6a
MW
1814 if (!HAS_LLC(dev_priv))
1815 return -ENODEV;
23b2f8bb 1816
5bfa0199
PZ
1817 intel_runtime_pm_get(dev_priv);
1818
9f817501 1819 ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
23b2f8bb 1820 if (ret)
5bfa0199 1821 goto out;
23b2f8bb 1822
d586b5f4
CW
1823 min_gpu_freq = rps->min_freq;
1824 max_gpu_freq = rps->max_freq;
2b2874ef 1825 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
f936ec34 1826 /* Convert GT frequency to 50 HZ units */
d586b5f4
CW
1827 min_gpu_freq /= GEN9_FREQ_SCALER;
1828 max_gpu_freq /= GEN9_FREQ_SCALER;
f936ec34
AG
1829 }
1830
267f0c90 1831 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
23b2f8bb 1832
f936ec34 1833 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
42c0526c
BW
1834 ia_freq = gpu_freq;
1835 sandybridge_pcode_read(dev_priv,
1836 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1837 &ia_freq);
3ebecd07 1838 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
f936ec34 1839 intel_gpu_freq(dev_priv, (gpu_freq *
35ceabf3 1840 (IS_GEN9_BC(dev_priv) ||
2b2874ef 1841 INTEL_GEN(dev_priv) >= 10 ?
b976dc53 1842 GEN9_FREQ_SCALER : 1))),
3ebecd07
CW
1843 ((ia_freq >> 0) & 0xff) * 100,
1844 ((ia_freq >> 8) & 0xff) * 100);
23b2f8bb
JB
1845 }
1846
9f817501 1847 mutex_unlock(&dev_priv->pcu_lock);
23b2f8bb 1848
5bfa0199
PZ
1849out:
1850 intel_runtime_pm_put(dev_priv);
1851 return ret;
23b2f8bb
JB
1852}
1853
44834a67
CW
1854static int i915_opregion(struct seq_file *m, void *unused)
1855{
36cdd013
DW
1856 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1857 struct drm_device *dev = &dev_priv->drm;
44834a67
CW
1858 struct intel_opregion *opregion = &dev_priv->opregion;
1859 int ret;
1860
1861 ret = mutex_lock_interruptible(&dev->struct_mutex);
1862 if (ret)
0d38f009 1863 goto out;
44834a67 1864
2455a8e4
JN
1865 if (opregion->header)
1866 seq_write(m, opregion->header, OPREGION_SIZE);
44834a67
CW
1867
1868 mutex_unlock(&dev->struct_mutex);
1869
0d38f009 1870out:
44834a67
CW
1871 return 0;
1872}
1873
ada8f955
JN
1874static int i915_vbt(struct seq_file *m, void *unused)
1875{
36cdd013 1876 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
ada8f955
JN
1877
1878 if (opregion->vbt)
1879 seq_write(m, opregion->vbt, opregion->vbt_size);
1880
1881 return 0;
1882}
1883
37811fcc
CW
1884static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1885{
36cdd013
DW
1886 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1887 struct drm_device *dev = &dev_priv->drm;
b13b8402 1888 struct intel_framebuffer *fbdev_fb = NULL;
3a58ee10 1889 struct drm_framebuffer *drm_fb;
188c1ab7
CW
1890 int ret;
1891
1892 ret = mutex_lock_interruptible(&dev->struct_mutex);
1893 if (ret)
1894 return ret;
37811fcc 1895
0695726e 1896#ifdef CONFIG_DRM_FBDEV_EMULATION
346fb4e0 1897 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
36cdd013 1898 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
25bcce94
CW
1899
1900 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1901 fbdev_fb->base.width,
1902 fbdev_fb->base.height,
b00c600e 1903 fbdev_fb->base.format->depth,
272725c7 1904 fbdev_fb->base.format->cpp[0] * 8,
bae781b2 1905 fbdev_fb->base.modifier,
25bcce94 1906 drm_framebuffer_read_refcount(&fbdev_fb->base));
a5ff7a45 1907 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
25bcce94
CW
1908 seq_putc(m, '\n');
1909 }
4520f53a 1910#endif
37811fcc 1911
4b096ac1 1912 mutex_lock(&dev->mode_config.fb_lock);
3a58ee10 1913 drm_for_each_fb(drm_fb, dev) {
b13b8402
NS
1914 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1915 if (fb == fbdev_fb)
37811fcc
CW
1916 continue;
1917
c1ca506d 1918 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
37811fcc
CW
1919 fb->base.width,
1920 fb->base.height,
b00c600e 1921 fb->base.format->depth,
272725c7 1922 fb->base.format->cpp[0] * 8,
bae781b2 1923 fb->base.modifier,
747a598f 1924 drm_framebuffer_read_refcount(&fb->base));
a5ff7a45 1925 describe_obj(m, intel_fb_obj(&fb->base));
267f0c90 1926 seq_putc(m, '\n');
37811fcc 1927 }
4b096ac1 1928 mutex_unlock(&dev->mode_config.fb_lock);
188c1ab7 1929 mutex_unlock(&dev->struct_mutex);
37811fcc
CW
1930
1931 return 0;
1932}
1933
7e37f889 1934static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
c9fe99bd 1935{
ef5032a0
CW
1936 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1937 ring->space, ring->head, ring->tail, ring->emit);
c9fe99bd
OM
1938}
1939
e76d3630
BW
1940static int i915_context_status(struct seq_file *m, void *unused)
1941{
36cdd013
DW
1942 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1943 struct drm_device *dev = &dev_priv->drm;
e2f80391 1944 struct intel_engine_cs *engine;
e2efd130 1945 struct i915_gem_context *ctx;
3b3f1650 1946 enum intel_engine_id id;
c3232b18 1947 int ret;
e76d3630 1948
f3d28878 1949 ret = mutex_lock_interruptible(&dev->struct_mutex);
e76d3630
BW
1950 if (ret)
1951 return ret;
1952
829a0af2 1953 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
5d1808ec 1954 seq_printf(m, "HW context %u ", ctx->hw_id);
c84455b4 1955 if (ctx->pid) {
d28b99ab
CW
1956 struct task_struct *task;
1957
c84455b4 1958 task = get_pid_task(ctx->pid, PIDTYPE_PID);
d28b99ab
CW
1959 if (task) {
1960 seq_printf(m, "(%s [%d]) ",
1961 task->comm, task->pid);
1962 put_task_struct(task);
1963 }
c84455b4
CW
1964 } else if (IS_ERR(ctx->file_priv)) {
1965 seq_puts(m, "(deleted) ");
d28b99ab
CW
1966 } else {
1967 seq_puts(m, "(kernel) ");
1968 }
1969
bca44d80
CW
1970 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1971 seq_putc(m, '\n');
c9fe99bd 1972
3b3f1650 1973 for_each_engine(engine, dev_priv, id) {
ab82a063
CW
1974 struct intel_context *ce =
1975 to_intel_context(ctx, engine);
bca44d80
CW
1976
1977 seq_printf(m, "%s: ", engine->name);
bca44d80 1978 if (ce->state)
bf3783e5 1979 describe_obj(m, ce->state->obj);
dca33ecc 1980 if (ce->ring)
7e37f889 1981 describe_ctx_ring(m, ce->ring);
c9fe99bd 1982 seq_putc(m, '\n');
c9fe99bd 1983 }
a33afea5 1984
a33afea5 1985 seq_putc(m, '\n');
a168c293
BW
1986 }
1987
f3d28878 1988 mutex_unlock(&dev->struct_mutex);
e76d3630
BW
1989
1990 return 0;
1991}
1992
ea16a3cd
DV
1993static const char *swizzle_string(unsigned swizzle)
1994{
aee56cff 1995 switch (swizzle) {
ea16a3cd
DV
1996 case I915_BIT_6_SWIZZLE_NONE:
1997 return "none";
1998 case I915_BIT_6_SWIZZLE_9:
1999 return "bit9";
2000 case I915_BIT_6_SWIZZLE_9_10:
2001 return "bit9/bit10";
2002 case I915_BIT_6_SWIZZLE_9_11:
2003 return "bit9/bit11";
2004 case I915_BIT_6_SWIZZLE_9_10_11:
2005 return "bit9/bit10/bit11";
2006 case I915_BIT_6_SWIZZLE_9_17:
2007 return "bit9/bit17";
2008 case I915_BIT_6_SWIZZLE_9_10_17:
2009 return "bit9/bit10/bit17";
2010 case I915_BIT_6_SWIZZLE_UNKNOWN:
8a168ca7 2011 return "unknown";
ea16a3cd
DV
2012 }
2013
2014 return "bug";
2015}
2016
2017static int i915_swizzle_info(struct seq_file *m, void *data)
2018{
36cdd013 2019 struct drm_i915_private *dev_priv = node_to_i915(m->private);
22bcfc6a 2020
c8c8fb33 2021 intel_runtime_pm_get(dev_priv);
ea16a3cd 2022
ea16a3cd
DV
2023 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2024 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2025 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2026 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2027
36cdd013 2028 if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
ea16a3cd
DV
2029 seq_printf(m, "DDC = 0x%08x\n",
2030 I915_READ(DCC));
656bfa3a
DV
2031 seq_printf(m, "DDC2 = 0x%08x\n",
2032 I915_READ(DCC2));
ea16a3cd
DV
2033 seq_printf(m, "C0DRB3 = 0x%04x\n",
2034 I915_READ16(C0DRB3));
2035 seq_printf(m, "C1DRB3 = 0x%04x\n",
2036 I915_READ16(C1DRB3));
36cdd013 2037 } else if (INTEL_GEN(dev_priv) >= 6) {
3fa7d235
DV
2038 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2039 I915_READ(MAD_DIMM_C0));
2040 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2041 I915_READ(MAD_DIMM_C1));
2042 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2043 I915_READ(MAD_DIMM_C2));
2044 seq_printf(m, "TILECTL = 0x%08x\n",
2045 I915_READ(TILECTL));
36cdd013 2046 if (INTEL_GEN(dev_priv) >= 8)
9d3203e1
BW
2047 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2048 I915_READ(GAMTARBMODE));
2049 else
2050 seq_printf(m, "ARB_MODE = 0x%08x\n",
2051 I915_READ(ARB_MODE));
3fa7d235
DV
2052 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2053 I915_READ(DISP_ARB_CTL));
ea16a3cd 2054 }
656bfa3a
DV
2055
2056 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2057 seq_puts(m, "L-shaped memory detected\n");
2058
c8c8fb33 2059 intel_runtime_pm_put(dev_priv);
ea16a3cd
DV
2060
2061 return 0;
2062}
2063
1c60fef5
BW
2064static int per_file_ctx(int id, void *ptr, void *data)
2065{
e2efd130 2066 struct i915_gem_context *ctx = ptr;
1c60fef5 2067 struct seq_file *m = data;
ae6c4806
DV
2068 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2069
2070 if (!ppgtt) {
2071 seq_printf(m, " no ppgtt for context %d\n",
2072 ctx->user_handle);
2073 return 0;
2074 }
1c60fef5 2075
f83d6518
OM
2076 if (i915_gem_context_is_default(ctx))
2077 seq_puts(m, " default context:\n");
2078 else
821d66dd 2079 seq_printf(m, " context %d:\n", ctx->user_handle);
1c60fef5
BW
2080 ppgtt->debug_dump(ppgtt, m);
2081
2082 return 0;
2083}
2084
36cdd013
DW
2085static void gen8_ppgtt_info(struct seq_file *m,
2086 struct drm_i915_private *dev_priv)
3cf17fc5 2087{
77df6772 2088 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
3b3f1650
AG
2089 struct intel_engine_cs *engine;
2090 enum intel_engine_id id;
b4ac5afc 2091 int i;
3cf17fc5 2092
77df6772
BW
2093 if (!ppgtt)
2094 return;
2095
3b3f1650 2096 for_each_engine(engine, dev_priv, id) {
e2f80391 2097 seq_printf(m, "%s\n", engine->name);
77df6772 2098 for (i = 0; i < 4; i++) {
e2f80391 2099 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
77df6772 2100 pdp <<= 32;
e2f80391 2101 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
a2a5b15c 2102 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
77df6772
BW
2103 }
2104 }
2105}
2106
36cdd013
DW
2107static void gen6_ppgtt_info(struct seq_file *m,
2108 struct drm_i915_private *dev_priv)
77df6772 2109{
e2f80391 2110 struct intel_engine_cs *engine;
3b3f1650 2111 enum intel_engine_id id;
3cf17fc5 2112
7e22dbbb 2113 if (IS_GEN6(dev_priv))
3cf17fc5
DV
2114 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2115
3b3f1650 2116 for_each_engine(engine, dev_priv, id) {
e2f80391 2117 seq_printf(m, "%s\n", engine->name);
7e22dbbb 2118 if (IS_GEN7(dev_priv))
e2f80391
TU
2119 seq_printf(m, "GFX_MODE: 0x%08x\n",
2120 I915_READ(RING_MODE_GEN7(engine)));
2121 seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2122 I915_READ(RING_PP_DIR_BASE(engine)));
2123 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2124 I915_READ(RING_PP_DIR_BASE_READ(engine)));
2125 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2126 I915_READ(RING_PP_DIR_DCLV(engine)));
3cf17fc5
DV
2127 }
2128 if (dev_priv->mm.aliasing_ppgtt) {
2129 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2130
267f0c90 2131 seq_puts(m, "aliasing PPGTT:\n");
44159ddb 2132 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
1c60fef5 2133
87d60b63 2134 ppgtt->debug_dump(ppgtt, m);
ae6c4806 2135 }
1c60fef5 2136
3cf17fc5 2137 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
77df6772
BW
2138}
2139
2140static int i915_ppgtt_info(struct seq_file *m, void *data)
2141{
36cdd013
DW
2142 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2143 struct drm_device *dev = &dev_priv->drm;
ea91e401 2144 struct drm_file *file;
637ee29e 2145 int ret;
77df6772 2146
637ee29e
CW
2147 mutex_lock(&dev->filelist_mutex);
2148 ret = mutex_lock_interruptible(&dev->struct_mutex);
77df6772 2149 if (ret)
637ee29e
CW
2150 goto out_unlock;
2151
c8c8fb33 2152 intel_runtime_pm_get(dev_priv);
77df6772 2153
36cdd013
DW
2154 if (INTEL_GEN(dev_priv) >= 8)
2155 gen8_ppgtt_info(m, dev_priv);
2156 else if (INTEL_GEN(dev_priv) >= 6)
2157 gen6_ppgtt_info(m, dev_priv);
77df6772 2158
ea91e401
MT
2159 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2160 struct drm_i915_file_private *file_priv = file->driver_priv;
7cb5dff8 2161 struct task_struct *task;
ea91e401 2162
7cb5dff8 2163 task = get_pid_task(file->pid, PIDTYPE_PID);
06812760
DC
2164 if (!task) {
2165 ret = -ESRCH;
637ee29e 2166 goto out_rpm;
06812760 2167 }
7cb5dff8
GT
2168 seq_printf(m, "\nproc: %s\n", task->comm);
2169 put_task_struct(task);
ea91e401
MT
2170 idr_for_each(&file_priv->context_idr, per_file_ctx,
2171 (void *)(unsigned long)m);
2172 }
2173
637ee29e 2174out_rpm:
c8c8fb33 2175 intel_runtime_pm_put(dev_priv);
3cf17fc5 2176 mutex_unlock(&dev->struct_mutex);
637ee29e
CW
2177out_unlock:
2178 mutex_unlock(&dev->filelist_mutex);
06812760 2179 return ret;
3cf17fc5
DV
2180}
2181
f5a4c67d
CW
2182static int count_irq_waiters(struct drm_i915_private *i915)
2183{
e2f80391 2184 struct intel_engine_cs *engine;
3b3f1650 2185 enum intel_engine_id id;
f5a4c67d 2186 int count = 0;
f5a4c67d 2187
3b3f1650 2188 for_each_engine(engine, i915, id)
688e6c72 2189 count += intel_engine_has_waiter(engine);
f5a4c67d
CW
2190
2191 return count;
2192}
2193
7466c291
CW
2194static const char *rps_power_to_str(unsigned int power)
2195{
2196 static const char * const strings[] = {
2197 [LOW_POWER] = "low power",
2198 [BETWEEN] = "mixed",
2199 [HIGH_POWER] = "high power",
2200 };
2201
2202 if (power >= ARRAY_SIZE(strings) || !strings[power])
2203 return "unknown";
2204
2205 return strings[power];
2206}
2207
1854d5ca
CW
2208static int i915_rps_boost_info(struct seq_file *m, void *data)
2209{
36cdd013
DW
2210 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2211 struct drm_device *dev = &dev_priv->drm;
562d9bae 2212 struct intel_rps *rps = &dev_priv->gt_pm.rps;
1854d5ca 2213 struct drm_file *file;
1854d5ca 2214
562d9bae 2215 seq_printf(m, "RPS enabled? %d\n", rps->enabled);
28176ef4
CW
2216 seq_printf(m, "GPU busy? %s [%d requests]\n",
2217 yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
f5a4c67d 2218 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
7b92c1bd 2219 seq_printf(m, "Boosts outstanding? %d\n",
562d9bae 2220 atomic_read(&rps->num_waiters));
7466c291 2221 seq_printf(m, "Frequency requested %d\n",
562d9bae 2222 intel_gpu_freq(dev_priv, rps->cur_freq));
7466c291 2223 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
562d9bae
SAK
2224 intel_gpu_freq(dev_priv, rps->min_freq),
2225 intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2226 intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2227 intel_gpu_freq(dev_priv, rps->max_freq));
7466c291 2228 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
562d9bae
SAK
2229 intel_gpu_freq(dev_priv, rps->idle_freq),
2230 intel_gpu_freq(dev_priv, rps->efficient_freq),
2231 intel_gpu_freq(dev_priv, rps->boost_freq));
1d2ac403
DV
2232
2233 mutex_lock(&dev->filelist_mutex);
1854d5ca
CW
2234 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2235 struct drm_i915_file_private *file_priv = file->driver_priv;
2236 struct task_struct *task;
2237
2238 rcu_read_lock();
2239 task = pid_task(file->pid, PIDTYPE_PID);
7b92c1bd 2240 seq_printf(m, "%s [%d]: %d boosts\n",
1854d5ca
CW
2241 task ? task->comm : "<unknown>",
2242 task ? task->pid : -1,
562d9bae 2243 atomic_read(&file_priv->rps_client.boosts));
1854d5ca
CW
2244 rcu_read_unlock();
2245 }
7b92c1bd 2246 seq_printf(m, "Kernel (anonymous) boosts: %d\n",
562d9bae 2247 atomic_read(&rps->boosts));
1d2ac403 2248 mutex_unlock(&dev->filelist_mutex);
1854d5ca 2249
7466c291 2250 if (INTEL_GEN(dev_priv) >= 6 &&
562d9bae 2251 rps->enabled &&
28176ef4 2252 dev_priv->gt.active_requests) {
7466c291
CW
2253 u32 rpup, rpupei;
2254 u32 rpdown, rpdownei;
2255
2256 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2257 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2258 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2259 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2260 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2261 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2262
2263 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
562d9bae 2264 rps_power_to_str(rps->power));
7466c291 2265 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
23f4a287 2266 rpup && rpupei ? 100 * rpup / rpupei : 0,
562d9bae 2267 rps->up_threshold);
7466c291 2268 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
23f4a287 2269 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
562d9bae 2270 rps->down_threshold);
7466c291
CW
2271 } else {
2272 seq_puts(m, "\nRPS Autotuning inactive\n");
2273 }
2274
8d3afd7d 2275 return 0;
1854d5ca
CW
2276}
2277
63573eb7
BW
2278static int i915_llc(struct seq_file *m, void *data)
2279{
36cdd013 2280 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3accaf7e 2281 const bool edram = INTEL_GEN(dev_priv) > 8;
63573eb7 2282
36cdd013 2283 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
3accaf7e
MK
2284 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2285 intel_uncore_edram_size(dev_priv)/1024/1024);
63573eb7
BW
2286
2287 return 0;
2288}
2289
0509ead1
AS
2290static int i915_huc_load_status_info(struct seq_file *m, void *data)
2291{
2292 struct drm_i915_private *dev_priv = node_to_i915(m->private);
56ffc742 2293 struct drm_printer p;
0509ead1 2294
ab309a6a
MW
2295 if (!HAS_HUC(dev_priv))
2296 return -ENODEV;
0509ead1 2297
56ffc742
MW
2298 p = drm_seq_file_printer(m);
2299 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
0509ead1 2300
3582ad13 2301 intel_runtime_pm_get(dev_priv);
0509ead1 2302 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
3582ad13 2303 intel_runtime_pm_put(dev_priv);
0509ead1
AS
2304
2305 return 0;
2306}
2307
fdf5d357
AD
2308static int i915_guc_load_status_info(struct seq_file *m, void *data)
2309{
36cdd013 2310 struct drm_i915_private *dev_priv = node_to_i915(m->private);
56ffc742 2311 struct drm_printer p;
fdf5d357
AD
2312 u32 tmp, i;
2313
ab309a6a
MW
2314 if (!HAS_GUC(dev_priv))
2315 return -ENODEV;
fdf5d357 2316
56ffc742
MW
2317 p = drm_seq_file_printer(m);
2318 intel_uc_fw_dump(&dev_priv->guc.fw, &p);
fdf5d357 2319
3582ad13 2320 intel_runtime_pm_get(dev_priv);
2321
fdf5d357
AD
2322 tmp = I915_READ(GUC_STATUS);
2323
2324 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2325 seq_printf(m, "\tBootrom status = 0x%x\n",
2326 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2327 seq_printf(m, "\tuKernel status = 0x%x\n",
2328 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2329 seq_printf(m, "\tMIA Core status = 0x%x\n",
2330 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2331 seq_puts(m, "\nScratch registers:\n");
2332 for (i = 0; i < 16; i++)
2333 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2334
3582ad13 2335 intel_runtime_pm_put(dev_priv);
2336
fdf5d357
AD
2337 return 0;
2338}
2339
5e24e4a2
MW
2340static const char *
2341stringify_guc_log_type(enum guc_log_buffer_type type)
2342{
2343 switch (type) {
2344 case GUC_ISR_LOG_BUFFER:
2345 return "ISR";
2346 case GUC_DPC_LOG_BUFFER:
2347 return "DPC";
2348 case GUC_CRASH_DUMP_LOG_BUFFER:
2349 return "CRASH";
2350 default:
2351 MISSING_CASE(type);
2352 }
2353
2354 return "";
2355}
2356
5aa1ee4b
AG
2357static void i915_guc_log_info(struct seq_file *m,
2358 struct drm_i915_private *dev_priv)
2359{
5e24e4a2
MW
2360 struct intel_guc_log *log = &dev_priv->guc.log;
2361 enum guc_log_buffer_type type;
5aa1ee4b 2362
5e24e4a2
MW
2363 if (!intel_guc_log_relay_enabled(log)) {
2364 seq_puts(m, "GuC log relay disabled\n");
2365 return;
2366 }
5aa1ee4b 2367
5e24e4a2 2368 seq_puts(m, "GuC logging stats:\n");
5aa1ee4b 2369
6a96be24 2370 seq_printf(m, "\tRelay full count: %u\n",
5e24e4a2
MW
2371 log->relay.full_count);
2372
2373 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2374 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2375 stringify_guc_log_type(type),
2376 log->stats[type].flush,
2377 log->stats[type].sampled_overflow);
2378 }
5aa1ee4b
AG
2379}
2380
8b417c26
DG
2381static void i915_guc_client_info(struct seq_file *m,
2382 struct drm_i915_private *dev_priv,
5afc8b49 2383 struct intel_guc_client *client)
8b417c26 2384{
e2f80391 2385 struct intel_engine_cs *engine;
c18468c4 2386 enum intel_engine_id id;
8b417c26 2387 uint64_t tot = 0;
8b417c26 2388
b09935a6
OM
2389 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2390 client->priority, client->stage_id, client->proc_desc_offset);
59db36cf
MW
2391 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2392 client->doorbell_id, client->doorbell_offset);
8b417c26 2393
3b3f1650 2394 for_each_engine(engine, dev_priv, id) {
c18468c4
DG
2395 u64 submissions = client->submissions[id];
2396 tot += submissions;
8b417c26 2397 seq_printf(m, "\tSubmissions: %llu %s\n",
c18468c4 2398 submissions, engine->name);
8b417c26
DG
2399 }
2400 seq_printf(m, "\tTotal: %llu\n", tot);
2401}
2402
a8b9370f
OM
2403static int i915_guc_info(struct seq_file *m, void *data)
2404{
2405 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2406 const struct intel_guc *guc = &dev_priv->guc;
a8b9370f 2407
db557993 2408 if (!USES_GUC(dev_priv))
ab309a6a
MW
2409 return -ENODEV;
2410
db557993
MW
2411 i915_guc_log_info(m, dev_priv);
2412
2413 if (!USES_GUC_SUBMISSION(dev_priv))
2414 return 0;
2415
ab309a6a 2416 GEM_BUG_ON(!guc->execbuf_client);
a8b9370f 2417
db557993 2418 seq_printf(m, "\nDoorbell map:\n");
abddffdf 2419 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
db557993 2420 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
9636f6db 2421
334636c6
CW
2422 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2423 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
e78c9175
CW
2424 if (guc->preempt_client) {
2425 seq_printf(m, "\nGuC preempt client @ %p:\n",
2426 guc->preempt_client);
2427 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2428 }
8b417c26
DG
2429
2430 /* Add more as required ... */
2431
2432 return 0;
2433}
2434
a8b9370f 2435static int i915_guc_stage_pool(struct seq_file *m, void *data)
4c7e77fc 2436{
36cdd013 2437 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a8b9370f
OM
2438 const struct intel_guc *guc = &dev_priv->guc;
2439 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
5afc8b49 2440 struct intel_guc_client *client = guc->execbuf_client;
a8b9370f
OM
2441 unsigned int tmp;
2442 int index;
4c7e77fc 2443
ab309a6a
MW
2444 if (!USES_GUC_SUBMISSION(dev_priv))
2445 return -ENODEV;
4c7e77fc 2446
a8b9370f
OM
2447 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2448 struct intel_engine_cs *engine;
2449
2450 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2451 continue;
2452
2453 seq_printf(m, "GuC stage descriptor %u:\n", index);
2454 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2455 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2456 seq_printf(m, "\tPriority: %d\n", desc->priority);
2457 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2458 seq_printf(m, "\tEngines used: 0x%x\n",
2459 desc->engines_used);
2460 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2461 desc->db_trigger_phy,
2462 desc->db_trigger_cpu,
2463 desc->db_trigger_uk);
2464 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2465 desc->process_desc);
9a09485d 2466 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
a8b9370f
OM
2467 desc->wq_addr, desc->wq_size);
2468 seq_putc(m, '\n');
2469
2470 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2471 u32 guc_engine_id = engine->guc_id;
2472 struct guc_execlist_context *lrc =
2473 &desc->lrc[guc_engine_id];
2474
2475 seq_printf(m, "\t%s LRC:\n", engine->name);
2476 seq_printf(m, "\t\tContext desc: 0x%x\n",
2477 lrc->context_desc);
2478 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2479 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2480 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2481 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2482 seq_putc(m, '\n');
2483 }
2484 }
2485
2486 return 0;
2487}
2488
4c7e77fc
AD
2489static int i915_guc_log_dump(struct seq_file *m, void *data)
2490{
ac58d2ab
DCS
2491 struct drm_info_node *node = m->private;
2492 struct drm_i915_private *dev_priv = node_to_i915(node);
2493 bool dump_load_err = !!node->info_ent->data;
2494 struct drm_i915_gem_object *obj = NULL;
2495 u32 *log;
2496 int i = 0;
4c7e77fc 2497
ab309a6a
MW
2498 if (!HAS_GUC(dev_priv))
2499 return -ENODEV;
2500
ac58d2ab
DCS
2501 if (dump_load_err)
2502 obj = dev_priv->guc.load_err_log;
2503 else if (dev_priv->guc.log.vma)
2504 obj = dev_priv->guc.log.vma->obj;
4c7e77fc 2505
ac58d2ab
DCS
2506 if (!obj)
2507 return 0;
4c7e77fc 2508
ac58d2ab
DCS
2509 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2510 if (IS_ERR(log)) {
2511 DRM_DEBUG("Failed to pin object\n");
2512 seq_puts(m, "(log data unaccessible)\n");
2513 return PTR_ERR(log);
4c7e77fc
AD
2514 }
2515
ac58d2ab
DCS
2516 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2517 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2518 *(log + i), *(log + i + 1),
2519 *(log + i + 2), *(log + i + 3));
2520
4c7e77fc
AD
2521 seq_putc(m, '\n');
2522
ac58d2ab
DCS
2523 i915_gem_object_unpin_map(obj);
2524
4c7e77fc
AD
2525 return 0;
2526}
2527
4977a287 2528static int i915_guc_log_level_get(void *data, u64 *val)
685534ef 2529{
bcc36d8a 2530 struct drm_i915_private *dev_priv = data;
685534ef 2531
86aa8247 2532 if (!USES_GUC(dev_priv))
ab309a6a
MW
2533 return -ENODEV;
2534
50935ac7 2535 *val = intel_guc_log_get_level(&dev_priv->guc.log);
685534ef
SAK
2536
2537 return 0;
2538}
2539
4977a287 2540static int i915_guc_log_level_set(void *data, u64 val)
685534ef 2541{
bcc36d8a 2542 struct drm_i915_private *dev_priv = data;
685534ef 2543
86aa8247 2544 if (!USES_GUC(dev_priv))
ab309a6a
MW
2545 return -ENODEV;
2546
50935ac7 2547 return intel_guc_log_set_level(&dev_priv->guc.log, val);
685534ef
SAK
2548}
2549
4977a287
MW
2550DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2551 i915_guc_log_level_get, i915_guc_log_level_set,
685534ef
SAK
2552 "%lld\n");
2553
4977a287
MW
2554static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2555{
2556 struct drm_i915_private *dev_priv = inode->i_private;
2557
2558 if (!USES_GUC(dev_priv))
2559 return -ENODEV;
2560
2561 file->private_data = &dev_priv->guc.log;
2562
2563 return intel_guc_log_relay_open(&dev_priv->guc.log);
2564}
2565
2566static ssize_t
2567i915_guc_log_relay_write(struct file *filp,
2568 const char __user *ubuf,
2569 size_t cnt,
2570 loff_t *ppos)
2571{
2572 struct intel_guc_log *log = filp->private_data;
2573
2574 intel_guc_log_relay_flush(log);
2575
2576 return cnt;
2577}
2578
2579static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2580{
2581 struct drm_i915_private *dev_priv = inode->i_private;
2582
2583 intel_guc_log_relay_close(&dev_priv->guc.log);
2584
2585 return 0;
2586}
2587
2588static const struct file_operations i915_guc_log_relay_fops = {
2589 .owner = THIS_MODULE,
2590 .open = i915_guc_log_relay_open,
2591 .write = i915_guc_log_relay_write,
2592 .release = i915_guc_log_relay_release,
2593};
2594
5b7b3086
DP
2595static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2596{
2597 u8 val;
2598 static const char * const sink_status[] = {
2599 "inactive",
2600 "transition to active, capture and display",
2601 "active, display from RFB",
2602 "active, capture and display on sink device timings",
2603 "transition to inactive, capture and display, timing re-sync",
2604 "reserved",
2605 "reserved",
2606 "sink internal error",
2607 };
2608 struct drm_connector *connector = m->private;
7a72c78b 2609 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5b7b3086
DP
2610 struct intel_dp *intel_dp =
2611 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
7a72c78b
RV
2612 int ret;
2613
2614 if (!CAN_PSR(dev_priv)) {
2615 seq_puts(m, "PSR Unsupported\n");
2616 return -ENODEV;
2617 }
5b7b3086
DP
2618
2619 if (connector->status != connector_status_connected)
2620 return -ENODEV;
2621
7a72c78b
RV
2622 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2623
2624 if (ret == 1) {
5b7b3086
DP
2625 const char *str = "unknown";
2626
2627 val &= DP_PSR_SINK_STATE_MASK;
2628 if (val < ARRAY_SIZE(sink_status))
2629 str = sink_status[val];
2630 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2631 } else {
7a72c78b 2632 return ret;
5b7b3086
DP
2633 }
2634
2635 return 0;
2636}
2637DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2638
00b06296
VN
2639static void
2640psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2641{
2642 u32 val, psr_status;
b86bef20 2643
00b06296
VN
2644 if (dev_priv->psr.psr2_enabled) {
2645 static const char * const live_status[] = {
2646 "IDLE",
2647 "CAPTURE",
2648 "CAPTURE_FS",
2649 "SLEEP",
2650 "BUFON_FW",
2651 "ML_UP",
2652 "SU_STANDBY",
2653 "FAST_SLEEP",
2654 "DEEP_SLEEP",
2655 "BUF_ON",
2656 "TG_ON"
2657 };
2658 psr_status = I915_READ(EDP_PSR2_STATUS);
2659 val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
2660 EDP_PSR2_STATUS_STATE_SHIFT;
2661 if (val < ARRAY_SIZE(live_status)) {
2662 seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2663 psr_status, live_status[val]);
2664 return;
2665 }
2666 } else {
2667 static const char * const live_status[] = {
2668 "IDLE",
2669 "SRDONACK",
2670 "SRDENT",
2671 "BUFOFF",
2672 "BUFON",
2673 "AUXACK",
2674 "SRDOFFACK",
2675 "SRDENT_ON",
2676 };
2677 psr_status = I915_READ(EDP_PSR_STATUS);
2678 val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
2679 EDP_PSR_STATUS_STATE_SHIFT;
2680 if (val < ARRAY_SIZE(live_status)) {
2681 seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2682 psr_status, live_status[val]);
2683 return;
2684 }
2685 }
b86bef20 2686
00b06296 2687 seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
b86bef20
CW
2688}
2689
e91fd8c6
RV
/*
 * debugfs node: dump Panel Self Refresh (PSR) state.
 *
 * Prints sink support, software enable state, busy frontbuffer bits, the
 * HW enable bit (PSR1 or PSR2 depending on which mode is active), link
 * standby mode, the HSW/BDW performance counter, and the source HW status.
 * Returns 0 on success, -ENODEV when the platform has no PSR.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	bool enabled = false;
	bool sink_support;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	/* Without a PSR-capable sink there is nothing further to report. */
	sink_support = dev_priv->psr.sink_support;
	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
	if (!sink_support)
		return 0;

	/* Keep the device awake for the register reads below. */
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);

	/* PSR1 and PSR2 have distinct control registers; pick the live one. */
	if (dev_priv->psr.psr2_enabled)
		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
	else
		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}

	psr_source_status(dev_priv, m);
	mutex_unlock(&dev_priv->psr.lock);

	/* Entry/exit timestamps are only tracked when PSR debug is on. */
	if (READ_ONCE(dev_priv->psr.debug)) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   dev_priv->psr.last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n",
			   dev_priv->psr.last_exit);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;
}
2745
54fd3149
DP
2746static int
2747i915_edp_psr_debug_set(void *data, u64 val)
2748{
2749 struct drm_i915_private *dev_priv = data;
2750
2751 if (!CAN_PSR(dev_priv))
2752 return -ENODEV;
2753
2754 DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));
2755
2756 intel_runtime_pm_get(dev_priv);
2757 intel_psr_irq_control(dev_priv, !!val);
2758 intel_runtime_pm_put(dev_priv);
2759
2760 return 0;
2761}
2762
2763static int
2764i915_edp_psr_debug_get(void *data, u64 *val)
2765{
2766 struct drm_i915_private *dev_priv = data;
2767
2768 if (!CAN_PSR(dev_priv))
2769 return -ENODEV;
2770
2771 *val = READ_ONCE(dev_priv->psr.debug);
2772 return 0;
2773}
2774
/* debugfs attribute glue: expose the PSR debug flag as a decimal u64. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2778
ec013e7f
JB
/*
 * debugfs node: report GPU energy consumption in microjoules.
 *
 * Reads the RAPL energy-status-unit exponent from MSR_RAPL_POWER_UNIT and
 * scales the raw MCH energy counter accordingly. Gen6+ only.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* Bits 12:8 of the MSR hold the energy units exponent (1/2^units J). */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}
2805
/*
 * debugfs node: dump runtime power management state.
 *
 * Note: when runtime PM is unsupported we still print the remaining fields
 * (GPU idle, IRQ, PCI power state) — the warning is informational only.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	/* Runtime PM usage count only exists when CONFIG_PM is built in. */
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
2830
1da51581
ID
2831static int i915_power_domain_info(struct seq_file *m, void *unused)
2832{
36cdd013 2833 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1da51581
ID
2834 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2835 int i;
2836
2837 mutex_lock(&power_domains->lock);
2838
2839 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2840 for (i = 0; i < power_domains->power_well_count; i++) {
2841 struct i915_power_well *power_well;
2842 enum intel_display_power_domain power_domain;
2843
2844 power_well = &power_domains->power_wells[i];
2845 seq_printf(m, "%-25s %d\n", power_well->name,
2846 power_well->count);
2847
8385c2ec 2848 for_each_power_domain(power_domain, power_well->domains)
1da51581 2849 seq_printf(m, " %-23s %d\n",
9895ad03 2850 intel_display_power_domain_str(power_domain),
1da51581 2851 power_domains->domain_use_count[power_domain]);
1da51581
ID
2852 }
2853
2854 mutex_unlock(&power_domains->lock);
2855
2856 return 0;
2857}
2858
b7cec66d
DL
/*
 * debugfs node: dump CSR/DMC firmware state.
 *
 * Reports whether the firmware payload loaded, its path and version, and
 * DC-state residency counters on the platform/firmware combinations that
 * expose them. The program/ssp/htp registers are printed even when no
 * payload is loaded (via the "out" path). -ENODEV without CSR support.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* No payload: skip version/counters but still dump the registers. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* DC counters are only valid on certain platform/fw-version combos. */
	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2900
53f5e3ca
JB
2901static void intel_seq_print_mode(struct seq_file *m, int tabs,
2902 struct drm_display_mode *mode)
2903{
2904 int i;
2905
2906 for (i = 0; i < tabs; i++)
2907 seq_putc(m, '\t');
2908
2909 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2910 mode->base.id, mode->name,
2911 mode->vrefresh, mode->clock,
2912 mode->hdisplay, mode->hsync_start,
2913 mode->hsync_end, mode->htotal,
2914 mode->vdisplay, mode->vsync_start,
2915 mode->vsync_end, mode->vtotal,
2916 mode->type, mode->flags);
2917}
2918
/*
 * Print one encoder and every connector attached to it; for connected
 * connectors the CRTC's current mode is printed as well.
 */
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			/* The mode shown is the CRTC's, not per-connector. */
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
2947
/*
 * Print one CRTC's primary-plane framebuffer (id, source position, size)
 * and recurse into every encoder attached to it.
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		/* src_x/src_y are 16.16 fixed point; shift to integer pixels */
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
2966
2967static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2968{
2969 struct drm_display_mode *mode = panel->fixed_mode;
2970
2971 seq_printf(m, "\tfixed mode:\n");
2972 intel_seq_print_mode(m, 2, mode);
2973}
2974
/*
 * Print DisplayPort connector details: DPCD revision, audio capability,
 * panel fixed mode (eDP only), and downstream port info from the DPCD.
 */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	/* Only eDP connectors carry an attached panel with a fixed mode. */
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
2989
9a148a96
LY
2990static void intel_dp_mst_info(struct seq_file *m,
2991 struct intel_connector *intel_connector)
2992{
2993 struct intel_encoder *intel_encoder = intel_connector->encoder;
2994 struct intel_dp_mst_encoder *intel_mst =
2995 enc_to_mst(&intel_encoder->base);
2996 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2997 struct intel_dp *intel_dp = &intel_dig_port->dp;
2998 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2999 intel_connector->port);
3000
3001 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3002}
3003
53f5e3ca
JB
3004static void intel_hdmi_info(struct seq_file *m,
3005 struct intel_connector *intel_connector)
3006{
3007 struct intel_encoder *intel_encoder = intel_connector->encoder;
3008 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3009
742f491d 3010 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
53f5e3ca
JB
3011}
3012
/* Print LVDS connector details: just the panel's fixed mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
3018
/*
 * Print one connector: id/type/status, physical display info when
 * connected, type-specific details dispatched on connector type, and
 * finally all probed modes.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	/* No encoder attached: nothing type-specific (or modes) to print. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		/* DP and eDP share a connector type; MST needs its own dump. */
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		/* HDMI may sit behind a DDI encoder on newer platforms. */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
3068
3abc4e09
RF
/* Map a drm_plane_type to a short display string for the plane dump. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
3086
/*
 * Format a plane rotation bitmask into a human-readable string.
 *
 * NOTE(review): returns a pointer to a function-local static buffer, so
 * this is not reentrant — presumably acceptable for a single debugfs
 * reader, but two concurrent dumps would race; verify before reusing.
 */
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
3106
/*
 * Dump every plane on a CRTC: id, type, CRTC position/size, source
 * position/size (16.16 fixed point shown as decimal), pixel format and
 * rotation.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		/*
		 * src_* are 16.16 fixed point; "* 15625 >> 10" converts the
		 * fractional part to four decimal digits (x * 10000 / 65536).
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}
3149
3150static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3151{
3152 struct intel_crtc_state *pipe_config;
3153 int num_scalers = intel_crtc->num_scalers;
3154 int i;
3155
3156 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3157
3158 /* Not all platformas have a scaler */
3159 if (num_scalers) {
3160 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3161 num_scalers,
3162 pipe_config->scaler_state.scaler_users,
3163 pipe_config->scaler_state.scaler_id);
3164
58415918 3165 for (i = 0; i < num_scalers; i++) {
3abc4e09
RF
3166 struct intel_scaler *sc =
3167 &pipe_config->scaler_state.scalers[i];
3168
3169 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3170 i, yesno(sc->in_use), sc->mode);
3171 }
3172 seq_puts(m, "\n");
3173 } else {
3174 seq_puts(m, "\tNo scalers available on this platform\n");
3175 }
3176}
3177
53f5e3ca
JB
/*
 * debugfs node: full display topology dump — every CRTC (with cursor,
 * scaler and plane state when active, plus underrun reporting state),
 * then every connector.
 */
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Keep the device awake so register-backed state can be read. */
	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		/* Per-CRTC lock stabilizes crtc->base.state while we read. */
		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
3238
1b36595f
CW
/*
 * debugfs node: GT summary (awake state, active request count, CS
 * timestamp frequency) followed by a full state dump of every engine.
 */
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct drm_printer p;

	/* Engines must be awake for intel_engine_dump() register reads. */
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s (epoch %u)\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "Global active requests: %d\n",
		   dev_priv->gt.active_requests);
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   dev_priv->info.cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
3263
79e9cd5f
LL
3264static int i915_rcs_topology(struct seq_file *m, void *unused)
3265{
3266 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3267 struct drm_printer p = drm_seq_file_printer(m);
3268
3269 intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3270
3271 return 0;
3272}
3273
c5418a8b
CW
3274static int i915_shrinker_info(struct seq_file *m, void *unused)
3275{
3276 struct drm_i915_private *i915 = node_to_i915(m->private);
3277
3278 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3279 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3280
3281 return 0;
3282}
3283
728e29d7
DV
/*
 * debugfs node: dump every shared DPLL — name/id, crtc/active masks,
 * on/off state, and the complete tracked hardware register state
 * (including the ICL MG PHY PLL registers, which are zero elsewhere).
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	/* modeset lock keeps pll->state stable while we print it. */
	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf:    0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias:  0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
3332
/*
 * debugfs node: list every workaround register the driver applies —
 * address, value written, and the mask of bits affected.
 */
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
	int i;

	seq_printf(m, "Workarounds applied: %d\n", wa->count);
	for (i = 0; i < wa->count; ++i)
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
			   wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);

	return 0;
}
3345
d2d4f39b
KM
3346static int i915_ipc_status_show(struct seq_file *m, void *data)
3347{
3348 struct drm_i915_private *dev_priv = m->private;
3349
3350 seq_printf(m, "Isochronous Priority Control: %s\n",
3351 yesno(dev_priv->ipc_enabled));
3352 return 0;
3353}
3354
3355static int i915_ipc_status_open(struct inode *inode, struct file *file)
3356{
3357 struct drm_i915_private *dev_priv = inode->i_private;
3358
3359 if (!HAS_IPC(dev_priv))
3360 return -ENODEV;
3361
3362 return single_open(file, i915_ipc_status_show, dev_priv);
3363}
3364
/*
 * debugfs write side: parse a boolean from userspace and enable/disable
 * IPC accordingly, re-running the HW enable path.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	int ret;
	bool enable;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	intel_runtime_pm_get(dev_priv);
	if (!dev_priv->ipc_enabled && enable)
		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
	/*
	 * Force watermark recomputation on the next commit — set even when
	 * the enable state does not change.
	 */
	dev_priv->wm.distrust_bios_wm = true;
	dev_priv->ipc_enabled = enable;
	intel_enable_ipc(dev_priv);
	intel_runtime_pm_put(dev_priv);

	return len;
}
3387
/* File operations for the IPC status debugfs entry (seq_file backed). */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3396
c5511e44
DL
/*
 * debugfs node: dump the SKL+ display data buffer (DDB) allocation —
 * start/end/size per plane per pipe, with the cursor plane listed last.
 * -ENODEV before gen9, which has no DDB.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	/* modeset lock keeps the software DDB state stable while printing. */
	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_universal_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
3434
/*
 * Print DRRS (Dynamic Refresh Rate Switching) state for one CRTC: the
 * connectors driven by it, the VBT-declared DRRS type, and — when the
 * current CRTC state supports DRRS — the idleness state and the vrefresh
 * of the currently selected (high or low) refresh rate.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name every connector currently assigned to this CRTC. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			/* Unknown refresh-rate type: report it and bail out. */
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
				   drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3509
3510static int i915_drrs_status(struct seq_file *m, void *unused)
3511{
36cdd013
DW
3512 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3513 struct drm_device *dev = &dev_priv->drm;
a54746e3
VK
3514 struct intel_crtc *intel_crtc;
3515 int active_crtc_cnt = 0;
3516
26875fe5 3517 drm_modeset_lock_all(dev);
a54746e3 3518 for_each_intel_crtc(dev, intel_crtc) {
f77076c9 3519 if (intel_crtc->base.state->active) {
a54746e3
VK
3520 active_crtc_cnt++;
3521 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3522
3523 drrs_status_per_crtc(m, dev, intel_crtc);
3524 }
a54746e3 3525 }
26875fe5 3526 drm_modeset_unlock_all(dev);
a54746e3
VK
3527
3528 if (!active_crtc_cnt)
3529 seq_puts(m, "No active crtc found\n");
3530
3531 return 0;
3532}
3533
11bed958
DA
/*
 * debugfs node: dump the MST topology of every MST-capable DP source
 * port (skipping MST stream encoders themselves — only the primary
 * port's encoder owns the topology manager).
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3564
/*
 * debugfs write handler for DP compliance testing: userspace writes "1"
 * to arm the compliance test on every connected non-MST DP connector;
 * any other value disarms it.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* Copy and NUL-terminate the user buffer for kstrtoint(). */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		/* MST stream encoders are not compliance-test targets. */
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3623
3624static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3625{
e4006713
AS
3626 struct drm_i915_private *dev_priv = m->private;
3627 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3628 struct drm_connector *connector;
3f6a5e1e 3629 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3630 struct intel_dp *intel_dp;
3631
3f6a5e1e
DV
3632 drm_connector_list_iter_begin(dev, &conn_iter);
3633 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3634 struct intel_encoder *encoder;
3635
eb3394fa
TP
3636 if (connector->connector_type !=
3637 DRM_MODE_CONNECTOR_DisplayPort)
3638 continue;
3639
a874b6a3
ML
3640 encoder = to_intel_encoder(connector->encoder);
3641 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3642 continue;
3643
3644 if (encoder && connector->status == connector_status_connected) {
3645 intel_dp = enc_to_intel_dp(&encoder->base);
c1617abc 3646 if (intel_dp->compliance.test_active)
eb3394fa
TP
3647 seq_puts(m, "1");
3648 else
3649 seq_puts(m, "0");
3650 } else
3651 seq_puts(m, "0");
3652 }
3f6a5e1e 3653 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3654
3655 return 0;
3656}
3657
3658static int i915_displayport_test_active_open(struct inode *inode,
36cdd013 3659 struct file *file)
eb3394fa 3660{
36cdd013 3661 return single_open(file, i915_displayport_test_active_show,
e4006713 3662 inode->i_private);
eb3394fa
TP
3663}
3664
3665static const struct file_operations i915_displayport_test_active_fops = {
3666 .owner = THIS_MODULE,
3667 .open = i915_displayport_test_active_open,
3668 .read = seq_read,
3669 .llseek = seq_lseek,
3670 .release = single_release,
3671 .write = i915_displayport_test_active_write
3672};
3673
3674static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3675{
e4006713
AS
3676 struct drm_i915_private *dev_priv = m->private;
3677 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3678 struct drm_connector *connector;
3f6a5e1e 3679 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3680 struct intel_dp *intel_dp;
3681
3f6a5e1e
DV
3682 drm_connector_list_iter_begin(dev, &conn_iter);
3683 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3684 struct intel_encoder *encoder;
3685
eb3394fa
TP
3686 if (connector->connector_type !=
3687 DRM_MODE_CONNECTOR_DisplayPort)
3688 continue;
3689
a874b6a3
ML
3690 encoder = to_intel_encoder(connector->encoder);
3691 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3692 continue;
3693
3694 if (encoder && connector->status == connector_status_connected) {
3695 intel_dp = enc_to_intel_dp(&encoder->base);
b48a5ba9
MN
3696 if (intel_dp->compliance.test_type ==
3697 DP_TEST_LINK_EDID_READ)
3698 seq_printf(m, "%lx",
3699 intel_dp->compliance.test_data.edid);
611032bf
MN
3700 else if (intel_dp->compliance.test_type ==
3701 DP_TEST_LINK_VIDEO_PATTERN) {
3702 seq_printf(m, "hdisplay: %d\n",
3703 intel_dp->compliance.test_data.hdisplay);
3704 seq_printf(m, "vdisplay: %d\n",
3705 intel_dp->compliance.test_data.vdisplay);
3706 seq_printf(m, "bpc: %u\n",
3707 intel_dp->compliance.test_data.bpc);
3708 }
eb3394fa
TP
3709 } else
3710 seq_puts(m, "0");
3711 }
3f6a5e1e 3712 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3713
3714 return 0;
3715}
e4006713 3716DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
eb3394fa
TP
3717
3718static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3719{
e4006713
AS
3720 struct drm_i915_private *dev_priv = m->private;
3721 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3722 struct drm_connector *connector;
3f6a5e1e 3723 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3724 struct intel_dp *intel_dp;
3725
3f6a5e1e
DV
3726 drm_connector_list_iter_begin(dev, &conn_iter);
3727 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3728 struct intel_encoder *encoder;
3729
eb3394fa
TP
3730 if (connector->connector_type !=
3731 DRM_MODE_CONNECTOR_DisplayPort)
3732 continue;
3733
a874b6a3
ML
3734 encoder = to_intel_encoder(connector->encoder);
3735 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3736 continue;
3737
3738 if (encoder && connector->status == connector_status_connected) {
3739 intel_dp = enc_to_intel_dp(&encoder->base);
c1617abc 3740 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
eb3394fa
TP
3741 } else
3742 seq_puts(m, "0");
3743 }
3f6a5e1e 3744 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3745
3746 return 0;
3747}
e4006713 3748DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
eb3394fa 3749
97e94b22 3750static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
369a1342 3751{
36cdd013
DW
3752 struct drm_i915_private *dev_priv = m->private;
3753 struct drm_device *dev = &dev_priv->drm;
369a1342 3754 int level;
de38b95c
VS
3755 int num_levels;
3756
36cdd013 3757 if (IS_CHERRYVIEW(dev_priv))
de38b95c 3758 num_levels = 3;
36cdd013 3759 else if (IS_VALLEYVIEW(dev_priv))
de38b95c 3760 num_levels = 1;
04548cba
VS
3761 else if (IS_G4X(dev_priv))
3762 num_levels = 3;
de38b95c 3763 else
5db94019 3764 num_levels = ilk_wm_max_level(dev_priv) + 1;
369a1342
VS
3765
3766 drm_modeset_lock_all(dev);
3767
3768 for (level = 0; level < num_levels; level++) {
3769 unsigned int latency = wm[level];
3770
97e94b22
DL
3771 /*
3772 * - WM1+ latency values in 0.5us units
de38b95c 3773 * - latencies are in us on gen9/vlv/chv
97e94b22 3774 */
04548cba
VS
3775 if (INTEL_GEN(dev_priv) >= 9 ||
3776 IS_VALLEYVIEW(dev_priv) ||
3777 IS_CHERRYVIEW(dev_priv) ||
3778 IS_G4X(dev_priv))
97e94b22
DL
3779 latency *= 10;
3780 else if (level > 0)
369a1342
VS
3781 latency *= 5;
3782
3783 seq_printf(m, "WM%d %u (%u.%u usec)\n",
97e94b22 3784 level, wm[level], latency / 10, latency % 10);
369a1342
VS
3785 }
3786
3787 drm_modeset_unlock_all(dev);
3788}
3789
3790static int pri_wm_latency_show(struct seq_file *m, void *data)
3791{
36cdd013 3792 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3793 const uint16_t *latencies;
3794
36cdd013 3795 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3796 latencies = dev_priv->wm.skl_latency;
3797 else
36cdd013 3798 latencies = dev_priv->wm.pri_latency;
369a1342 3799
97e94b22 3800 wm_latency_show(m, latencies);
369a1342
VS
3801
3802 return 0;
3803}
3804
3805static int spr_wm_latency_show(struct seq_file *m, void *data)
3806{
36cdd013 3807 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3808 const uint16_t *latencies;
3809
36cdd013 3810 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3811 latencies = dev_priv->wm.skl_latency;
3812 else
36cdd013 3813 latencies = dev_priv->wm.spr_latency;
369a1342 3814
97e94b22 3815 wm_latency_show(m, latencies);
369a1342
VS
3816
3817 return 0;
3818}
3819
3820static int cur_wm_latency_show(struct seq_file *m, void *data)
3821{
36cdd013 3822 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3823 const uint16_t *latencies;
3824
36cdd013 3825 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3826 latencies = dev_priv->wm.skl_latency;
3827 else
36cdd013 3828 latencies = dev_priv->wm.cur_latency;
369a1342 3829
97e94b22 3830 wm_latency_show(m, latencies);
369a1342
VS
3831
3832 return 0;
3833}
3834
3835static int pri_wm_latency_open(struct inode *inode, struct file *file)
3836{
36cdd013 3837 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3838
04548cba 3839 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
369a1342
VS
3840 return -ENODEV;
3841
36cdd013 3842 return single_open(file, pri_wm_latency_show, dev_priv);
369a1342
VS
3843}
3844
3845static int spr_wm_latency_open(struct inode *inode, struct file *file)
3846{
36cdd013 3847 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3848
36cdd013 3849 if (HAS_GMCH_DISPLAY(dev_priv))
369a1342
VS
3850 return -ENODEV;
3851
36cdd013 3852 return single_open(file, spr_wm_latency_show, dev_priv);
369a1342
VS
3853}
3854
3855static int cur_wm_latency_open(struct inode *inode, struct file *file)
3856{
36cdd013 3857 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3858
36cdd013 3859 if (HAS_GMCH_DISPLAY(dev_priv))
369a1342
VS
3860 return -ENODEV;
3861
36cdd013 3862 return single_open(file, cur_wm_latency_show, dev_priv);
369a1342
VS
3863}
3864
3865static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
97e94b22 3866 size_t len, loff_t *offp, uint16_t wm[8])
369a1342
VS
3867{
3868 struct seq_file *m = file->private_data;
36cdd013
DW
3869 struct drm_i915_private *dev_priv = m->private;
3870 struct drm_device *dev = &dev_priv->drm;
97e94b22 3871 uint16_t new[8] = { 0 };
de38b95c 3872 int num_levels;
369a1342
VS
3873 int level;
3874 int ret;
3875 char tmp[32];
3876
36cdd013 3877 if (IS_CHERRYVIEW(dev_priv))
de38b95c 3878 num_levels = 3;
36cdd013 3879 else if (IS_VALLEYVIEW(dev_priv))
de38b95c 3880 num_levels = 1;
04548cba
VS
3881 else if (IS_G4X(dev_priv))
3882 num_levels = 3;
de38b95c 3883 else
5db94019 3884 num_levels = ilk_wm_max_level(dev_priv) + 1;
de38b95c 3885
369a1342
VS
3886 if (len >= sizeof(tmp))
3887 return -EINVAL;
3888
3889 if (copy_from_user(tmp, ubuf, len))
3890 return -EFAULT;
3891
3892 tmp[len] = '\0';
3893
97e94b22
DL
3894 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3895 &new[0], &new[1], &new[2], &new[3],
3896 &new[4], &new[5], &new[6], &new[7]);
369a1342
VS
3897 if (ret != num_levels)
3898 return -EINVAL;
3899
3900 drm_modeset_lock_all(dev);
3901
3902 for (level = 0; level < num_levels; level++)
3903 wm[level] = new[level];
3904
3905 drm_modeset_unlock_all(dev);
3906
3907 return len;
3908}
3909
3910
3911static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3912 size_t len, loff_t *offp)
3913{
3914 struct seq_file *m = file->private_data;
36cdd013 3915 struct drm_i915_private *dev_priv = m->private;
97e94b22 3916 uint16_t *latencies;
369a1342 3917
36cdd013 3918 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3919 latencies = dev_priv->wm.skl_latency;
3920 else
36cdd013 3921 latencies = dev_priv->wm.pri_latency;
97e94b22
DL
3922
3923 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3924}
3925
3926static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3927 size_t len, loff_t *offp)
3928{
3929 struct seq_file *m = file->private_data;
36cdd013 3930 struct drm_i915_private *dev_priv = m->private;
97e94b22 3931 uint16_t *latencies;
369a1342 3932
36cdd013 3933 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3934 latencies = dev_priv->wm.skl_latency;
3935 else
36cdd013 3936 latencies = dev_priv->wm.spr_latency;
97e94b22
DL
3937
3938 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3939}
3940
3941static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3942 size_t len, loff_t *offp)
3943{
3944 struct seq_file *m = file->private_data;
36cdd013 3945 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3946 uint16_t *latencies;
3947
36cdd013 3948 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3949 latencies = dev_priv->wm.skl_latency;
3950 else
36cdd013 3951 latencies = dev_priv->wm.cur_latency;
369a1342 3952
97e94b22 3953 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3954}
3955
3956static const struct file_operations i915_pri_wm_latency_fops = {
3957 .owner = THIS_MODULE,
3958 .open = pri_wm_latency_open,
3959 .read = seq_read,
3960 .llseek = seq_lseek,
3961 .release = single_release,
3962 .write = pri_wm_latency_write
3963};
3964
3965static const struct file_operations i915_spr_wm_latency_fops = {
3966 .owner = THIS_MODULE,
3967 .open = spr_wm_latency_open,
3968 .read = seq_read,
3969 .llseek = seq_lseek,
3970 .release = single_release,
3971 .write = spr_wm_latency_write
3972};
3973
3974static const struct file_operations i915_cur_wm_latency_fops = {
3975 .owner = THIS_MODULE,
3976 .open = cur_wm_latency_open,
3977 .read = seq_read,
3978 .llseek = seq_lseek,
3979 .release = single_release,
3980 .write = cur_wm_latency_write
3981};
3982
647416f9
KC
3983static int
3984i915_wedged_get(void *data, u64 *val)
f3cd474b 3985{
36cdd013 3986 struct drm_i915_private *dev_priv = data;
f3cd474b 3987
d98c52cf 3988 *val = i915_terminally_wedged(&dev_priv->gpu_error);
f3cd474b 3989
647416f9 3990 return 0;
f3cd474b
CW
3991}
3992
647416f9
KC
3993static int
3994i915_wedged_set(void *data, u64 val)
f3cd474b 3995{
598b6b5a
CW
3996 struct drm_i915_private *i915 = data;
3997 struct intel_engine_cs *engine;
3998 unsigned int tmp;
d46c0517 3999
b8d24a06
MK
4000 /*
4001 * There is no safeguard against this debugfs entry colliding
4002 * with the hangcheck calling same i915_handle_error() in
4003 * parallel, causing an explosion. For now we assume that the
4004 * test harness is responsible enough not to inject gpu hangs
4005 * while it is writing to 'i915_wedged'
4006 */
4007
598b6b5a 4008 if (i915_reset_backoff(&i915->gpu_error))
b8d24a06
MK
4009 return -EAGAIN;
4010
598b6b5a
CW
4011 for_each_engine_masked(engine, i915, val, tmp) {
4012 engine->hangcheck.seqno = intel_engine_get_seqno(engine);
4013 engine->hangcheck.stalled = true;
4014 }
4015
ce800754
CW
4016 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
4017 "Manually set wedged engine mask = %llx", val);
d46c0517 4018
598b6b5a 4019 wait_on_bit(&i915->gpu_error.flags,
d3df42b7
CW
4020 I915_RESET_HANDOFF,
4021 TASK_UNINTERRUPTIBLE);
4022
647416f9 4023 return 0;
f3cd474b
CW
4024}
4025
647416f9
KC
4026DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4027 i915_wedged_get, i915_wedged_set,
3a3b4f98 4028 "%llu\n");
f3cd474b 4029
64486ae7
CW
4030static int
4031fault_irq_set(struct drm_i915_private *i915,
4032 unsigned long *irq,
4033 unsigned long val)
4034{
4035 int err;
4036
4037 err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4038 if (err)
4039 return err;
4040
4041 err = i915_gem_wait_for_idle(i915,
4042 I915_WAIT_LOCKED |
ec625fb9
CW
4043 I915_WAIT_INTERRUPTIBLE,
4044 MAX_SCHEDULE_TIMEOUT);
64486ae7
CW
4045 if (err)
4046 goto err_unlock;
4047
64486ae7
CW
4048 *irq = val;
4049 mutex_unlock(&i915->drm.struct_mutex);
4050
4051 /* Flush idle worker to disarm irq */
7c26240e 4052 drain_delayed_work(&i915->gt.idle_work);
64486ae7
CW
4053
4054 return 0;
4055
4056err_unlock:
4057 mutex_unlock(&i915->drm.struct_mutex);
4058 return err;
4059}
4060
094f9a54
CW
4061static int
4062i915_ring_missed_irq_get(void *data, u64 *val)
4063{
36cdd013 4064 struct drm_i915_private *dev_priv = data;
094f9a54
CW
4065
4066 *val = dev_priv->gpu_error.missed_irq_rings;
4067 return 0;
4068}
4069
4070static int
4071i915_ring_missed_irq_set(void *data, u64 val)
4072{
64486ae7 4073 struct drm_i915_private *i915 = data;
094f9a54 4074
64486ae7 4075 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
094f9a54
CW
4076}
4077
4078DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4079 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4080 "0x%08llx\n");
4081
4082static int
4083i915_ring_test_irq_get(void *data, u64 *val)
4084{
36cdd013 4085 struct drm_i915_private *dev_priv = data;
094f9a54
CW
4086
4087 *val = dev_priv->gpu_error.test_irq_rings;
4088
4089 return 0;
4090}
4091
4092static int
4093i915_ring_test_irq_set(void *data, u64 val)
4094{
64486ae7 4095 struct drm_i915_private *i915 = data;
094f9a54 4096
64486ae7 4097 val &= INTEL_INFO(i915)->ring_mask;
094f9a54 4098 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
094f9a54 4099
64486ae7 4100 return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
094f9a54
CW
4101}
4102
4103DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4104 i915_ring_test_irq_get, i915_ring_test_irq_set,
4105 "0x%08llx\n");
4106
b4a0b32d
CW
4107#define DROP_UNBOUND BIT(0)
4108#define DROP_BOUND BIT(1)
4109#define DROP_RETIRE BIT(2)
4110#define DROP_ACTIVE BIT(3)
4111#define DROP_FREED BIT(4)
4112#define DROP_SHRINK_ALL BIT(5)
4113#define DROP_IDLE BIT(6)
fbbd37b3
CW
4114#define DROP_ALL (DROP_UNBOUND | \
4115 DROP_BOUND | \
4116 DROP_RETIRE | \
4117 DROP_ACTIVE | \
8eadc19b 4118 DROP_FREED | \
b4a0b32d
CW
4119 DROP_SHRINK_ALL |\
4120 DROP_IDLE)
647416f9
KC
4121static int
4122i915_drop_caches_get(void *data, u64 *val)
dd624afd 4123{
647416f9 4124 *val = DROP_ALL;
dd624afd 4125
647416f9 4126 return 0;
dd624afd
CW
4127}
4128
647416f9
KC
4129static int
4130i915_drop_caches_set(void *data, u64 val)
dd624afd 4131{
36cdd013
DW
4132 struct drm_i915_private *dev_priv = data;
4133 struct drm_device *dev = &dev_priv->drm;
00c26cf9 4134 int ret = 0;
dd624afd 4135
b4a0b32d
CW
4136 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4137 val, val & DROP_ALL);
dd624afd
CW
4138
4139 /* No need to check and wait for gpu resets, only libdrm auto-restarts
4140 * on ioctls on -EAGAIN. */
00c26cf9
CW
4141 if (val & (DROP_ACTIVE | DROP_RETIRE)) {
4142 ret = mutex_lock_interruptible(&dev->struct_mutex);
dd624afd 4143 if (ret)
00c26cf9 4144 return ret;
dd624afd 4145
00c26cf9
CW
4146 if (val & DROP_ACTIVE)
4147 ret = i915_gem_wait_for_idle(dev_priv,
4148 I915_WAIT_INTERRUPTIBLE |
ec625fb9
CW
4149 I915_WAIT_LOCKED,
4150 MAX_SCHEDULE_TIMEOUT);
00c26cf9
CW
4151
4152 if (val & DROP_RETIRE)
e61e0f51 4153 i915_retire_requests(dev_priv);
00c26cf9
CW
4154
4155 mutex_unlock(&dev->struct_mutex);
4156 }
dd624afd 4157
d92a8cfc 4158 fs_reclaim_acquire(GFP_KERNEL);
21ab4e74 4159 if (val & DROP_BOUND)
912d572d 4160 i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
4ad72b7f 4161
21ab4e74 4162 if (val & DROP_UNBOUND)
912d572d 4163 i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
dd624afd 4164
8eadc19b
CW
4165 if (val & DROP_SHRINK_ALL)
4166 i915_gem_shrink_all(dev_priv);
d92a8cfc 4167 fs_reclaim_release(GFP_KERNEL);
8eadc19b 4168
4dfacb0b
CW
4169 if (val & DROP_IDLE) {
4170 do {
4171 if (READ_ONCE(dev_priv->gt.active_requests))
4172 flush_delayed_work(&dev_priv->gt.retire_work);
4173 drain_delayed_work(&dev_priv->gt.idle_work);
4174 } while (READ_ONCE(dev_priv->gt.awake));
4175 }
b4a0b32d 4176
c9c70471 4177 if (val & DROP_FREED)
bdeb9785 4178 i915_gem_drain_freed_objects(dev_priv);
fbbd37b3 4179
647416f9 4180 return ret;
dd624afd
CW
4181}
4182
647416f9
KC
4183DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4184 i915_drop_caches_get, i915_drop_caches_set,
4185 "0x%08llx\n");
dd624afd 4186
647416f9
KC
4187static int
4188i915_cache_sharing_get(void *data, u64 *val)
07b7ddd9 4189{
36cdd013 4190 struct drm_i915_private *dev_priv = data;
07b7ddd9 4191 u32 snpcr;
07b7ddd9 4192
36cdd013 4193 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
004777cb
DV
4194 return -ENODEV;
4195
c8c8fb33 4196 intel_runtime_pm_get(dev_priv);
22bcfc6a 4197
07b7ddd9 4198 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
c8c8fb33
PZ
4199
4200 intel_runtime_pm_put(dev_priv);
07b7ddd9 4201
647416f9 4202 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
07b7ddd9 4203
647416f9 4204 return 0;
07b7ddd9
JB
4205}
4206
647416f9
KC
4207static int
4208i915_cache_sharing_set(void *data, u64 val)
07b7ddd9 4209{
36cdd013 4210 struct drm_i915_private *dev_priv = data;
07b7ddd9 4211 u32 snpcr;
07b7ddd9 4212
36cdd013 4213 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
004777cb
DV
4214 return -ENODEV;
4215
647416f9 4216 if (val > 3)
07b7ddd9
JB
4217 return -EINVAL;
4218
c8c8fb33 4219 intel_runtime_pm_get(dev_priv);
647416f9 4220 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
07b7ddd9
JB
4221
4222 /* Update the cache sharing policy here as well */
4223 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4224 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4225 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4226 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4227
c8c8fb33 4228 intel_runtime_pm_put(dev_priv);
647416f9 4229 return 0;
07b7ddd9
JB
4230}
4231
647416f9
KC
4232DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4233 i915_cache_sharing_get, i915_cache_sharing_set,
4234 "%llu\n");
07b7ddd9 4235
36cdd013 4236static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4237 struct sseu_dev_info *sseu)
5d39525a 4238{
7aa0b14e
CW
4239#define SS_MAX 2
4240 const int ss_max = SS_MAX;
4241 u32 sig1[SS_MAX], sig2[SS_MAX];
5d39525a 4242 int ss;
5d39525a
JM
4243
4244 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4245 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4246 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4247 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4248
4249 for (ss = 0; ss < ss_max; ss++) {
4250 unsigned int eu_cnt;
4251
4252 if (sig1[ss] & CHV_SS_PG_ENABLE)
4253 /* skip disabled subslice */
4254 continue;
4255
f08a0c92 4256 sseu->slice_mask = BIT(0);
8cc76693 4257 sseu->subslice_mask[0] |= BIT(ss);
5d39525a
JM
4258 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4259 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4260 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4261 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
915490d5
ID
4262 sseu->eu_total += eu_cnt;
4263 sseu->eu_per_subslice = max_t(unsigned int,
4264 sseu->eu_per_subslice, eu_cnt);
5d39525a 4265 }
7aa0b14e 4266#undef SS_MAX
5d39525a
JM
4267}
4268
f8c3dcf9
RV
4269static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4270 struct sseu_dev_info *sseu)
4271{
c7fb3c6c 4272#define SS_MAX 6
f8c3dcf9 4273 const struct intel_device_info *info = INTEL_INFO(dev_priv);
c7fb3c6c 4274 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
f8c3dcf9 4275 int s, ss;
f8c3dcf9 4276
b3e7f866 4277 for (s = 0; s < info->sseu.max_slices; s++) {
f8c3dcf9
RV
4278 /*
4279 * FIXME: Valid SS Mask respects the spec and read
4280 * only valid bits for those registers, excluding reserverd
4281 * although this seems wrong because it would leave many
4282 * subslices without ACK.
4283 */
4284 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4285 GEN10_PGCTL_VALID_SS_MASK(s);
4286 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4287 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4288 }
4289
4290 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4291 GEN9_PGCTL_SSA_EU19_ACK |
4292 GEN9_PGCTL_SSA_EU210_ACK |
4293 GEN9_PGCTL_SSA_EU311_ACK;
4294 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4295 GEN9_PGCTL_SSB_EU19_ACK |
4296 GEN9_PGCTL_SSB_EU210_ACK |
4297 GEN9_PGCTL_SSB_EU311_ACK;
4298
b3e7f866 4299 for (s = 0; s < info->sseu.max_slices; s++) {
f8c3dcf9
RV
4300 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4301 /* skip disabled slice */
4302 continue;
4303
4304 sseu->slice_mask |= BIT(s);
8cc76693 4305 sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
f8c3dcf9 4306
b3e7f866 4307 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
f8c3dcf9
RV
4308 unsigned int eu_cnt;
4309
4310 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4311 /* skip disabled subslice */
4312 continue;
4313
4314 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4315 eu_mask[ss % 2]);
4316 sseu->eu_total += eu_cnt;
4317 sseu->eu_per_subslice = max_t(unsigned int,
4318 sseu->eu_per_subslice,
4319 eu_cnt);
4320 }
4321 }
c7fb3c6c 4322#undef SS_MAX
f8c3dcf9
RV
4323}
4324
36cdd013 4325static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4326 struct sseu_dev_info *sseu)
5d39525a 4327{
c7fb3c6c 4328#define SS_MAX 3
b3e7f866 4329 const struct intel_device_info *info = INTEL_INFO(dev_priv);
c7fb3c6c 4330 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
5d39525a 4331 int s, ss;
1c046bc1 4332
b3e7f866 4333 for (s = 0; s < info->sseu.max_slices; s++) {
1c046bc1
JM
4334 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4335 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4336 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4337 }
4338
5d39525a
JM
4339 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4340 GEN9_PGCTL_SSA_EU19_ACK |
4341 GEN9_PGCTL_SSA_EU210_ACK |
4342 GEN9_PGCTL_SSA_EU311_ACK;
4343 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4344 GEN9_PGCTL_SSB_EU19_ACK |
4345 GEN9_PGCTL_SSB_EU210_ACK |
4346 GEN9_PGCTL_SSB_EU311_ACK;
4347
b3e7f866 4348 for (s = 0; s < info->sseu.max_slices; s++) {
5d39525a
JM
4349 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4350 /* skip disabled slice */
4351 continue;
4352
f08a0c92 4353 sseu->slice_mask |= BIT(s);
1c046bc1 4354
f8c3dcf9 4355 if (IS_GEN9_BC(dev_priv))
8cc76693
LL
4356 sseu->subslice_mask[s] =
4357 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
1c046bc1 4358
b3e7f866 4359 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
5d39525a
JM
4360 unsigned int eu_cnt;
4361
cc3f90f0 4362 if (IS_GEN9_LP(dev_priv)) {
57ec171e
ID
4363 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4364 /* skip disabled subslice */
4365 continue;
1c046bc1 4366
8cc76693 4367 sseu->subslice_mask[s] |= BIT(ss);
57ec171e 4368 }
1c046bc1 4369
5d39525a
JM
4370 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4371 eu_mask[ss%2]);
915490d5
ID
4372 sseu->eu_total += eu_cnt;
4373 sseu->eu_per_subslice = max_t(unsigned int,
4374 sseu->eu_per_subslice,
4375 eu_cnt);
5d39525a
JM
4376 }
4377 }
c7fb3c6c 4378#undef SS_MAX
5d39525a
JM
4379}
4380
36cdd013 4381static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4382 struct sseu_dev_info *sseu)
91bedd34 4383{
91bedd34 4384 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
36cdd013 4385 int s;
91bedd34 4386
f08a0c92 4387 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
91bedd34 4388
f08a0c92 4389 if (sseu->slice_mask) {
43b67998
ID
4390 sseu->eu_per_subslice =
4391 INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
8cc76693
LL
4392 for (s = 0; s < fls(sseu->slice_mask); s++) {
4393 sseu->subslice_mask[s] =
4394 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4395 }
57ec171e
ID
4396 sseu->eu_total = sseu->eu_per_subslice *
4397 sseu_subslice_total(sseu);
91bedd34
ŁD
4398
4399 /* subtract fused off EU(s) from enabled slice(s) */
795b38b3 4400 for (s = 0; s < fls(sseu->slice_mask); s++) {
43b67998
ID
4401 u8 subslice_7eu =
4402 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
91bedd34 4403
915490d5 4404 sseu->eu_total -= hweight8(subslice_7eu);
91bedd34
ŁD
4405 }
4406 }
4407}
4408
615d8908
ID
4409static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4410 const struct sseu_dev_info *sseu)
4411{
4412 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4413 const char *type = is_available_info ? "Available" : "Enabled";
8cc76693 4414 int s;
615d8908 4415
c67ba538
ID
4416 seq_printf(m, " %s Slice Mask: %04x\n", type,
4417 sseu->slice_mask);
615d8908 4418 seq_printf(m, " %s Slice Total: %u\n", type,
f08a0c92 4419 hweight8(sseu->slice_mask));
615d8908 4420 seq_printf(m, " %s Subslice Total: %u\n", type,
57ec171e 4421 sseu_subslice_total(sseu));
8cc76693
LL
4422 for (s = 0; s < fls(sseu->slice_mask); s++) {
4423 seq_printf(m, " %s Slice%i subslices: %u\n", type,
4424 s, hweight8(sseu->subslice_mask[s]));
4425 }
615d8908
ID
4426 seq_printf(m, " %s EU Total: %u\n", type,
4427 sseu->eu_total);
4428 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4429 sseu->eu_per_subslice);
4430
4431 if (!is_available_info)
4432 return;
4433
4434 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4435 if (HAS_POOLED_EU(dev_priv))
4436 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4437
4438 seq_printf(m, " Has Slice Power Gating: %s\n",
4439 yesno(sseu->has_slice_pg));
4440 seq_printf(m, " Has Subslice Power Gating: %s\n",
4441 yesno(sseu->has_subslice_pg));
4442 seq_printf(m, " Has EU Power Gating: %s\n",
4443 yesno(sseu->has_eu_pg));
4444}
4445
3873218f
JM
4446static int i915_sseu_status(struct seq_file *m, void *unused)
4447{
36cdd013 4448 struct drm_i915_private *dev_priv = node_to_i915(m->private);
915490d5 4449 struct sseu_dev_info sseu;
3873218f 4450
36cdd013 4451 if (INTEL_GEN(dev_priv) < 8)
3873218f
JM
4452 return -ENODEV;
4453
4454 seq_puts(m, "SSEU Device Info\n");
615d8908 4455 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
3873218f 4456
7f992aba 4457 seq_puts(m, "SSEU Device Status\n");
915490d5 4458 memset(&sseu, 0, sizeof(sseu));
8cc76693
LL
4459 sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4460 sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4461 sseu.max_eus_per_subslice =
4462 INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
238010ed
DW
4463
4464 intel_runtime_pm_get(dev_priv);
4465
36cdd013 4466 if (IS_CHERRYVIEW(dev_priv)) {
915490d5 4467 cherryview_sseu_device_status(dev_priv, &sseu);
36cdd013 4468 } else if (IS_BROADWELL(dev_priv)) {
915490d5 4469 broadwell_sseu_device_status(dev_priv, &sseu);
f8c3dcf9 4470 } else if (IS_GEN9(dev_priv)) {
915490d5 4471 gen9_sseu_device_status(dev_priv, &sseu);
f8c3dcf9
RV
4472 } else if (INTEL_GEN(dev_priv) >= 10) {
4473 gen10_sseu_device_status(dev_priv, &sseu);
7f992aba 4474 }
238010ed
DW
4475
4476 intel_runtime_pm_put(dev_priv);
4477
615d8908 4478 i915_print_sseu_info(m, false, &sseu);
7f992aba 4479
3873218f
JM
4480 return 0;
4481}
4482
6d794d42
BW
4483static int i915_forcewake_open(struct inode *inode, struct file *file)
4484{
d7a133d8 4485 struct drm_i915_private *i915 = inode->i_private;
6d794d42 4486
d7a133d8 4487 if (INTEL_GEN(i915) < 6)
6d794d42
BW
4488 return 0;
4489
d7a133d8
CW
4490 intel_runtime_pm_get(i915);
4491 intel_uncore_forcewake_user_get(i915);
6d794d42
BW
4492
4493 return 0;
4494}
4495
c43b5634 4496static int i915_forcewake_release(struct inode *inode, struct file *file)
6d794d42 4497{
d7a133d8 4498 struct drm_i915_private *i915 = inode->i_private;
6d794d42 4499
d7a133d8 4500 if (INTEL_GEN(i915) < 6)
6d794d42
BW
4501 return 0;
4502
d7a133d8
CW
4503 intel_uncore_forcewake_user_put(i915);
4504 intel_runtime_pm_put(i915);
6d794d42
BW
4505
4506 return 0;
4507}
4508
4509static const struct file_operations i915_forcewake_fops = {
4510 .owner = THIS_MODULE,
4511 .open = i915_forcewake_open,
4512 .release = i915_forcewake_release,
4513};
4514
317eaa95
L
4515static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4516{
4517 struct drm_i915_private *dev_priv = m->private;
4518 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4519
4520 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4521 seq_printf(m, "Detected: %s\n",
4522 yesno(delayed_work_pending(&hotplug->reenable_work)));
4523
4524 return 0;
4525}
4526
4527static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4528 const char __user *ubuf, size_t len,
4529 loff_t *offp)
4530{
4531 struct seq_file *m = file->private_data;
4532 struct drm_i915_private *dev_priv = m->private;
4533 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4534 unsigned int new_threshold;
4535 int i;
4536 char *newline;
4537 char tmp[16];
4538
4539 if (len >= sizeof(tmp))
4540 return -EINVAL;
4541
4542 if (copy_from_user(tmp, ubuf, len))
4543 return -EFAULT;
4544
4545 tmp[len] = '\0';
4546
4547 /* Strip newline, if any */
4548 newline = strchr(tmp, '\n');
4549 if (newline)
4550 *newline = '\0';
4551
4552 if (strcmp(tmp, "reset") == 0)
4553 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4554 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4555 return -EINVAL;
4556
4557 if (new_threshold > 0)
4558 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4559 new_threshold);
4560 else
4561 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4562
4563 spin_lock_irq(&dev_priv->irq_lock);
4564 hotplug->hpd_storm_threshold = new_threshold;
4565 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4566 for_each_hpd_pin(i)
4567 hotplug->stats[i].count = 0;
4568 spin_unlock_irq(&dev_priv->irq_lock);
4569
4570 /* Re-enable hpd immediately if we were in an irq storm */
4571 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4572
4573 return len;
4574}
4575
4576static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4577{
4578 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4579}
4580
4581static const struct file_operations i915_hpd_storm_ctl_fops = {
4582 .owner = THIS_MODULE,
4583 .open = i915_hpd_storm_ctl_open,
4584 .read = seq_read,
4585 .llseek = seq_lseek,
4586 .release = single_release,
4587 .write = i915_hpd_storm_ctl_write
4588};
4589
35954e88
R
4590static int i915_drrs_ctl_set(void *data, u64 val)
4591{
4592 struct drm_i915_private *dev_priv = data;
4593 struct drm_device *dev = &dev_priv->drm;
4594 struct intel_crtc *intel_crtc;
4595 struct intel_encoder *encoder;
4596 struct intel_dp *intel_dp;
4597
4598 if (INTEL_GEN(dev_priv) < 7)
4599 return -ENODEV;
4600
4601 drm_modeset_lock_all(dev);
4602 for_each_intel_crtc(dev, intel_crtc) {
4603 if (!intel_crtc->base.state->active ||
4604 !intel_crtc->config->has_drrs)
4605 continue;
4606
4607 for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
4608 if (encoder->type != INTEL_OUTPUT_EDP)
4609 continue;
4610
4611 DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4612 val ? "en" : "dis", val);
4613
4614 intel_dp = enc_to_intel_dp(&encoder->base);
4615 if (val)
4616 intel_edp_drrs_enable(intel_dp,
4617 intel_crtc->config);
4618 else
4619 intel_edp_drrs_disable(intel_dp,
4620 intel_crtc->config);
4621 }
4622 }
4623 drm_modeset_unlock_all(dev);
4624
4625 return 0;
4626}
4627
4628DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4629
d52ad9cb
ML
4630static ssize_t
4631i915_fifo_underrun_reset_write(struct file *filp,
4632 const char __user *ubuf,
4633 size_t cnt, loff_t *ppos)
4634{
4635 struct drm_i915_private *dev_priv = filp->private_data;
4636 struct intel_crtc *intel_crtc;
4637 struct drm_device *dev = &dev_priv->drm;
4638 int ret;
4639 bool reset;
4640
4641 ret = kstrtobool_from_user(ubuf, cnt, &reset);
4642 if (ret)
4643 return ret;
4644
4645 if (!reset)
4646 return cnt;
4647
4648 for_each_intel_crtc(dev, intel_crtc) {
4649 struct drm_crtc_commit *commit;
4650 struct intel_crtc_state *crtc_state;
4651
4652 ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4653 if (ret)
4654 return ret;
4655
4656 crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4657 commit = crtc_state->base.commit;
4658 if (commit) {
4659 ret = wait_for_completion_interruptible(&commit->hw_done);
4660 if (!ret)
4661 ret = wait_for_completion_interruptible(&commit->flip_done);
4662 }
4663
4664 if (!ret && crtc_state->base.active) {
4665 DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4666 pipe_name(intel_crtc->pipe));
4667
4668 intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4669 }
4670
4671 drm_modeset_unlock(&intel_crtc->base.mutex);
4672
4673 if (ret)
4674 return ret;
4675 }
4676
4677 ret = intel_fbc_reset_underrun(dev_priv);
4678 if (ret)
4679 return ret;
4680
4681 return cnt;
4682}
4683
/* Write-only knob: a truthy write re-arms FIFO underrun reporting. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
/*
 * Read-only debugfs entries: {name, show(), driver_features, data},
 * registered in bulk via drm_debugfs_create_files() from
 * i915_debugfs_register() below.
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	/* same show() as above; (void *)1 selects the load-error log */
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
/*
 * Writable debugfs entries, each backed by its own file_operations;
 * registered one by one with debugfs_create_file() in
 * i915_debugfs_register() below.
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
1dac891c 4772int i915_debugfs_register(struct drm_i915_private *dev_priv)
2017263e 4773{
91c8a326 4774 struct drm_minor *minor = dev_priv->drm.primary;
b05eeb0f 4775 struct dentry *ent;
6cc42152 4776 int i;
f3cd474b 4777
b05eeb0f
NT
4778 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4779 minor->debugfs_root, to_i915(minor->dev),
4780 &i915_forcewake_fops);
4781 if (!ent)
4782 return -ENOMEM;
6a9c308d 4783
34b9674c 4784 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
b05eeb0f
NT
4785 ent = debugfs_create_file(i915_debugfs_files[i].name,
4786 S_IRUGO | S_IWUSR,
4787 minor->debugfs_root,
4788 to_i915(minor->dev),
34b9674c 4789 i915_debugfs_files[i].fops);
b05eeb0f
NT
4790 if (!ent)
4791 return -ENOMEM;
34b9674c 4792 }
40633219 4793
27c202ad
BG
4794 return drm_debugfs_create_files(i915_debugfs_list,
4795 I915_DEBUGFS_ENTRIES,
2017263e
BG
4796 minor->debugfs_root, minor);
4797}
4798
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

/* DPCD register ranges dumped by i915_dpcd_show() below. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4823static int i915_dpcd_show(struct seq_file *m, void *data)
4824{
4825 struct drm_connector *connector = m->private;
4826 struct intel_dp *intel_dp =
4827 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4828 uint8_t buf[16];
4829 ssize_t err;
4830 int i;
4831
5c1a8875
MK
4832 if (connector->status != connector_status_connected)
4833 return -ENODEV;
4834
aa7471d2
JN
4835 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4836 const struct dpcd_block *b = &i915_dpcd_debug[i];
4837 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4838
4839 if (b->edp &&
4840 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4841 continue;
4842
4843 /* low tech for now */
4844 if (WARN_ON(size > sizeof(buf)))
4845 continue;
4846
4847 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4848 if (err <= 0) {
4849 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4850 size, b->offset, err);
4851 continue;
4852 }
4853
4854 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
b3f9d7d7 4855 }
aa7471d2
JN
4856
4857 return 0;
4858}
e4006713 4859DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
aa7471d2 4860
ecbd6781
DW
4861static int i915_panel_show(struct seq_file *m, void *data)
4862{
4863 struct drm_connector *connector = m->private;
4864 struct intel_dp *intel_dp =
4865 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4866
4867 if (connector->status != connector_status_connected)
4868 return -ENODEV;
4869
4870 seq_printf(m, "Panel power up delay: %d\n",
4871 intel_dp->panel_power_up_delay);
4872 seq_printf(m, "Panel power down delay: %d\n",
4873 intel_dp->panel_power_down_delay);
4874 seq_printf(m, "Backlight on delay: %d\n",
4875 intel_dp->backlight_on_delay);
4876 seq_printf(m, "Backlight off delay: %d\n",
4877 intel_dp->backlight_off_delay);
4878
4879 return 0;
4880}
e4006713 4881DEFINE_SHOW_ATTRIBUTE(i915_panel);
ecbd6781 4882
aa7471d2
JN
4883/**
4884 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4885 * @connector: pointer to a registered drm_connector
4886 *
4887 * Cleanup will be done by drm_connector_unregister() through a call to
4888 * drm_debugfs_connector_remove().
4889 *
4890 * Returns 0 on success, negative error codes on error.
4891 */
4892int i915_debugfs_connector_add(struct drm_connector *connector)
4893{
4894 struct dentry *root = connector->debugfs_entry;
4895
4896 /* The connector must have been registered beforehands. */
4897 if (!root)
4898 return -ENODEV;
4899
4900 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4901 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
ecbd6781
DW
4902 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4903 connector, &i915_dpcd_fops);
4904
5b7b3086 4905 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
ecbd6781
DW
4906 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4907 connector, &i915_panel_fops);
5b7b3086
DP
4908 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4909 connector, &i915_psr_sink_status_fops);
4910 }
aa7471d2
JN
4911
4912 return 0;
4913}