]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame - drivers/gpu/drm/i915/i915_debugfs.c
drm/i915: Update DRIVER_DATE to 20180906
[mirror_ubuntu-focal-kernel.git] / drivers / gpu / drm / i915 / i915_debugfs.c
CommitLineData
2017263e
BG
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
f3cd474b 29#include <linux/debugfs.h>
e637d2cb 30#include <linux/sort.h>
d92a8cfc 31#include <linux/sched/mm.h>
4e5359cd 32#include "intel_drv.h"
a2695744 33#include "intel_guc_submission.h"
2017263e 34
36cdd013
DW
35static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36{
37 return to_i915(node->minor->dev);
38}
39
70d39fe4
CW
40static int i915_capabilities(struct seq_file *m, void *data)
41{
36cdd013
DW
42 struct drm_i915_private *dev_priv = node_to_i915(m->private);
43 const struct intel_device_info *info = INTEL_INFO(dev_priv);
a8c9b849 44 struct drm_printer p = drm_seq_file_printer(m);
70d39fe4 45
36cdd013 46 seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
2e0d26f8 47 seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
36cdd013 48 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
418e3cd8 49
a8c9b849 50 intel_device_info_dump_flags(info, &p);
5fbbe8d4 51 intel_device_info_dump_runtime(info, &p);
3fed1808 52 intel_driver_caps_print(&dev_priv->caps, &p);
70d39fe4 53
418e3cd8 54 kernel_param_lock(THIS_MODULE);
acfb9973 55 i915_params_dump(&i915_modparams, &p);
418e3cd8
CW
56 kernel_param_unlock(THIS_MODULE);
57
70d39fe4
CW
58 return 0;
59}
2017263e 60
/* '*' when the object is still active on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
65
a7363de7 66static char get_pin_flag(struct drm_i915_gem_object *obj)
be12a86b 67{
bd3d2252 68 return obj->pin_global ? 'p' : ' ';
be12a86b
TU
69}
70
a7363de7 71static char get_tiling_flag(struct drm_i915_gem_object *obj)
a6172a80 72{
3e510a8e 73 switch (i915_gem_object_get_tiling(obj)) {
0206e353 74 default:
be12a86b
TU
75 case I915_TILING_NONE: return ' ';
76 case I915_TILING_X: return 'X';
77 case I915_TILING_Y: return 'Y';
0206e353 78 }
a6172a80
CW
79}
80
a7363de7 81static char get_global_flag(struct drm_i915_gem_object *obj)
be12a86b 82{
a65adaf8 83 return obj->userfault_count ? 'g' : ' ';
be12a86b
TU
84}
85
a7363de7 86static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
1d693bcc 87{
a4f5ea64 88 return obj->mm.mapping ? 'M' : ' ';
1d693bcc
BW
89}
90
ca1543be
TU
91static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92{
93 u64 size = 0;
94 struct i915_vma *vma;
95
e2189dd0
CW
96 for_each_ggtt_vma(vma, obj) {
97 if (drm_mm_node_allocated(&vma->node))
ca1543be
TU
98 size += vma->node.size;
99 }
100
101 return size;
102}
103
7393b7ee
MA
104static const char *
105stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106{
107 size_t x = 0;
108
109 switch (page_sizes) {
110 case 0:
111 return "";
112 case I915_GTT_PAGE_SIZE_4K:
113 return "4K";
114 case I915_GTT_PAGE_SIZE_64K:
115 return "64K";
116 case I915_GTT_PAGE_SIZE_2M:
117 return "2M";
118 default:
119 if (!buf)
120 return "M";
121
122 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123 x += snprintf(buf + x, len - x, "2M, ");
124 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125 x += snprintf(buf + x, len - x, "64K, ");
126 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127 x += snprintf(buf + x, len - x, "4K, ");
128 buf[x-2] = '\0';
129
130 return buf;
131 }
132}
133
37811fcc
CW
134static void
135describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
136{
b4716185 137 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
e2f80391 138 struct intel_engine_cs *engine;
1d693bcc 139 struct i915_vma *vma;
faf5bf0a 140 unsigned int frontbuffer_bits;
d7f46fc4
BW
141 int pin_count = 0;
142
188c1ab7
CW
143 lockdep_assert_held(&obj->base.dev->struct_mutex);
144
d07f0e59 145 seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
37811fcc 146 &obj->base,
be12a86b 147 get_active_flag(obj),
37811fcc
CW
148 get_pin_flag(obj),
149 get_tiling_flag(obj),
1d693bcc 150 get_global_flag(obj),
be12a86b 151 get_pin_mapped_flag(obj),
a05a5862 152 obj->base.size / 1024,
c0a51fd0
CK
153 obj->read_domains,
154 obj->write_domain,
36cdd013 155 i915_cache_level_str(dev_priv, obj->cache_level),
a4f5ea64
CW
156 obj->mm.dirty ? " dirty" : "",
157 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
37811fcc
CW
158 if (obj->base.name)
159 seq_printf(m, " (name: %d)", obj->base.name);
1c7f4bca 160 list_for_each_entry(vma, &obj->vma_list, obj_link) {
20dfbde4 161 if (i915_vma_is_pinned(vma))
d7f46fc4 162 pin_count++;
ba0635ff
DC
163 }
164 seq_printf(m, " (pinned x %d)", pin_count);
bd3d2252
CW
165 if (obj->pin_global)
166 seq_printf(m, " (global)");
1c7f4bca 167 list_for_each_entry(vma, &obj->vma_list, obj_link) {
15717de2
CW
168 if (!drm_mm_node_allocated(&vma->node))
169 continue;
170
7393b7ee 171 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
3272db53 172 i915_vma_is_ggtt(vma) ? "g" : "pp",
7393b7ee
MA
173 vma->node.start, vma->node.size,
174 stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
21976853
CW
175 if (i915_vma_is_ggtt(vma)) {
176 switch (vma->ggtt_view.type) {
177 case I915_GGTT_VIEW_NORMAL:
178 seq_puts(m, ", normal");
179 break;
180
181 case I915_GGTT_VIEW_PARTIAL:
182 seq_printf(m, ", partial [%08llx+%x]",
8bab1193
CW
183 vma->ggtt_view.partial.offset << PAGE_SHIFT,
184 vma->ggtt_view.partial.size << PAGE_SHIFT);
21976853
CW
185 break;
186
187 case I915_GGTT_VIEW_ROTATED:
188 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
8bab1193
CW
189 vma->ggtt_view.rotated.plane[0].width,
190 vma->ggtt_view.rotated.plane[0].height,
191 vma->ggtt_view.rotated.plane[0].stride,
192 vma->ggtt_view.rotated.plane[0].offset,
193 vma->ggtt_view.rotated.plane[1].width,
194 vma->ggtt_view.rotated.plane[1].height,
195 vma->ggtt_view.rotated.plane[1].stride,
196 vma->ggtt_view.rotated.plane[1].offset);
21976853
CW
197 break;
198
199 default:
200 MISSING_CASE(vma->ggtt_view.type);
201 break;
202 }
203 }
49ef5294
CW
204 if (vma->fence)
205 seq_printf(m, " , fence: %d%s",
206 vma->fence->id,
207 i915_gem_active_isset(&vma->last_fence) ? "*" : "");
596c5923 208 seq_puts(m, ")");
1d693bcc 209 }
c1ad11fc 210 if (obj->stolen)
440fd528 211 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
27c01aae 212
d07f0e59 213 engine = i915_gem_object_last_write_engine(obj);
27c01aae
CW
214 if (engine)
215 seq_printf(m, " (%s)", engine->name);
216
faf5bf0a
CW
217 frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
218 if (frontbuffer_bits)
219 seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
37811fcc
CW
220}
221
e637d2cb 222static int obj_rank_by_stolen(const void *A, const void *B)
6d2b8885 223{
e637d2cb
CW
224 const struct drm_i915_gem_object *a =
225 *(const struct drm_i915_gem_object **)A;
226 const struct drm_i915_gem_object *b =
227 *(const struct drm_i915_gem_object **)B;
6d2b8885 228
2d05fa16
RV
229 if (a->stolen->start < b->stolen->start)
230 return -1;
231 if (a->stolen->start > b->stolen->start)
232 return 1;
233 return 0;
6d2b8885
CW
234}
235
236static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
237{
36cdd013
DW
238 struct drm_i915_private *dev_priv = node_to_i915(m->private);
239 struct drm_device *dev = &dev_priv->drm;
e637d2cb 240 struct drm_i915_gem_object **objects;
6d2b8885 241 struct drm_i915_gem_object *obj;
c44ef60e 242 u64 total_obj_size, total_gtt_size;
e637d2cb
CW
243 unsigned long total, count, n;
244 int ret;
245
246 total = READ_ONCE(dev_priv->mm.object_count);
2098105e 247 objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
e637d2cb
CW
248 if (!objects)
249 return -ENOMEM;
6d2b8885
CW
250
251 ret = mutex_lock_interruptible(&dev->struct_mutex);
252 if (ret)
e637d2cb 253 goto out;
6d2b8885
CW
254
255 total_obj_size = total_gtt_size = count = 0;
f2123818
CW
256
257 spin_lock(&dev_priv->mm.obj_lock);
258 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
e637d2cb
CW
259 if (count == total)
260 break;
261
6d2b8885
CW
262 if (obj->stolen == NULL)
263 continue;
264
e637d2cb 265 objects[count++] = obj;
6d2b8885 266 total_obj_size += obj->base.size;
ca1543be 267 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
e637d2cb 268
6d2b8885 269 }
f2123818 270 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
e637d2cb
CW
271 if (count == total)
272 break;
273
6d2b8885
CW
274 if (obj->stolen == NULL)
275 continue;
276
e637d2cb 277 objects[count++] = obj;
6d2b8885 278 total_obj_size += obj->base.size;
6d2b8885 279 }
f2123818 280 spin_unlock(&dev_priv->mm.obj_lock);
e637d2cb
CW
281
282 sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
283
6d2b8885 284 seq_puts(m, "Stolen:\n");
e637d2cb 285 for (n = 0; n < count; n++) {
6d2b8885 286 seq_puts(m, " ");
e637d2cb 287 describe_obj(m, objects[n]);
6d2b8885 288 seq_putc(m, '\n');
6d2b8885 289 }
e637d2cb 290 seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
6d2b8885 291 count, total_obj_size, total_gtt_size);
e637d2cb
CW
292
293 mutex_unlock(&dev->struct_mutex);
294out:
2098105e 295 kvfree(objects);
e637d2cb 296 return ret;
6d2b8885
CW
297}
298
2db8e9d6 299struct file_stats {
6313c204 300 struct drm_i915_file_private *file_priv;
c44ef60e
MK
301 unsigned long count;
302 u64 total, unbound;
303 u64 global, shared;
304 u64 active, inactive;
2db8e9d6
CW
305};
306
307static int per_file_stats(int id, void *ptr, void *data)
308{
309 struct drm_i915_gem_object *obj = ptr;
310 struct file_stats *stats = data;
6313c204 311 struct i915_vma *vma;
2db8e9d6 312
0caf81b5
CW
313 lockdep_assert_held(&obj->base.dev->struct_mutex);
314
2db8e9d6
CW
315 stats->count++;
316 stats->total += obj->base.size;
15717de2
CW
317 if (!obj->bind_count)
318 stats->unbound += obj->base.size;
c67a17e9
CW
319 if (obj->base.name || obj->base.dma_buf)
320 stats->shared += obj->base.size;
321
894eeecc
CW
322 list_for_each_entry(vma, &obj->vma_list, obj_link) {
323 if (!drm_mm_node_allocated(&vma->node))
324 continue;
6313c204 325
3272db53 326 if (i915_vma_is_ggtt(vma)) {
894eeecc
CW
327 stats->global += vma->node.size;
328 } else {
329 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
6313c204 330
82ad6443 331 if (ppgtt->vm.file != stats->file_priv)
6313c204 332 continue;
6313c204 333 }
894eeecc 334
b0decaf7 335 if (i915_vma_is_active(vma))
894eeecc
CW
336 stats->active += vma->node.size;
337 else
338 stats->inactive += vma->node.size;
2db8e9d6
CW
339 }
340
341 return 0;
342}
343
/*
 * Emit one summary line for a stats bucket; empty buckets are skipped.
 * do/while(0) keeps the multi-statement macro safe in if/else bodies.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
493018dc
BV
356
357static void print_batch_pool_stats(struct seq_file *m,
358 struct drm_i915_private *dev_priv)
359{
360 struct drm_i915_gem_object *obj;
361 struct file_stats stats;
e2f80391 362 struct intel_engine_cs *engine;
3b3f1650 363 enum intel_engine_id id;
b4ac5afc 364 int j;
493018dc
BV
365
366 memset(&stats, 0, sizeof(stats));
367
3b3f1650 368 for_each_engine(engine, dev_priv, id) {
e2f80391 369 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
8d9d5744 370 list_for_each_entry(obj,
e2f80391 371 &engine->batch_pool.cache_list[j],
8d9d5744
CW
372 batch_pool_link)
373 per_file_stats(0, obj, &stats);
374 }
06fbca71 375 }
493018dc 376
b0da1b79 377 print_file_stats(m, "[k]batch pool", stats);
493018dc
BV
378}
379
ab82a063 380static int per_file_ctx_stats(int idx, void *ptr, void *data)
15da9565
CW
381{
382 struct i915_gem_context *ctx = ptr;
ab82a063
CW
383 struct intel_engine_cs *engine;
384 enum intel_engine_id id;
385
386 for_each_engine(engine, ctx->i915, id) {
387 struct intel_context *ce = to_intel_context(ctx, engine);
15da9565 388
ab82a063
CW
389 if (ce->state)
390 per_file_stats(0, ce->state->obj, data);
391 if (ce->ring)
392 per_file_stats(0, ce->ring->vma->obj, data);
15da9565
CW
393 }
394
395 return 0;
396}
397
398static void print_context_stats(struct seq_file *m,
399 struct drm_i915_private *dev_priv)
400{
36cdd013 401 struct drm_device *dev = &dev_priv->drm;
15da9565
CW
402 struct file_stats stats;
403 struct drm_file *file;
404
405 memset(&stats, 0, sizeof(stats));
406
36cdd013 407 mutex_lock(&dev->struct_mutex);
15da9565
CW
408 if (dev_priv->kernel_context)
409 per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
410
36cdd013 411 list_for_each_entry(file, &dev->filelist, lhead) {
15da9565
CW
412 struct drm_i915_file_private *fpriv = file->driver_priv;
413 idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
414 }
36cdd013 415 mutex_unlock(&dev->struct_mutex);
15da9565
CW
416
417 print_file_stats(m, "[k]contexts", stats);
418}
419
36cdd013 420static int i915_gem_object_info(struct seq_file *m, void *data)
73aa808f 421{
36cdd013
DW
422 struct drm_i915_private *dev_priv = node_to_i915(m->private);
423 struct drm_device *dev = &dev_priv->drm;
72e96d64 424 struct i915_ggtt *ggtt = &dev_priv->ggtt;
7393b7ee
MA
425 u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
426 u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
6299f992 427 struct drm_i915_gem_object *obj;
7393b7ee 428 unsigned int page_sizes = 0;
2db8e9d6 429 struct drm_file *file;
7393b7ee 430 char buf[80];
73aa808f
CW
431 int ret;
432
433 ret = mutex_lock_interruptible(&dev->struct_mutex);
434 if (ret)
435 return ret;
436
3ef7f228 437 seq_printf(m, "%u objects, %llu bytes\n",
6299f992
CW
438 dev_priv->mm.object_count,
439 dev_priv->mm.object_memory);
440
1544c42e
CW
441 size = count = 0;
442 mapped_size = mapped_count = 0;
443 purgeable_size = purgeable_count = 0;
7393b7ee 444 huge_size = huge_count = 0;
f2123818
CW
445
446 spin_lock(&dev_priv->mm.obj_lock);
447 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
2bd160a1
CW
448 size += obj->base.size;
449 ++count;
450
a4f5ea64 451 if (obj->mm.madv == I915_MADV_DONTNEED) {
2bd160a1
CW
452 purgeable_size += obj->base.size;
453 ++purgeable_count;
454 }
455
a4f5ea64 456 if (obj->mm.mapping) {
2bd160a1
CW
457 mapped_count++;
458 mapped_size += obj->base.size;
be19b10d 459 }
7393b7ee
MA
460
461 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
462 huge_count++;
463 huge_size += obj->base.size;
464 page_sizes |= obj->mm.page_sizes.sg;
465 }
b7abb714 466 }
c44ef60e 467 seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
6c085a72 468
2bd160a1 469 size = count = dpy_size = dpy_count = 0;
f2123818 470 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
2bd160a1
CW
471 size += obj->base.size;
472 ++count;
473
bd3d2252 474 if (obj->pin_global) {
2bd160a1
CW
475 dpy_size += obj->base.size;
476 ++dpy_count;
6299f992 477 }
2bd160a1 478
a4f5ea64 479 if (obj->mm.madv == I915_MADV_DONTNEED) {
b7abb714
CW
480 purgeable_size += obj->base.size;
481 ++purgeable_count;
482 }
2bd160a1 483
a4f5ea64 484 if (obj->mm.mapping) {
2bd160a1
CW
485 mapped_count++;
486 mapped_size += obj->base.size;
be19b10d 487 }
7393b7ee
MA
488
489 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
490 huge_count++;
491 huge_size += obj->base.size;
492 page_sizes |= obj->mm.page_sizes.sg;
493 }
6299f992 494 }
f2123818
CW
495 spin_unlock(&dev_priv->mm.obj_lock);
496
2bd160a1
CW
497 seq_printf(m, "%u bound objects, %llu bytes\n",
498 count, size);
c44ef60e 499 seq_printf(m, "%u purgeable objects, %llu bytes\n",
b7abb714 500 purgeable_count, purgeable_size);
2bd160a1
CW
501 seq_printf(m, "%u mapped objects, %llu bytes\n",
502 mapped_count, mapped_size);
7393b7ee
MA
503 seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
504 huge_count,
505 stringify_page_sizes(page_sizes, buf, sizeof(buf)),
506 huge_size);
bd3d2252 507 seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
2bd160a1 508 dpy_count, dpy_size);
6299f992 509
b7128ef1 510 seq_printf(m, "%llu [%pa] gtt total\n",
82ad6443 511 ggtt->vm.total, &ggtt->mappable_end);
7393b7ee
MA
512 seq_printf(m, "Supported page sizes: %s\n",
513 stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
514 buf, sizeof(buf)));
73aa808f 515
493018dc
BV
516 seq_putc(m, '\n');
517 print_batch_pool_stats(m, dev_priv);
1d2ac403
DV
518 mutex_unlock(&dev->struct_mutex);
519
520 mutex_lock(&dev->filelist_mutex);
15da9565 521 print_context_stats(m, dev_priv);
2db8e9d6
CW
522 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
523 struct file_stats stats;
c84455b4 524 struct drm_i915_file_private *file_priv = file->driver_priv;
e61e0f51 525 struct i915_request *request;
3ec2f427 526 struct task_struct *task;
2db8e9d6 527
0caf81b5
CW
528 mutex_lock(&dev->struct_mutex);
529
2db8e9d6 530 memset(&stats, 0, sizeof(stats));
6313c204 531 stats.file_priv = file->driver_priv;
5b5ffff0 532 spin_lock(&file->table_lock);
2db8e9d6 533 idr_for_each(&file->object_idr, per_file_stats, &stats);
5b5ffff0 534 spin_unlock(&file->table_lock);
3ec2f427
TH
535 /*
536 * Although we have a valid reference on file->pid, that does
537 * not guarantee that the task_struct who called get_pid() is
538 * still alive (e.g. get_pid(current) => fork() => exit()).
539 * Therefore, we need to protect this ->comm access using RCU.
540 */
c84455b4 541 request = list_first_entry_or_null(&file_priv->mm.request_list,
e61e0f51 542 struct i915_request,
c8659efa 543 client_link);
3ec2f427 544 rcu_read_lock();
4e0d64db
CW
545 task = pid_task(request && request->gem_context->pid ?
546 request->gem_context->pid : file->pid,
c84455b4 547 PIDTYPE_PID);
493018dc 548 print_file_stats(m, task ? task->comm : "<unknown>", stats);
3ec2f427 549 rcu_read_unlock();
0caf81b5 550
c84455b4 551 mutex_unlock(&dev->struct_mutex);
2db8e9d6 552 }
1d2ac403 553 mutex_unlock(&dev->filelist_mutex);
73aa808f
CW
554
555 return 0;
556}
557
aee56cff 558static int i915_gem_gtt_info(struct seq_file *m, void *data)
08c18323 559{
9f25d007 560 struct drm_info_node *node = m->private;
36cdd013
DW
561 struct drm_i915_private *dev_priv = node_to_i915(node);
562 struct drm_device *dev = &dev_priv->drm;
f2123818 563 struct drm_i915_gem_object **objects;
08c18323 564 struct drm_i915_gem_object *obj;
c44ef60e 565 u64 total_obj_size, total_gtt_size;
f2123818 566 unsigned long nobject, n;
08c18323
CW
567 int count, ret;
568
f2123818
CW
569 nobject = READ_ONCE(dev_priv->mm.object_count);
570 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571 if (!objects)
572 return -ENOMEM;
573
08c18323
CW
574 ret = mutex_lock_interruptible(&dev->struct_mutex);
575 if (ret)
576 return ret;
577
f2123818
CW
578 count = 0;
579 spin_lock(&dev_priv->mm.obj_lock);
580 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581 objects[count++] = obj;
582 if (count == nobject)
583 break;
584 }
585 spin_unlock(&dev_priv->mm.obj_lock);
586
587 total_obj_size = total_gtt_size = 0;
588 for (n = 0; n < count; n++) {
589 obj = objects[n];
590
267f0c90 591 seq_puts(m, " ");
08c18323 592 describe_obj(m, obj);
267f0c90 593 seq_putc(m, '\n');
08c18323 594 total_obj_size += obj->base.size;
ca1543be 595 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
08c18323
CW
596 }
597
598 mutex_unlock(&dev->struct_mutex);
599
c44ef60e 600 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
08c18323 601 count, total_obj_size, total_gtt_size);
f2123818 602 kvfree(objects);
08c18323
CW
603
604 return 0;
605}
606
493018dc
BV
607static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
608{
36cdd013
DW
609 struct drm_i915_private *dev_priv = node_to_i915(m->private);
610 struct drm_device *dev = &dev_priv->drm;
493018dc 611 struct drm_i915_gem_object *obj;
e2f80391 612 struct intel_engine_cs *engine;
3b3f1650 613 enum intel_engine_id id;
8d9d5744 614 int total = 0;
b4ac5afc 615 int ret, j;
493018dc
BV
616
617 ret = mutex_lock_interruptible(&dev->struct_mutex);
618 if (ret)
619 return ret;
620
3b3f1650 621 for_each_engine(engine, dev_priv, id) {
e2f80391 622 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
8d9d5744
CW
623 int count;
624
625 count = 0;
626 list_for_each_entry(obj,
e2f80391 627 &engine->batch_pool.cache_list[j],
8d9d5744
CW
628 batch_pool_link)
629 count++;
630 seq_printf(m, "%s cache[%d]: %d objects\n",
e2f80391 631 engine->name, j, count);
8d9d5744
CW
632
633 list_for_each_entry(obj,
e2f80391 634 &engine->batch_pool.cache_list[j],
8d9d5744
CW
635 batch_pool_link) {
636 seq_puts(m, " ");
637 describe_obj(m, obj);
638 seq_putc(m, '\n');
639 }
640
641 total += count;
06fbca71 642 }
493018dc
BV
643 }
644
8d9d5744 645 seq_printf(m, "total: %d\n", total);
493018dc
BV
646
647 mutex_unlock(&dev->struct_mutex);
648
649 return 0;
650}
651
80d89350
TU
652static void gen8_display_interrupt_info(struct seq_file *m)
653{
654 struct drm_i915_private *dev_priv = node_to_i915(m->private);
655 int pipe;
656
657 for_each_pipe(dev_priv, pipe) {
658 enum intel_display_power_domain power_domain;
659
660 power_domain = POWER_DOMAIN_PIPE(pipe);
661 if (!intel_display_power_get_if_enabled(dev_priv,
662 power_domain)) {
663 seq_printf(m, "Pipe %c power disabled\n",
664 pipe_name(pipe));
665 continue;
666 }
667 seq_printf(m, "Pipe %c IMR:\t%08x\n",
668 pipe_name(pipe),
669 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
670 seq_printf(m, "Pipe %c IIR:\t%08x\n",
671 pipe_name(pipe),
672 I915_READ(GEN8_DE_PIPE_IIR(pipe)));
673 seq_printf(m, "Pipe %c IER:\t%08x\n",
674 pipe_name(pipe),
675 I915_READ(GEN8_DE_PIPE_IER(pipe)));
676
677 intel_display_power_put(dev_priv, power_domain);
678 }
679
680 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
681 I915_READ(GEN8_DE_PORT_IMR));
682 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
683 I915_READ(GEN8_DE_PORT_IIR));
684 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
685 I915_READ(GEN8_DE_PORT_IER));
686
687 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
688 I915_READ(GEN8_DE_MISC_IMR));
689 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
690 I915_READ(GEN8_DE_MISC_IIR));
691 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
692 I915_READ(GEN8_DE_MISC_IER));
693
694 seq_printf(m, "PCU interrupt mask:\t%08x\n",
695 I915_READ(GEN8_PCU_IMR));
696 seq_printf(m, "PCU interrupt identity:\t%08x\n",
697 I915_READ(GEN8_PCU_IIR));
698 seq_printf(m, "PCU interrupt enable:\t%08x\n",
699 I915_READ(GEN8_PCU_IER));
700}
701
2017263e
BG
702static int i915_interrupt_info(struct seq_file *m, void *data)
703{
36cdd013 704 struct drm_i915_private *dev_priv = node_to_i915(m->private);
e2f80391 705 struct intel_engine_cs *engine;
3b3f1650 706 enum intel_engine_id id;
4bb05040 707 int i, pipe;
de227ef0 708
c8c8fb33 709 intel_runtime_pm_get(dev_priv);
2017263e 710
36cdd013 711 if (IS_CHERRYVIEW(dev_priv)) {
74e1ca8c
VS
712 seq_printf(m, "Master Interrupt Control:\t%08x\n",
713 I915_READ(GEN8_MASTER_IRQ));
714
715 seq_printf(m, "Display IER:\t%08x\n",
716 I915_READ(VLV_IER));
717 seq_printf(m, "Display IIR:\t%08x\n",
718 I915_READ(VLV_IIR));
719 seq_printf(m, "Display IIR_RW:\t%08x\n",
720 I915_READ(VLV_IIR_RW));
721 seq_printf(m, "Display IMR:\t%08x\n",
722 I915_READ(VLV_IMR));
9c870d03
CW
723 for_each_pipe(dev_priv, pipe) {
724 enum intel_display_power_domain power_domain;
725
726 power_domain = POWER_DOMAIN_PIPE(pipe);
727 if (!intel_display_power_get_if_enabled(dev_priv,
728 power_domain)) {
729 seq_printf(m, "Pipe %c power disabled\n",
730 pipe_name(pipe));
731 continue;
732 }
733
74e1ca8c
VS
734 seq_printf(m, "Pipe %c stat:\t%08x\n",
735 pipe_name(pipe),
736 I915_READ(PIPESTAT(pipe)));
737
9c870d03
CW
738 intel_display_power_put(dev_priv, power_domain);
739 }
740
741 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
74e1ca8c
VS
742 seq_printf(m, "Port hotplug:\t%08x\n",
743 I915_READ(PORT_HOTPLUG_EN));
744 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
745 I915_READ(VLV_DPFLIPSTAT));
746 seq_printf(m, "DPINVGTT:\t%08x\n",
747 I915_READ(DPINVGTT));
9c870d03 748 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
74e1ca8c
VS
749
750 for (i = 0; i < 4; i++) {
751 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
752 i, I915_READ(GEN8_GT_IMR(i)));
753 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
754 i, I915_READ(GEN8_GT_IIR(i)));
755 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
756 i, I915_READ(GEN8_GT_IER(i)));
757 }
758
759 seq_printf(m, "PCU interrupt mask:\t%08x\n",
760 I915_READ(GEN8_PCU_IMR));
761 seq_printf(m, "PCU interrupt identity:\t%08x\n",
762 I915_READ(GEN8_PCU_IIR));
763 seq_printf(m, "PCU interrupt enable:\t%08x\n",
764 I915_READ(GEN8_PCU_IER));
80d89350
TU
765 } else if (INTEL_GEN(dev_priv) >= 11) {
766 seq_printf(m, "Master Interrupt Control: %08x\n",
767 I915_READ(GEN11_GFX_MSTR_IRQ));
768
769 seq_printf(m, "Render/Copy Intr Enable: %08x\n",
770 I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
771 seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
772 I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
773 seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
774 I915_READ(GEN11_GUC_SG_INTR_ENABLE));
775 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
776 I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
777 seq_printf(m, "Crypto Intr Enable:\t %08x\n",
778 I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
779 seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
780 I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
781
782 seq_printf(m, "Display Interrupt Control:\t%08x\n",
783 I915_READ(GEN11_DISPLAY_INT_CTL));
784
785 gen8_display_interrupt_info(m);
36cdd013 786 } else if (INTEL_GEN(dev_priv) >= 8) {
a123f157
BW
787 seq_printf(m, "Master Interrupt Control:\t%08x\n",
788 I915_READ(GEN8_MASTER_IRQ));
789
790 for (i = 0; i < 4; i++) {
791 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
792 i, I915_READ(GEN8_GT_IMR(i)));
793 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
794 i, I915_READ(GEN8_GT_IIR(i)));
795 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
796 i, I915_READ(GEN8_GT_IER(i)));
797 }
798
80d89350 799 gen8_display_interrupt_info(m);
36cdd013 800 } else if (IS_VALLEYVIEW(dev_priv)) {
7e231dbe
JB
801 seq_printf(m, "Display IER:\t%08x\n",
802 I915_READ(VLV_IER));
803 seq_printf(m, "Display IIR:\t%08x\n",
804 I915_READ(VLV_IIR));
805 seq_printf(m, "Display IIR_RW:\t%08x\n",
806 I915_READ(VLV_IIR_RW));
807 seq_printf(m, "Display IMR:\t%08x\n",
808 I915_READ(VLV_IMR));
4f4631af
CW
809 for_each_pipe(dev_priv, pipe) {
810 enum intel_display_power_domain power_domain;
811
812 power_domain = POWER_DOMAIN_PIPE(pipe);
813 if (!intel_display_power_get_if_enabled(dev_priv,
814 power_domain)) {
815 seq_printf(m, "Pipe %c power disabled\n",
816 pipe_name(pipe));
817 continue;
818 }
819
7e231dbe
JB
820 seq_printf(m, "Pipe %c stat:\t%08x\n",
821 pipe_name(pipe),
822 I915_READ(PIPESTAT(pipe)));
4f4631af
CW
823 intel_display_power_put(dev_priv, power_domain);
824 }
7e231dbe
JB
825
826 seq_printf(m, "Master IER:\t%08x\n",
827 I915_READ(VLV_MASTER_IER));
828
829 seq_printf(m, "Render IER:\t%08x\n",
830 I915_READ(GTIER));
831 seq_printf(m, "Render IIR:\t%08x\n",
832 I915_READ(GTIIR));
833 seq_printf(m, "Render IMR:\t%08x\n",
834 I915_READ(GTIMR));
835
836 seq_printf(m, "PM IER:\t\t%08x\n",
837 I915_READ(GEN6_PMIER));
838 seq_printf(m, "PM IIR:\t\t%08x\n",
839 I915_READ(GEN6_PMIIR));
840 seq_printf(m, "PM IMR:\t\t%08x\n",
841 I915_READ(GEN6_PMIMR));
842
843 seq_printf(m, "Port hotplug:\t%08x\n",
844 I915_READ(PORT_HOTPLUG_EN));
845 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
846 I915_READ(VLV_DPFLIPSTAT));
847 seq_printf(m, "DPINVGTT:\t%08x\n",
848 I915_READ(DPINVGTT));
849
36cdd013 850 } else if (!HAS_PCH_SPLIT(dev_priv)) {
5f6a1695
ZW
851 seq_printf(m, "Interrupt enable: %08x\n",
852 I915_READ(IER));
853 seq_printf(m, "Interrupt identity: %08x\n",
854 I915_READ(IIR));
855 seq_printf(m, "Interrupt mask: %08x\n",
856 I915_READ(IMR));
055e393f 857 for_each_pipe(dev_priv, pipe)
9db4a9c7
JB
858 seq_printf(m, "Pipe %c stat: %08x\n",
859 pipe_name(pipe),
860 I915_READ(PIPESTAT(pipe)));
5f6a1695
ZW
861 } else {
862 seq_printf(m, "North Display Interrupt enable: %08x\n",
863 I915_READ(DEIER));
864 seq_printf(m, "North Display Interrupt identity: %08x\n",
865 I915_READ(DEIIR));
866 seq_printf(m, "North Display Interrupt mask: %08x\n",
867 I915_READ(DEIMR));
868 seq_printf(m, "South Display Interrupt enable: %08x\n",
869 I915_READ(SDEIER));
870 seq_printf(m, "South Display Interrupt identity: %08x\n",
871 I915_READ(SDEIIR));
872 seq_printf(m, "South Display Interrupt mask: %08x\n",
873 I915_READ(SDEIMR));
874 seq_printf(m, "Graphics Interrupt enable: %08x\n",
875 I915_READ(GTIER));
876 seq_printf(m, "Graphics Interrupt identity: %08x\n",
877 I915_READ(GTIIR));
878 seq_printf(m, "Graphics Interrupt mask: %08x\n",
879 I915_READ(GTIMR));
880 }
80d89350
TU
881
882 if (INTEL_GEN(dev_priv) >= 11) {
883 seq_printf(m, "RCS Intr Mask:\t %08x\n",
884 I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
885 seq_printf(m, "BCS Intr Mask:\t %08x\n",
886 I915_READ(GEN11_BCS_RSVD_INTR_MASK));
887 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
888 I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
889 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
890 I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
891 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
892 I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
893 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
894 I915_READ(GEN11_GUC_SG_INTR_MASK));
895 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
896 I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
897 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
898 I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
899 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
900 I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
901
902 } else if (INTEL_GEN(dev_priv) >= 6) {
d5acadfe 903 for_each_engine(engine, dev_priv, id) {
a2c7f6fd
CW
904 seq_printf(m,
905 "Graphics Interrupt mask (%s): %08x\n",
e2f80391 906 engine->name, I915_READ_IMR(engine));
9862e600 907 }
9862e600 908 }
80d89350 909
c8c8fb33 910 intel_runtime_pm_put(dev_priv);
de227ef0 911
2017263e
BG
912 return 0;
913}
914
a6172a80
CW
915static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
916{
36cdd013
DW
917 struct drm_i915_private *dev_priv = node_to_i915(m->private);
918 struct drm_device *dev = &dev_priv->drm;
de227ef0
CW
919 int i, ret;
920
921 ret = mutex_lock_interruptible(&dev->struct_mutex);
922 if (ret)
923 return ret;
a6172a80 924
a6172a80
CW
925 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
926 for (i = 0; i < dev_priv->num_fence_regs; i++) {
49ef5294 927 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
a6172a80 928
6c085a72
CW
929 seq_printf(m, "Fence %d, pin count = %d, object = ",
930 i, dev_priv->fence_regs[i].pin_count);
49ef5294 931 if (!vma)
267f0c90 932 seq_puts(m, "unused");
c2c347a9 933 else
49ef5294 934 describe_obj(m, vma->obj);
267f0c90 935 seq_putc(m, '\n');
a6172a80
CW
936 }
937
05394f39 938 mutex_unlock(&dev->struct_mutex);
a6172a80
CW
939 return 0;
940}
941
98a2f411 942#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
5a4c6f1b
CW
943static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
944 size_t count, loff_t *pos)
d5442303 945{
5a4c6f1b
CW
946 struct i915_gpu_state *error = file->private_data;
947 struct drm_i915_error_state_buf str;
948 ssize_t ret;
949 loff_t tmp;
d5442303 950
5a4c6f1b
CW
951 if (!error)
952 return 0;
d5442303 953
5a4c6f1b
CW
954 ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
955 if (ret)
956 return ret;
d5442303 957
5a4c6f1b
CW
958 ret = i915_error_state_to_str(&str, error);
959 if (ret)
960 goto out;
d5442303 961
5a4c6f1b
CW
962 tmp = 0;
963 ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
964 if (ret < 0)
965 goto out;
d5442303 966
5a4c6f1b
CW
967 *pos = str.start + ret;
968out:
969 i915_error_state_buf_release(&str);
970 return ret;
971}
edc3d884 972
5a4c6f1b
CW
973static int gpu_state_release(struct inode *inode, struct file *file)
974{
975 i915_gpu_state_put(file->private_data);
edc3d884 976 return 0;
d5442303
DV
977}
978
5a4c6f1b 979static int i915_gpu_info_open(struct inode *inode, struct file *file)
d5442303 980{
090e5fe3 981 struct drm_i915_private *i915 = inode->i_private;
5a4c6f1b 982 struct i915_gpu_state *gpu;
d5442303 983
090e5fe3
CW
984 intel_runtime_pm_get(i915);
985 gpu = i915_capture_gpu_state(i915);
986 intel_runtime_pm_put(i915);
5a4c6f1b
CW
987 if (!gpu)
988 return -ENOMEM;
d5442303 989
5a4c6f1b 990 file->private_data = gpu;
edc3d884
MK
991 return 0;
992}
993
5a4c6f1b
CW
994static const struct file_operations i915_gpu_info_fops = {
995 .owner = THIS_MODULE,
996 .open = i915_gpu_info_open,
997 .read = gpu_state_read,
998 .llseek = default_llseek,
999 .release = gpu_state_release,
1000};
1001
1002static ssize_t
1003i915_error_state_write(struct file *filp,
1004 const char __user *ubuf,
1005 size_t cnt,
1006 loff_t *ppos)
4dc955f7 1007{
5a4c6f1b 1008 struct i915_gpu_state *error = filp->private_data;
4dc955f7 1009
5a4c6f1b
CW
1010 if (!error)
1011 return 0;
edc3d884 1012
5a4c6f1b
CW
1013 DRM_DEBUG_DRIVER("Resetting error state\n");
1014 i915_reset_error_state(error->i915);
edc3d884 1015
5a4c6f1b
CW
1016 return cnt;
1017}
edc3d884 1018
5a4c6f1b
CW
1019static int i915_error_state_open(struct inode *inode, struct file *file)
1020{
1021 file->private_data = i915_first_error_state(inode->i_private);
1022 return 0;
d5442303
DV
1023}
1024
1025static const struct file_operations i915_error_state_fops = {
1026 .owner = THIS_MODULE,
1027 .open = i915_error_state_open,
5a4c6f1b 1028 .read = gpu_state_read,
d5442303
DV
1029 .write = i915_error_state_write,
1030 .llseek = default_llseek,
5a4c6f1b 1031 .release = gpu_state_release,
d5442303 1032};
98a2f411
CW
1033#endif
1034
647416f9
KC
1035static int
1036i915_next_seqno_set(void *data, u64 val)
1037{
36cdd013
DW
1038 struct drm_i915_private *dev_priv = data;
1039 struct drm_device *dev = &dev_priv->drm;
40633219
MK
1040 int ret;
1041
40633219
MK
1042 ret = mutex_lock_interruptible(&dev->struct_mutex);
1043 if (ret)
1044 return ret;
1045
65c475c6 1046 intel_runtime_pm_get(dev_priv);
73cb9701 1047 ret = i915_gem_set_global_seqno(dev, val);
65c475c6
CW
1048 intel_runtime_pm_put(dev_priv);
1049
40633219
MK
1050 mutex_unlock(&dev->struct_mutex);
1051
647416f9 1052 return ret;
40633219
MK
1053}
1054
647416f9 1055DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
9b6586ae 1056 NULL, i915_next_seqno_set,
3a3b4f98 1057 "0x%llx\n");
40633219 1058
adb4bd12 1059static int i915_frequency_info(struct seq_file *m, void *unused)
f97108d1 1060{
36cdd013 1061 struct drm_i915_private *dev_priv = node_to_i915(m->private);
562d9bae 1062 struct intel_rps *rps = &dev_priv->gt_pm.rps;
c8c8fb33
PZ
1063 int ret = 0;
1064
1065 intel_runtime_pm_get(dev_priv);
3b8d8d91 1066
36cdd013 1067 if (IS_GEN5(dev_priv)) {
3b8d8d91
JB
1068 u16 rgvswctl = I915_READ16(MEMSWCTL);
1069 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1070
1071 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1072 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1073 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1074 MEMSTAT_VID_SHIFT);
1075 seq_printf(m, "Current P-state: %d\n",
1076 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
36cdd013 1077 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
0d6fc92a 1078 u32 rpmodectl, freq_sts;
666a4537 1079
9f817501 1080 mutex_lock(&dev_priv->pcu_lock);
0d6fc92a
SAK
1081
1082 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1083 seq_printf(m, "Video Turbo Mode: %s\n",
1084 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1085 seq_printf(m, "HW control enabled: %s\n",
1086 yesno(rpmodectl & GEN6_RP_ENABLE));
1087 seq_printf(m, "SW control enabled: %s\n",
1088 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1089 GEN6_RP_MEDIA_SW_MODE));
1090
666a4537
WB
1091 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1092 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1093 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1094
1095 seq_printf(m, "actual GPU freq: %d MHz\n",
1096 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1097
1098 seq_printf(m, "current GPU freq: %d MHz\n",
562d9bae 1099 intel_gpu_freq(dev_priv, rps->cur_freq));
666a4537
WB
1100
1101 seq_printf(m, "max GPU freq: %d MHz\n",
562d9bae 1102 intel_gpu_freq(dev_priv, rps->max_freq));
666a4537
WB
1103
1104 seq_printf(m, "min GPU freq: %d MHz\n",
562d9bae 1105 intel_gpu_freq(dev_priv, rps->min_freq));
666a4537
WB
1106
1107 seq_printf(m, "idle GPU freq: %d MHz\n",
562d9bae 1108 intel_gpu_freq(dev_priv, rps->idle_freq));
666a4537
WB
1109
1110 seq_printf(m,
1111 "efficient (RPe) frequency: %d MHz\n",
562d9bae 1112 intel_gpu_freq(dev_priv, rps->efficient_freq));
9f817501 1113 mutex_unlock(&dev_priv->pcu_lock);
36cdd013 1114 } else if (INTEL_GEN(dev_priv) >= 6) {
35040562
BP
1115 u32 rp_state_limits;
1116 u32 gt_perf_status;
1117 u32 rp_state_cap;
0d8f9491 1118 u32 rpmodectl, rpinclimit, rpdeclimit;
8e8c06cd 1119 u32 rpstat, cagf, reqf;
ccab5c82
JB
1120 u32 rpupei, rpcurup, rpprevup;
1121 u32 rpdownei, rpcurdown, rpprevdown;
9dd3c605 1122 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
3b8d8d91
JB
1123 int max_freq;
1124
35040562 1125 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
cc3f90f0 1126 if (IS_GEN9_LP(dev_priv)) {
35040562
BP
1127 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1128 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1129 } else {
1130 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1131 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1132 }
1133
3b8d8d91 1134 /* RPSTAT1 is in the GT power well */
59bad947 1135 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
3b8d8d91 1136
8e8c06cd 1137 reqf = I915_READ(GEN6_RPNSWREQ);
35ceabf3 1138 if (INTEL_GEN(dev_priv) >= 9)
60260a5b
AG
1139 reqf >>= 23;
1140 else {
1141 reqf &= ~GEN6_TURBO_DISABLE;
36cdd013 1142 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
60260a5b
AG
1143 reqf >>= 24;
1144 else
1145 reqf >>= 25;
1146 }
7c59a9c1 1147 reqf = intel_gpu_freq(dev_priv, reqf);
8e8c06cd 1148
0d8f9491
CW
1149 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1150 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1151 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1152
ccab5c82 1153 rpstat = I915_READ(GEN6_RPSTAT1);
d6cda9c7
AG
1154 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1155 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1156 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1157 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1158 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1159 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
c84b2705
TU
1160 cagf = intel_gpu_freq(dev_priv,
1161 intel_get_cagf(dev_priv, rpstat));
ccab5c82 1162
59bad947 1163 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
d1ebd816 1164
6b7a6a7b
OM
1165 if (INTEL_GEN(dev_priv) >= 11) {
1166 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1167 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1168 /*
1169 * The equivalent to the PM ISR & IIR cannot be read
1170 * without affecting the current state of the system
1171 */
1172 pm_isr = 0;
1173 pm_iir = 0;
1174 } else if (INTEL_GEN(dev_priv) >= 8) {
9dd3c605
PZ
1175 pm_ier = I915_READ(GEN8_GT_IER(2));
1176 pm_imr = I915_READ(GEN8_GT_IMR(2));
1177 pm_isr = I915_READ(GEN8_GT_ISR(2));
1178 pm_iir = I915_READ(GEN8_GT_IIR(2));
6b7a6a7b
OM
1179 } else {
1180 pm_ier = I915_READ(GEN6_PMIER);
1181 pm_imr = I915_READ(GEN6_PMIMR);
1182 pm_isr = I915_READ(GEN6_PMISR);
1183 pm_iir = I915_READ(GEN6_PMIIR);
9dd3c605 1184 }
6b7a6a7b
OM
1185 pm_mask = I915_READ(GEN6_PMINTRMSK);
1186
960e5465
SAK
1187 seq_printf(m, "Video Turbo Mode: %s\n",
1188 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1189 seq_printf(m, "HW control enabled: %s\n",
1190 yesno(rpmodectl & GEN6_RP_ENABLE));
1191 seq_printf(m, "SW control enabled: %s\n",
1192 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1193 GEN6_RP_MEDIA_SW_MODE));
6b7a6a7b
OM
1194
1195 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1196 pm_ier, pm_imr, pm_mask);
1197 if (INTEL_GEN(dev_priv) <= 10)
1198 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1199 pm_isr, pm_iir);
5dd04556 1200 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
562d9bae 1201 rps->pm_intrmsk_mbz);
3b8d8d91 1202 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
3b8d8d91 1203 seq_printf(m, "Render p-state ratio: %d\n",
35ceabf3 1204 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
3b8d8d91
JB
1205 seq_printf(m, "Render p-state VID: %d\n",
1206 gt_perf_status & 0xff);
1207 seq_printf(m, "Render p-state limit: %d\n",
1208 rp_state_limits & 0xff);
0d8f9491
CW
1209 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1210 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1211 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1212 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
8e8c06cd 1213 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
f82855d3 1214 seq_printf(m, "CAGF: %dMHz\n", cagf);
d6cda9c7
AG
1215 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1216 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1217 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1218 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1219 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1220 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
60548c55
CW
1221 seq_printf(m, "Up threshold: %d%%\n",
1222 rps->power.up_threshold);
d86ed34a 1223
d6cda9c7
AG
1224 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1225 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1226 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1227 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1228 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1229 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
60548c55
CW
1230 seq_printf(m, "Down threshold: %d%%\n",
1231 rps->power.down_threshold);
3b8d8d91 1232
cc3f90f0 1233 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
35040562 1234 rp_state_cap >> 16) & 0xff;
35ceabf3 1235 max_freq *= (IS_GEN9_BC(dev_priv) ||
2b2874ef 1236 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1237 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
7c59a9c1 1238 intel_gpu_freq(dev_priv, max_freq));
3b8d8d91
JB
1239
1240 max_freq = (rp_state_cap & 0xff00) >> 8;
35ceabf3 1241 max_freq *= (IS_GEN9_BC(dev_priv) ||
2b2874ef 1242 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1243 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
7c59a9c1 1244 intel_gpu_freq(dev_priv, max_freq));
3b8d8d91 1245
cc3f90f0 1246 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
35040562 1247 rp_state_cap >> 0) & 0xff;
35ceabf3 1248 max_freq *= (IS_GEN9_BC(dev_priv) ||
2b2874ef 1249 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
3b8d8d91 1250 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
7c59a9c1 1251 intel_gpu_freq(dev_priv, max_freq));
31c77388 1252 seq_printf(m, "Max overclocked frequency: %dMHz\n",
562d9bae 1253 intel_gpu_freq(dev_priv, rps->max_freq));
aed242ff 1254
d86ed34a 1255 seq_printf(m, "Current freq: %d MHz\n",
562d9bae 1256 intel_gpu_freq(dev_priv, rps->cur_freq));
d86ed34a 1257 seq_printf(m, "Actual freq: %d MHz\n", cagf);
aed242ff 1258 seq_printf(m, "Idle freq: %d MHz\n",
562d9bae 1259 intel_gpu_freq(dev_priv, rps->idle_freq));
d86ed34a 1260 seq_printf(m, "Min freq: %d MHz\n",
562d9bae 1261 intel_gpu_freq(dev_priv, rps->min_freq));
29ecd78d 1262 seq_printf(m, "Boost freq: %d MHz\n",
562d9bae 1263 intel_gpu_freq(dev_priv, rps->boost_freq));
d86ed34a 1264 seq_printf(m, "Max freq: %d MHz\n",
562d9bae 1265 intel_gpu_freq(dev_priv, rps->max_freq));
d86ed34a
CW
1266 seq_printf(m,
1267 "efficient (RPe) frequency: %d MHz\n",
562d9bae 1268 intel_gpu_freq(dev_priv, rps->efficient_freq));
3b8d8d91 1269 } else {
267f0c90 1270 seq_puts(m, "no P-state info available\n");
3b8d8d91 1271 }
f97108d1 1272
49cd97a3 1273 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1170f28c
MK
1274 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1275 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1276
c8c8fb33
PZ
1277 intel_runtime_pm_put(dev_priv);
1278 return ret;
f97108d1
JB
1279}
1280
d636951e
BW
1281static void i915_instdone_info(struct drm_i915_private *dev_priv,
1282 struct seq_file *m,
1283 struct intel_instdone *instdone)
1284{
f9e61372
BW
1285 int slice;
1286 int subslice;
1287
d636951e
BW
1288 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1289 instdone->instdone);
1290
1291 if (INTEL_GEN(dev_priv) <= 3)
1292 return;
1293
1294 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1295 instdone->slice_common);
1296
1297 if (INTEL_GEN(dev_priv) <= 6)
1298 return;
1299
f9e61372
BW
1300 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1301 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1302 slice, subslice, instdone->sampler[slice][subslice]);
1303
1304 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1305 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1306 slice, subslice, instdone->row[slice][subslice]);
d636951e
BW
1307}
1308
f654449a
CW
1309static int i915_hangcheck_info(struct seq_file *m, void *unused)
1310{
36cdd013 1311 struct drm_i915_private *dev_priv = node_to_i915(m->private);
e2f80391 1312 struct intel_engine_cs *engine;
666796da
TU
1313 u64 acthd[I915_NUM_ENGINES];
1314 u32 seqno[I915_NUM_ENGINES];
d636951e 1315 struct intel_instdone instdone;
c3232b18 1316 enum intel_engine_id id;
f654449a 1317
8af29b0c 1318 if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
8c185eca
CW
1319 seq_puts(m, "Wedged\n");
1320 if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1321 seq_puts(m, "Reset in progress: struct_mutex backoff\n");
1322 if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
1323 seq_puts(m, "Reset in progress: reset handoff to waiter\n");
8af29b0c 1324 if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
8c185eca 1325 seq_puts(m, "Waiter holding struct mutex\n");
8af29b0c 1326 if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
8c185eca 1327 seq_puts(m, "struct_mutex blocked for reset\n");
8af29b0c 1328
4f044a88 1329 if (!i915_modparams.enable_hangcheck) {
8c185eca 1330 seq_puts(m, "Hangcheck disabled\n");
f654449a
CW
1331 return 0;
1332 }
1333
ebbc7546
MK
1334 intel_runtime_pm_get(dev_priv);
1335
3b3f1650 1336 for_each_engine(engine, dev_priv, id) {
7e37f889 1337 acthd[id] = intel_engine_get_active_head(engine);
1b7744e7 1338 seqno[id] = intel_engine_get_seqno(engine);
ebbc7546
MK
1339 }
1340
3b3f1650 1341 intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
61642ff0 1342
ebbc7546
MK
1343 intel_runtime_pm_put(dev_priv);
1344
8352aea3
CW
1345 if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1346 seq_printf(m, "Hangcheck active, timer fires in %dms\n",
f654449a
CW
1347 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1348 jiffies));
8352aea3
CW
1349 else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1350 seq_puts(m, "Hangcheck active, work pending\n");
1351 else
1352 seq_puts(m, "Hangcheck inactive\n");
f654449a 1353
f73b5674
CW
1354 seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1355
3b3f1650 1356 for_each_engine(engine, dev_priv, id) {
33f53719
CW
1357 struct intel_breadcrumbs *b = &engine->breadcrumbs;
1358 struct rb_node *rb;
1359
e2f80391 1360 seq_printf(m, "%s:\n", engine->name);
52d7f16e 1361 seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
cb399eab 1362 engine->hangcheck.seqno, seqno[id],
52d7f16e 1363 intel_engine_last_submit(engine));
1fd00c0f 1364 seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
83348ba8
CW
1365 yesno(intel_engine_has_waiter(engine)),
1366 yesno(test_bit(engine->id,
3fe3b030 1367 &dev_priv->gpu_error.missed_irq_rings)),
1fd00c0f
CW
1368 yesno(engine->hangcheck.stalled),
1369 yesno(engine->hangcheck.wedged));
3fe3b030 1370
61d3dc70 1371 spin_lock_irq(&b->rb_lock);
33f53719 1372 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
f802cf7e 1373 struct intel_wait *w = rb_entry(rb, typeof(*w), node);
33f53719
CW
1374
1375 seq_printf(m, "\t%s [%d] waiting for %x\n",
1376 w->tsk->comm, w->tsk->pid, w->seqno);
1377 }
61d3dc70 1378 spin_unlock_irq(&b->rb_lock);
33f53719 1379
f654449a 1380 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
e2f80391 1381 (long long)engine->hangcheck.acthd,
c3232b18 1382 (long long)acthd[id]);
3fe3b030
MK
1383 seq_printf(m, "\taction = %s(%d) %d ms ago\n",
1384 hangcheck_action_to_str(engine->hangcheck.action),
1385 engine->hangcheck.action,
1386 jiffies_to_msecs(jiffies -
1387 engine->hangcheck.action_timestamp));
61642ff0 1388
e2f80391 1389 if (engine->id == RCS) {
d636951e 1390 seq_puts(m, "\tinstdone read =\n");
61642ff0 1391
d636951e 1392 i915_instdone_info(dev_priv, m, &instdone);
61642ff0 1393
d636951e 1394 seq_puts(m, "\tinstdone accu =\n");
61642ff0 1395
d636951e
BW
1396 i915_instdone_info(dev_priv, m,
1397 &engine->hangcheck.instdone);
61642ff0 1398 }
f654449a
CW
1399 }
1400
1401 return 0;
1402}
1403
061d06a2
MT
1404static int i915_reset_info(struct seq_file *m, void *unused)
1405{
1406 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1407 struct i915_gpu_error *error = &dev_priv->gpu_error;
1408 struct intel_engine_cs *engine;
1409 enum intel_engine_id id;
1410
1411 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1412
1413 for_each_engine(engine, dev_priv, id) {
1414 seq_printf(m, "%s = %u\n", engine->name,
1415 i915_reset_engine_count(error, engine));
1416 }
1417
1418 return 0;
1419}
1420
4d85529d 1421static int ironlake_drpc_info(struct seq_file *m)
f97108d1 1422{
36cdd013 1423 struct drm_i915_private *dev_priv = node_to_i915(m->private);
616fdb5a
BW
1424 u32 rgvmodectl, rstdbyctl;
1425 u16 crstandvid;
616fdb5a 1426
616fdb5a
BW
1427 rgvmodectl = I915_READ(MEMMODECTL);
1428 rstdbyctl = I915_READ(RSTDBYCTL);
1429 crstandvid = I915_READ16(CRSTANDVID);
1430
742f491d 1431 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
f97108d1
JB
1432 seq_printf(m, "Boost freq: %d\n",
1433 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1434 MEMMODE_BOOST_FREQ_SHIFT);
1435 seq_printf(m, "HW control enabled: %s\n",
742f491d 1436 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
f97108d1 1437 seq_printf(m, "SW control enabled: %s\n",
742f491d 1438 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
f97108d1 1439 seq_printf(m, "Gated voltage change: %s\n",
742f491d 1440 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
f97108d1
JB
1441 seq_printf(m, "Starting frequency: P%d\n",
1442 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
7648fa99 1443 seq_printf(m, "Max P-state: P%d\n",
f97108d1 1444 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
7648fa99
JB
1445 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1446 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1447 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1448 seq_printf(m, "Render standby enabled: %s\n",
742f491d 1449 yesno(!(rstdbyctl & RCX_SW_EXIT)));
267f0c90 1450 seq_puts(m, "Current RS state: ");
88271da3
JB
1451 switch (rstdbyctl & RSX_STATUS_MASK) {
1452 case RSX_STATUS_ON:
267f0c90 1453 seq_puts(m, "on\n");
88271da3
JB
1454 break;
1455 case RSX_STATUS_RC1:
267f0c90 1456 seq_puts(m, "RC1\n");
88271da3
JB
1457 break;
1458 case RSX_STATUS_RC1E:
267f0c90 1459 seq_puts(m, "RC1E\n");
88271da3
JB
1460 break;
1461 case RSX_STATUS_RS1:
267f0c90 1462 seq_puts(m, "RS1\n");
88271da3
JB
1463 break;
1464 case RSX_STATUS_RS2:
267f0c90 1465 seq_puts(m, "RS2 (RC6)\n");
88271da3
JB
1466 break;
1467 case RSX_STATUS_RS3:
267f0c90 1468 seq_puts(m, "RC3 (RC6+)\n");
88271da3
JB
1469 break;
1470 default:
267f0c90 1471 seq_puts(m, "unknown\n");
88271da3
JB
1472 break;
1473 }
f97108d1
JB
1474
1475 return 0;
1476}
1477
f65367b5 1478static int i915_forcewake_domains(struct seq_file *m, void *data)
669ab5aa 1479{
233ebf57 1480 struct drm_i915_private *i915 = node_to_i915(m->private);
b2cff0db 1481 struct intel_uncore_forcewake_domain *fw_domain;
d2dc94bc 1482 unsigned int tmp;
b2cff0db 1483
d7a133d8
CW
1484 seq_printf(m, "user.bypass_count = %u\n",
1485 i915->uncore.user_forcewake.count);
1486
233ebf57 1487 for_each_fw_domain(fw_domain, i915, tmp)
b2cff0db 1488 seq_printf(m, "%s.wake_count = %u\n",
33c582c1 1489 intel_uncore_forcewake_domain_to_str(fw_domain->id),
233ebf57 1490 READ_ONCE(fw_domain->wake_count));
669ab5aa 1491
b2cff0db
CW
1492 return 0;
1493}
1494
1362877e
MK
1495static void print_rc6_res(struct seq_file *m,
1496 const char *title,
1497 const i915_reg_t reg)
1498{
1499 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1500
1501 seq_printf(m, "%s %u (%llu us)\n",
1502 title, I915_READ(reg),
1503 intel_rc6_residency_us(dev_priv, reg));
1504}
1505
b2cff0db
CW
1506static int vlv_drpc_info(struct seq_file *m)
1507{
36cdd013 1508 struct drm_i915_private *dev_priv = node_to_i915(m->private);
0d6fc92a 1509 u32 rcctl1, pw_status;
669ab5aa 1510
6b312cd3 1511 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
669ab5aa
D
1512 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1513
669ab5aa
D
1514 seq_printf(m, "RC6 Enabled: %s\n",
1515 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1516 GEN6_RC_CTL_EI_MODE(1))));
1517 seq_printf(m, "Render Power Well: %s\n",
6b312cd3 1518 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
669ab5aa 1519 seq_printf(m, "Media Power Well: %s\n",
6b312cd3 1520 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
669ab5aa 1521
1362877e
MK
1522 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1523 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
9cc19be5 1524
f65367b5 1525 return i915_forcewake_domains(m, NULL);
669ab5aa
D
1526}
1527
4d85529d
BW
1528static int gen6_drpc_info(struct seq_file *m)
1529{
36cdd013 1530 struct drm_i915_private *dev_priv = node_to_i915(m->private);
960e5465 1531 u32 gt_core_status, rcctl1, rc6vids = 0;
f2dd7578 1532 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
4d85529d 1533
75aa3f63 1534 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
ed71f1b4 1535 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
4d85529d 1536
4d85529d 1537 rcctl1 = I915_READ(GEN6_RC_CONTROL);
36cdd013 1538 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1539 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1540 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1541 }
cf632bd6 1542
51cc9ade
ID
1543 if (INTEL_GEN(dev_priv) <= 7) {
1544 mutex_lock(&dev_priv->pcu_lock);
1545 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1546 &rc6vids);
1547 mutex_unlock(&dev_priv->pcu_lock);
1548 }
4d85529d 1549
fff24e21 1550 seq_printf(m, "RC1e Enabled: %s\n",
4d85529d
BW
1551 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1552 seq_printf(m, "RC6 Enabled: %s\n",
1553 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
36cdd013 1554 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1555 seq_printf(m, "Render Well Gating Enabled: %s\n",
1556 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1557 seq_printf(m, "Media Well Gating Enabled: %s\n",
1558 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1559 }
4d85529d
BW
1560 seq_printf(m, "Deep RC6 Enabled: %s\n",
1561 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1562 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1563 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
267f0c90 1564 seq_puts(m, "Current RC state: ");
4d85529d
BW
1565 switch (gt_core_status & GEN6_RCn_MASK) {
1566 case GEN6_RC0:
1567 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
267f0c90 1568 seq_puts(m, "Core Power Down\n");
4d85529d 1569 else
267f0c90 1570 seq_puts(m, "on\n");
4d85529d
BW
1571 break;
1572 case GEN6_RC3:
267f0c90 1573 seq_puts(m, "RC3\n");
4d85529d
BW
1574 break;
1575 case GEN6_RC6:
267f0c90 1576 seq_puts(m, "RC6\n");
4d85529d
BW
1577 break;
1578 case GEN6_RC7:
267f0c90 1579 seq_puts(m, "RC7\n");
4d85529d
BW
1580 break;
1581 default:
267f0c90 1582 seq_puts(m, "Unknown\n");
4d85529d
BW
1583 break;
1584 }
1585
1586 seq_printf(m, "Core Power Down: %s\n",
1587 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
36cdd013 1588 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1589 seq_printf(m, "Render Power Well: %s\n",
1590 (gen9_powergate_status &
1591 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1592 seq_printf(m, "Media Power Well: %s\n",
1593 (gen9_powergate_status &
1594 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1595 }
cce66a28
BW
1596
1597 /* Not exactly sure what this is */
1362877e
MK
1598 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1599 GEN6_GT_GFX_RC6_LOCKED);
1600 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1601 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1602 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
cce66a28 1603
51cc9ade
ID
1604 if (INTEL_GEN(dev_priv) <= 7) {
1605 seq_printf(m, "RC6 voltage: %dmV\n",
1606 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1607 seq_printf(m, "RC6+ voltage: %dmV\n",
1608 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1609 seq_printf(m, "RC6++ voltage: %dmV\n",
1610 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1611 }
1612
f2dd7578 1613 return i915_forcewake_domains(m, NULL);
4d85529d
BW
1614}
1615
1616static int i915_drpc_info(struct seq_file *m, void *unused)
1617{
36cdd013 1618 struct drm_i915_private *dev_priv = node_to_i915(m->private);
cf632bd6
CW
1619 int err;
1620
1621 intel_runtime_pm_get(dev_priv);
4d85529d 1622
36cdd013 1623 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
cf632bd6 1624 err = vlv_drpc_info(m);
36cdd013 1625 else if (INTEL_GEN(dev_priv) >= 6)
cf632bd6 1626 err = gen6_drpc_info(m);
4d85529d 1627 else
cf632bd6
CW
1628 err = ironlake_drpc_info(m);
1629
1630 intel_runtime_pm_put(dev_priv);
1631
1632 return err;
4d85529d
BW
1633}
1634
9a851789
DV
1635static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1636{
36cdd013 1637 struct drm_i915_private *dev_priv = node_to_i915(m->private);
9a851789
DV
1638
1639 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1640 dev_priv->fb_tracking.busy_bits);
1641
1642 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1643 dev_priv->fb_tracking.flip_bits);
1644
1645 return 0;
1646}
1647
b5e50c3f
JB
1648static int i915_fbc_status(struct seq_file *m, void *unused)
1649{
36cdd013 1650 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3138872c 1651 struct intel_fbc *fbc = &dev_priv->fbc;
b5e50c3f 1652
ab309a6a
MW
1653 if (!HAS_FBC(dev_priv))
1654 return -ENODEV;
b5e50c3f 1655
36623ef8 1656 intel_runtime_pm_get(dev_priv);
3138872c 1657 mutex_lock(&fbc->lock);
36623ef8 1658
0e631adc 1659 if (intel_fbc_is_active(dev_priv))
267f0c90 1660 seq_puts(m, "FBC enabled\n");
2e8144a5 1661 else
3138872c
CW
1662 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1663
3fd5d1ec
VS
1664 if (intel_fbc_is_active(dev_priv)) {
1665 u32 mask;
1666
1667 if (INTEL_GEN(dev_priv) >= 8)
1668 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1669 else if (INTEL_GEN(dev_priv) >= 7)
1670 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1671 else if (INTEL_GEN(dev_priv) >= 5)
1672 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1673 else if (IS_G4X(dev_priv))
1674 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1675 else
1676 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1677 FBC_STAT_COMPRESSED);
1678
1679 seq_printf(m, "Compressing: %s\n", yesno(mask));
0fc6a9dc 1680 }
31b9df10 1681
3138872c 1682 mutex_unlock(&fbc->lock);
36623ef8
PZ
1683 intel_runtime_pm_put(dev_priv);
1684
b5e50c3f
JB
1685 return 0;
1686}
1687
4127dc43 1688static int i915_fbc_false_color_get(void *data, u64 *val)
da46f936 1689{
36cdd013 1690 struct drm_i915_private *dev_priv = data;
da46f936 1691
36cdd013 1692 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
da46f936
RV
1693 return -ENODEV;
1694
da46f936 1695 *val = dev_priv->fbc.false_color;
da46f936
RV
1696
1697 return 0;
1698}
1699
4127dc43 1700static int i915_fbc_false_color_set(void *data, u64 val)
da46f936 1701{
36cdd013 1702 struct drm_i915_private *dev_priv = data;
da46f936
RV
1703 u32 reg;
1704
36cdd013 1705 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
da46f936
RV
1706 return -ENODEV;
1707
25ad93fd 1708 mutex_lock(&dev_priv->fbc.lock);
da46f936
RV
1709
1710 reg = I915_READ(ILK_DPFC_CONTROL);
1711 dev_priv->fbc.false_color = val;
1712
1713 I915_WRITE(ILK_DPFC_CONTROL, val ?
1714 (reg | FBC_CTL_FALSE_COLOR) :
1715 (reg & ~FBC_CTL_FALSE_COLOR));
1716
25ad93fd 1717 mutex_unlock(&dev_priv->fbc.lock);
da46f936
RV
1718 return 0;
1719}
1720
4127dc43
VS
1721DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1722 i915_fbc_false_color_get, i915_fbc_false_color_set,
da46f936
RV
1723 "%llu\n");
1724
92d44621
PZ
1725static int i915_ips_status(struct seq_file *m, void *unused)
1726{
36cdd013 1727 struct drm_i915_private *dev_priv = node_to_i915(m->private);
92d44621 1728
ab309a6a
MW
1729 if (!HAS_IPS(dev_priv))
1730 return -ENODEV;
92d44621 1731
36623ef8
PZ
1732 intel_runtime_pm_get(dev_priv);
1733
0eaa53f0 1734 seq_printf(m, "Enabled by kernel parameter: %s\n",
4f044a88 1735 yesno(i915_modparams.enable_ips));
0eaa53f0 1736
36cdd013 1737 if (INTEL_GEN(dev_priv) >= 8) {
0eaa53f0
RV
1738 seq_puts(m, "Currently: unknown\n");
1739 } else {
1740 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1741 seq_puts(m, "Currently: enabled\n");
1742 else
1743 seq_puts(m, "Currently: disabled\n");
1744 }
92d44621 1745
36623ef8
PZ
1746 intel_runtime_pm_put(dev_priv);
1747
92d44621
PZ
1748 return 0;
1749}
1750
4a9bef37
JB
1751static int i915_sr_status(struct seq_file *m, void *unused)
1752{
36cdd013 1753 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4a9bef37
JB
1754 bool sr_enabled = false;
1755
36623ef8 1756 intel_runtime_pm_get(dev_priv);
9c870d03 1757 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
36623ef8 1758
7342a72c
CW
1759 if (INTEL_GEN(dev_priv) >= 9)
1760 /* no global SR status; inspect per-plane WM */;
1761 else if (HAS_PCH_SPLIT(dev_priv))
5ba2aaaa 1762 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
c0f86832 1763 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
36cdd013 1764 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
4a9bef37 1765 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
36cdd013 1766 else if (IS_I915GM(dev_priv))
4a9bef37 1767 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
36cdd013 1768 else if (IS_PINEVIEW(dev_priv))
4a9bef37 1769 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
36cdd013 1770 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
77b64555 1771 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4a9bef37 1772
9c870d03 1773 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
36623ef8
PZ
1774 intel_runtime_pm_put(dev_priv);
1775
08c4d7fc 1776 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
4a9bef37
JB
1777
1778 return 0;
1779}
1780
7648fa99
JB
1781static int i915_emon_status(struct seq_file *m, void *unused)
1782{
36cdd013
DW
1783 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1784 struct drm_device *dev = &dev_priv->drm;
7648fa99 1785 unsigned long temp, chipset, gfx;
de227ef0
CW
1786 int ret;
1787
36cdd013 1788 if (!IS_GEN5(dev_priv))
582be6b4
CW
1789 return -ENODEV;
1790
de227ef0
CW
1791 ret = mutex_lock_interruptible(&dev->struct_mutex);
1792 if (ret)
1793 return ret;
7648fa99
JB
1794
1795 temp = i915_mch_val(dev_priv);
1796 chipset = i915_chipset_val(dev_priv);
1797 gfx = i915_gfx_val(dev_priv);
de227ef0 1798 mutex_unlock(&dev->struct_mutex);
7648fa99
JB
1799
1800 seq_printf(m, "GMCH temp: %ld\n", temp);
1801 seq_printf(m, "Chipset power: %ld\n", chipset);
1802 seq_printf(m, "GFX power: %ld\n", gfx);
1803 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1804
1805 return 0;
1806}
1807
23b2f8bb
JB
1808static int i915_ring_freq_table(struct seq_file *m, void *unused)
1809{
36cdd013 1810 struct drm_i915_private *dev_priv = node_to_i915(m->private);
562d9bae 1811 struct intel_rps *rps = &dev_priv->gt_pm.rps;
f936ec34 1812 unsigned int max_gpu_freq, min_gpu_freq;
d586b5f4
CW
1813 int gpu_freq, ia_freq;
1814 int ret;
23b2f8bb 1815
ab309a6a
MW
1816 if (!HAS_LLC(dev_priv))
1817 return -ENODEV;
23b2f8bb 1818
5bfa0199
PZ
1819 intel_runtime_pm_get(dev_priv);
1820
9f817501 1821 ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
23b2f8bb 1822 if (ret)
5bfa0199 1823 goto out;
23b2f8bb 1824
d586b5f4
CW
1825 min_gpu_freq = rps->min_freq;
1826 max_gpu_freq = rps->max_freq;
2b2874ef 1827 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
f936ec34 1828 /* Convert GT frequency to 50 HZ units */
d586b5f4
CW
1829 min_gpu_freq /= GEN9_FREQ_SCALER;
1830 max_gpu_freq /= GEN9_FREQ_SCALER;
f936ec34
AG
1831 }
1832
267f0c90 1833 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
23b2f8bb 1834
f936ec34 1835 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
42c0526c
BW
1836 ia_freq = gpu_freq;
1837 sandybridge_pcode_read(dev_priv,
1838 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1839 &ia_freq);
3ebecd07 1840 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
f936ec34 1841 intel_gpu_freq(dev_priv, (gpu_freq *
35ceabf3 1842 (IS_GEN9_BC(dev_priv) ||
2b2874ef 1843 INTEL_GEN(dev_priv) >= 10 ?
b976dc53 1844 GEN9_FREQ_SCALER : 1))),
3ebecd07
CW
1845 ((ia_freq >> 0) & 0xff) * 100,
1846 ((ia_freq >> 8) & 0xff) * 100);
23b2f8bb
JB
1847 }
1848
9f817501 1849 mutex_unlock(&dev_priv->pcu_lock);
23b2f8bb 1850
5bfa0199
PZ
1851out:
1852 intel_runtime_pm_put(dev_priv);
1853 return ret;
23b2f8bb
JB
1854}
1855
44834a67
CW
1856static int i915_opregion(struct seq_file *m, void *unused)
1857{
36cdd013
DW
1858 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1859 struct drm_device *dev = &dev_priv->drm;
44834a67
CW
1860 struct intel_opregion *opregion = &dev_priv->opregion;
1861 int ret;
1862
1863 ret = mutex_lock_interruptible(&dev->struct_mutex);
1864 if (ret)
0d38f009 1865 goto out;
44834a67 1866
2455a8e4
JN
1867 if (opregion->header)
1868 seq_write(m, opregion->header, OPREGION_SIZE);
44834a67
CW
1869
1870 mutex_unlock(&dev->struct_mutex);
1871
0d38f009 1872out:
44834a67
CW
1873 return 0;
1874}
1875
ada8f955
JN
1876static int i915_vbt(struct seq_file *m, void *unused)
1877{
36cdd013 1878 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
ada8f955
JN
1879
1880 if (opregion->vbt)
1881 seq_write(m, opregion->vbt, opregion->vbt_size);
1882
1883 return 0;
1884}
1885
37811fcc
CW
1886static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1887{
36cdd013
DW
1888 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1889 struct drm_device *dev = &dev_priv->drm;
b13b8402 1890 struct intel_framebuffer *fbdev_fb = NULL;
3a58ee10 1891 struct drm_framebuffer *drm_fb;
188c1ab7
CW
1892 int ret;
1893
1894 ret = mutex_lock_interruptible(&dev->struct_mutex);
1895 if (ret)
1896 return ret;
37811fcc 1897
0695726e 1898#ifdef CONFIG_DRM_FBDEV_EMULATION
346fb4e0 1899 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
36cdd013 1900 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
25bcce94
CW
1901
1902 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1903 fbdev_fb->base.width,
1904 fbdev_fb->base.height,
b00c600e 1905 fbdev_fb->base.format->depth,
272725c7 1906 fbdev_fb->base.format->cpp[0] * 8,
bae781b2 1907 fbdev_fb->base.modifier,
25bcce94 1908 drm_framebuffer_read_refcount(&fbdev_fb->base));
a5ff7a45 1909 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
25bcce94
CW
1910 seq_putc(m, '\n');
1911 }
4520f53a 1912#endif
37811fcc 1913
4b096ac1 1914 mutex_lock(&dev->mode_config.fb_lock);
3a58ee10 1915 drm_for_each_fb(drm_fb, dev) {
b13b8402
NS
1916 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1917 if (fb == fbdev_fb)
37811fcc
CW
1918 continue;
1919
c1ca506d 1920 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
37811fcc
CW
1921 fb->base.width,
1922 fb->base.height,
b00c600e 1923 fb->base.format->depth,
272725c7 1924 fb->base.format->cpp[0] * 8,
bae781b2 1925 fb->base.modifier,
747a598f 1926 drm_framebuffer_read_refcount(&fb->base));
a5ff7a45 1927 describe_obj(m, intel_fb_obj(&fb->base));
267f0c90 1928 seq_putc(m, '\n');
37811fcc 1929 }
4b096ac1 1930 mutex_unlock(&dev->mode_config.fb_lock);
188c1ab7 1931 mutex_unlock(&dev->struct_mutex);
37811fcc
CW
1932
1933 return 0;
1934}
1935
7e37f889 1936static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
c9fe99bd 1937{
ef5032a0
CW
1938 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1939 ring->space, ring->head, ring->tail, ring->emit);
c9fe99bd
OM
1940}
1941
e76d3630
BW
1942static int i915_context_status(struct seq_file *m, void *unused)
1943{
36cdd013
DW
1944 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1945 struct drm_device *dev = &dev_priv->drm;
e2f80391 1946 struct intel_engine_cs *engine;
e2efd130 1947 struct i915_gem_context *ctx;
3b3f1650 1948 enum intel_engine_id id;
c3232b18 1949 int ret;
e76d3630 1950
f3d28878 1951 ret = mutex_lock_interruptible(&dev->struct_mutex);
e76d3630
BW
1952 if (ret)
1953 return ret;
1954
829a0af2 1955 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
288f1ced
CW
1956 seq_puts(m, "HW context ");
1957 if (!list_empty(&ctx->hw_id_link))
1958 seq_printf(m, "%x [pin %u]", ctx->hw_id,
1959 atomic_read(&ctx->hw_id_pin_count));
c84455b4 1960 if (ctx->pid) {
d28b99ab
CW
1961 struct task_struct *task;
1962
c84455b4 1963 task = get_pid_task(ctx->pid, PIDTYPE_PID);
d28b99ab
CW
1964 if (task) {
1965 seq_printf(m, "(%s [%d]) ",
1966 task->comm, task->pid);
1967 put_task_struct(task);
1968 }
c84455b4
CW
1969 } else if (IS_ERR(ctx->file_priv)) {
1970 seq_puts(m, "(deleted) ");
d28b99ab
CW
1971 } else {
1972 seq_puts(m, "(kernel) ");
1973 }
1974
bca44d80
CW
1975 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1976 seq_putc(m, '\n');
c9fe99bd 1977
3b3f1650 1978 for_each_engine(engine, dev_priv, id) {
ab82a063
CW
1979 struct intel_context *ce =
1980 to_intel_context(ctx, engine);
bca44d80
CW
1981
1982 seq_printf(m, "%s: ", engine->name);
bca44d80 1983 if (ce->state)
bf3783e5 1984 describe_obj(m, ce->state->obj);
dca33ecc 1985 if (ce->ring)
7e37f889 1986 describe_ctx_ring(m, ce->ring);
c9fe99bd 1987 seq_putc(m, '\n');
c9fe99bd 1988 }
a33afea5 1989
a33afea5 1990 seq_putc(m, '\n');
a168c293
BW
1991 }
1992
f3d28878 1993 mutex_unlock(&dev->struct_mutex);
e76d3630
BW
1994
1995 return 0;
1996}
1997
ea16a3cd
DV
1998static const char *swizzle_string(unsigned swizzle)
1999{
aee56cff 2000 switch (swizzle) {
ea16a3cd
DV
2001 case I915_BIT_6_SWIZZLE_NONE:
2002 return "none";
2003 case I915_BIT_6_SWIZZLE_9:
2004 return "bit9";
2005 case I915_BIT_6_SWIZZLE_9_10:
2006 return "bit9/bit10";
2007 case I915_BIT_6_SWIZZLE_9_11:
2008 return "bit9/bit11";
2009 case I915_BIT_6_SWIZZLE_9_10_11:
2010 return "bit9/bit10/bit11";
2011 case I915_BIT_6_SWIZZLE_9_17:
2012 return "bit9/bit17";
2013 case I915_BIT_6_SWIZZLE_9_10_17:
2014 return "bit9/bit10/bit17";
2015 case I915_BIT_6_SWIZZLE_UNKNOWN:
8a168ca7 2016 return "unknown";
ea16a3cd
DV
2017 }
2018
2019 return "bug";
2020}
2021
2022static int i915_swizzle_info(struct seq_file *m, void *data)
2023{
36cdd013 2024 struct drm_i915_private *dev_priv = node_to_i915(m->private);
22bcfc6a 2025
c8c8fb33 2026 intel_runtime_pm_get(dev_priv);
ea16a3cd 2027
ea16a3cd
DV
2028 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2029 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2030 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2031 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2032
36cdd013 2033 if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
ea16a3cd
DV
2034 seq_printf(m, "DDC = 0x%08x\n",
2035 I915_READ(DCC));
656bfa3a
DV
2036 seq_printf(m, "DDC2 = 0x%08x\n",
2037 I915_READ(DCC2));
ea16a3cd
DV
2038 seq_printf(m, "C0DRB3 = 0x%04x\n",
2039 I915_READ16(C0DRB3));
2040 seq_printf(m, "C1DRB3 = 0x%04x\n",
2041 I915_READ16(C1DRB3));
36cdd013 2042 } else if (INTEL_GEN(dev_priv) >= 6) {
3fa7d235
DV
2043 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2044 I915_READ(MAD_DIMM_C0));
2045 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2046 I915_READ(MAD_DIMM_C1));
2047 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2048 I915_READ(MAD_DIMM_C2));
2049 seq_printf(m, "TILECTL = 0x%08x\n",
2050 I915_READ(TILECTL));
36cdd013 2051 if (INTEL_GEN(dev_priv) >= 8)
9d3203e1
BW
2052 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2053 I915_READ(GAMTARBMODE));
2054 else
2055 seq_printf(m, "ARB_MODE = 0x%08x\n",
2056 I915_READ(ARB_MODE));
3fa7d235
DV
2057 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2058 I915_READ(DISP_ARB_CTL));
ea16a3cd 2059 }
656bfa3a
DV
2060
2061 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2062 seq_puts(m, "L-shaped memory detected\n");
2063
c8c8fb33 2064 intel_runtime_pm_put(dev_priv);
ea16a3cd
DV
2065
2066 return 0;
2067}
2068
1c60fef5
BW
2069static int per_file_ctx(int id, void *ptr, void *data)
2070{
e2efd130 2071 struct i915_gem_context *ctx = ptr;
1c60fef5 2072 struct seq_file *m = data;
ae6c4806
DV
2073 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2074
2075 if (!ppgtt) {
2076 seq_printf(m, " no ppgtt for context %d\n",
2077 ctx->user_handle);
2078 return 0;
2079 }
1c60fef5 2080
f83d6518
OM
2081 if (i915_gem_context_is_default(ctx))
2082 seq_puts(m, " default context:\n");
2083 else
821d66dd 2084 seq_printf(m, " context %d:\n", ctx->user_handle);
1c60fef5
BW
2085 ppgtt->debug_dump(ppgtt, m);
2086
2087 return 0;
2088}
2089
36cdd013
DW
2090static void gen8_ppgtt_info(struct seq_file *m,
2091 struct drm_i915_private *dev_priv)
3cf17fc5 2092{
77df6772 2093 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
3b3f1650
AG
2094 struct intel_engine_cs *engine;
2095 enum intel_engine_id id;
b4ac5afc 2096 int i;
3cf17fc5 2097
77df6772
BW
2098 if (!ppgtt)
2099 return;
2100
3b3f1650 2101 for_each_engine(engine, dev_priv, id) {
e2f80391 2102 seq_printf(m, "%s\n", engine->name);
77df6772 2103 for (i = 0; i < 4; i++) {
e2f80391 2104 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
77df6772 2105 pdp <<= 32;
e2f80391 2106 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
a2a5b15c 2107 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
77df6772
BW
2108 }
2109 }
2110}
2111
36cdd013
DW
2112static void gen6_ppgtt_info(struct seq_file *m,
2113 struct drm_i915_private *dev_priv)
77df6772 2114{
e2f80391 2115 struct intel_engine_cs *engine;
3b3f1650 2116 enum intel_engine_id id;
3cf17fc5 2117
7e22dbbb 2118 if (IS_GEN6(dev_priv))
3cf17fc5
DV
2119 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2120
3b3f1650 2121 for_each_engine(engine, dev_priv, id) {
e2f80391 2122 seq_printf(m, "%s\n", engine->name);
7e22dbbb 2123 if (IS_GEN7(dev_priv))
e2f80391
TU
2124 seq_printf(m, "GFX_MODE: 0x%08x\n",
2125 I915_READ(RING_MODE_GEN7(engine)));
2126 seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2127 I915_READ(RING_PP_DIR_BASE(engine)));
2128 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2129 I915_READ(RING_PP_DIR_BASE_READ(engine)));
2130 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2131 I915_READ(RING_PP_DIR_DCLV(engine)));
3cf17fc5
DV
2132 }
2133 if (dev_priv->mm.aliasing_ppgtt) {
2134 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2135
267f0c90 2136 seq_puts(m, "aliasing PPGTT:\n");
44159ddb 2137 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
1c60fef5 2138
87d60b63 2139 ppgtt->debug_dump(ppgtt, m);
ae6c4806 2140 }
1c60fef5 2141
3cf17fc5 2142 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
77df6772
BW
2143}
2144
2145static int i915_ppgtt_info(struct seq_file *m, void *data)
2146{
36cdd013
DW
2147 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2148 struct drm_device *dev = &dev_priv->drm;
ea91e401 2149 struct drm_file *file;
637ee29e 2150 int ret;
77df6772 2151
637ee29e
CW
2152 mutex_lock(&dev->filelist_mutex);
2153 ret = mutex_lock_interruptible(&dev->struct_mutex);
77df6772 2154 if (ret)
637ee29e
CW
2155 goto out_unlock;
2156
c8c8fb33 2157 intel_runtime_pm_get(dev_priv);
77df6772 2158
36cdd013
DW
2159 if (INTEL_GEN(dev_priv) >= 8)
2160 gen8_ppgtt_info(m, dev_priv);
2161 else if (INTEL_GEN(dev_priv) >= 6)
2162 gen6_ppgtt_info(m, dev_priv);
77df6772 2163
ea91e401
MT
2164 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2165 struct drm_i915_file_private *file_priv = file->driver_priv;
7cb5dff8 2166 struct task_struct *task;
ea91e401 2167
7cb5dff8 2168 task = get_pid_task(file->pid, PIDTYPE_PID);
06812760
DC
2169 if (!task) {
2170 ret = -ESRCH;
637ee29e 2171 goto out_rpm;
06812760 2172 }
7cb5dff8
GT
2173 seq_printf(m, "\nproc: %s\n", task->comm);
2174 put_task_struct(task);
ea91e401
MT
2175 idr_for_each(&file_priv->context_idr, per_file_ctx,
2176 (void *)(unsigned long)m);
2177 }
2178
637ee29e 2179out_rpm:
c8c8fb33 2180 intel_runtime_pm_put(dev_priv);
3cf17fc5 2181 mutex_unlock(&dev->struct_mutex);
637ee29e
CW
2182out_unlock:
2183 mutex_unlock(&dev->filelist_mutex);
06812760 2184 return ret;
3cf17fc5
DV
2185}
2186
f5a4c67d
CW
2187static int count_irq_waiters(struct drm_i915_private *i915)
2188{
e2f80391 2189 struct intel_engine_cs *engine;
3b3f1650 2190 enum intel_engine_id id;
f5a4c67d 2191 int count = 0;
f5a4c67d 2192
3b3f1650 2193 for_each_engine(engine, i915, id)
688e6c72 2194 count += intel_engine_has_waiter(engine);
f5a4c67d
CW
2195
2196 return count;
2197}
2198
7466c291
CW
2199static const char *rps_power_to_str(unsigned int power)
2200{
2201 static const char * const strings[] = {
2202 [LOW_POWER] = "low power",
2203 [BETWEEN] = "mixed",
2204 [HIGH_POWER] = "high power",
2205 };
2206
2207 if (power >= ARRAY_SIZE(strings) || !strings[power])
2208 return "unknown";
2209
2210 return strings[power];
2211}
2212
1854d5ca
CW
2213static int i915_rps_boost_info(struct seq_file *m, void *data)
2214{
36cdd013
DW
2215 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2216 struct drm_device *dev = &dev_priv->drm;
562d9bae 2217 struct intel_rps *rps = &dev_priv->gt_pm.rps;
1854d5ca 2218 struct drm_file *file;
1854d5ca 2219
562d9bae 2220 seq_printf(m, "RPS enabled? %d\n", rps->enabled);
28176ef4
CW
2221 seq_printf(m, "GPU busy? %s [%d requests]\n",
2222 yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
f5a4c67d 2223 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
7b92c1bd 2224 seq_printf(m, "Boosts outstanding? %d\n",
562d9bae 2225 atomic_read(&rps->num_waiters));
60548c55 2226 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
7466c291 2227 seq_printf(m, "Frequency requested %d\n",
562d9bae 2228 intel_gpu_freq(dev_priv, rps->cur_freq));
7466c291 2229 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
562d9bae
SAK
2230 intel_gpu_freq(dev_priv, rps->min_freq),
2231 intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2232 intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2233 intel_gpu_freq(dev_priv, rps->max_freq));
7466c291 2234 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
562d9bae
SAK
2235 intel_gpu_freq(dev_priv, rps->idle_freq),
2236 intel_gpu_freq(dev_priv, rps->efficient_freq),
2237 intel_gpu_freq(dev_priv, rps->boost_freq));
1d2ac403
DV
2238
2239 mutex_lock(&dev->filelist_mutex);
1854d5ca
CW
2240 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2241 struct drm_i915_file_private *file_priv = file->driver_priv;
2242 struct task_struct *task;
2243
2244 rcu_read_lock();
2245 task = pid_task(file->pid, PIDTYPE_PID);
7b92c1bd 2246 seq_printf(m, "%s [%d]: %d boosts\n",
1854d5ca
CW
2247 task ? task->comm : "<unknown>",
2248 task ? task->pid : -1,
562d9bae 2249 atomic_read(&file_priv->rps_client.boosts));
1854d5ca
CW
2250 rcu_read_unlock();
2251 }
7b92c1bd 2252 seq_printf(m, "Kernel (anonymous) boosts: %d\n",
562d9bae 2253 atomic_read(&rps->boosts));
1d2ac403 2254 mutex_unlock(&dev->filelist_mutex);
1854d5ca 2255
7466c291 2256 if (INTEL_GEN(dev_priv) >= 6 &&
562d9bae 2257 rps->enabled &&
28176ef4 2258 dev_priv->gt.active_requests) {
7466c291
CW
2259 u32 rpup, rpupei;
2260 u32 rpdown, rpdownei;
2261
2262 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2263 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2264 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2265 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2266 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2267 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2268
2269 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
60548c55 2270 rps_power_to_str(rps->power.mode));
7466c291 2271 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
23f4a287 2272 rpup && rpupei ? 100 * rpup / rpupei : 0,
60548c55 2273 rps->power.up_threshold);
7466c291 2274 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
23f4a287 2275 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
60548c55 2276 rps->power.down_threshold);
7466c291
CW
2277 } else {
2278 seq_puts(m, "\nRPS Autotuning inactive\n");
2279 }
2280
8d3afd7d 2281 return 0;
1854d5ca
CW
2282}
2283
63573eb7
BW
2284static int i915_llc(struct seq_file *m, void *data)
2285{
36cdd013 2286 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3accaf7e 2287 const bool edram = INTEL_GEN(dev_priv) > 8;
63573eb7 2288
36cdd013 2289 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
3accaf7e
MK
2290 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2291 intel_uncore_edram_size(dev_priv)/1024/1024);
63573eb7
BW
2292
2293 return 0;
2294}
2295
0509ead1
AS
2296static int i915_huc_load_status_info(struct seq_file *m, void *data)
2297{
2298 struct drm_i915_private *dev_priv = node_to_i915(m->private);
56ffc742 2299 struct drm_printer p;
0509ead1 2300
ab309a6a
MW
2301 if (!HAS_HUC(dev_priv))
2302 return -ENODEV;
0509ead1 2303
56ffc742
MW
2304 p = drm_seq_file_printer(m);
2305 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
0509ead1 2306
3582ad13 2307 intel_runtime_pm_get(dev_priv);
0509ead1 2308 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
3582ad13 2309 intel_runtime_pm_put(dev_priv);
0509ead1
AS
2310
2311 return 0;
2312}
2313
fdf5d357
AD
2314static int i915_guc_load_status_info(struct seq_file *m, void *data)
2315{
36cdd013 2316 struct drm_i915_private *dev_priv = node_to_i915(m->private);
56ffc742 2317 struct drm_printer p;
fdf5d357
AD
2318 u32 tmp, i;
2319
ab309a6a
MW
2320 if (!HAS_GUC(dev_priv))
2321 return -ENODEV;
fdf5d357 2322
56ffc742
MW
2323 p = drm_seq_file_printer(m);
2324 intel_uc_fw_dump(&dev_priv->guc.fw, &p);
fdf5d357 2325
3582ad13 2326 intel_runtime_pm_get(dev_priv);
2327
fdf5d357
AD
2328 tmp = I915_READ(GUC_STATUS);
2329
2330 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2331 seq_printf(m, "\tBootrom status = 0x%x\n",
2332 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2333 seq_printf(m, "\tuKernel status = 0x%x\n",
2334 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2335 seq_printf(m, "\tMIA Core status = 0x%x\n",
2336 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2337 seq_puts(m, "\nScratch registers:\n");
2338 for (i = 0; i < 16; i++)
2339 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2340
3582ad13 2341 intel_runtime_pm_put(dev_priv);
2342
fdf5d357
AD
2343 return 0;
2344}
2345
5e24e4a2
MW
2346static const char *
2347stringify_guc_log_type(enum guc_log_buffer_type type)
2348{
2349 switch (type) {
2350 case GUC_ISR_LOG_BUFFER:
2351 return "ISR";
2352 case GUC_DPC_LOG_BUFFER:
2353 return "DPC";
2354 case GUC_CRASH_DUMP_LOG_BUFFER:
2355 return "CRASH";
2356 default:
2357 MISSING_CASE(type);
2358 }
2359
2360 return "";
2361}
2362
5aa1ee4b
AG
2363static void i915_guc_log_info(struct seq_file *m,
2364 struct drm_i915_private *dev_priv)
2365{
5e24e4a2
MW
2366 struct intel_guc_log *log = &dev_priv->guc.log;
2367 enum guc_log_buffer_type type;
5aa1ee4b 2368
5e24e4a2
MW
2369 if (!intel_guc_log_relay_enabled(log)) {
2370 seq_puts(m, "GuC log relay disabled\n");
2371 return;
2372 }
5aa1ee4b 2373
5e24e4a2 2374 seq_puts(m, "GuC logging stats:\n");
5aa1ee4b 2375
6a96be24 2376 seq_printf(m, "\tRelay full count: %u\n",
5e24e4a2
MW
2377 log->relay.full_count);
2378
2379 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2380 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2381 stringify_guc_log_type(type),
2382 log->stats[type].flush,
2383 log->stats[type].sampled_overflow);
2384 }
5aa1ee4b
AG
2385}
2386
8b417c26
DG
2387static void i915_guc_client_info(struct seq_file *m,
2388 struct drm_i915_private *dev_priv,
5afc8b49 2389 struct intel_guc_client *client)
8b417c26 2390{
e2f80391 2391 struct intel_engine_cs *engine;
c18468c4 2392 enum intel_engine_id id;
8b417c26 2393 uint64_t tot = 0;
8b417c26 2394
b09935a6
OM
2395 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2396 client->priority, client->stage_id, client->proc_desc_offset);
59db36cf
MW
2397 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2398 client->doorbell_id, client->doorbell_offset);
8b417c26 2399
3b3f1650 2400 for_each_engine(engine, dev_priv, id) {
c18468c4
DG
2401 u64 submissions = client->submissions[id];
2402 tot += submissions;
8b417c26 2403 seq_printf(m, "\tSubmissions: %llu %s\n",
c18468c4 2404 submissions, engine->name);
8b417c26
DG
2405 }
2406 seq_printf(m, "\tTotal: %llu\n", tot);
2407}
2408
a8b9370f
OM
2409static int i915_guc_info(struct seq_file *m, void *data)
2410{
2411 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2412 const struct intel_guc *guc = &dev_priv->guc;
a8b9370f 2413
db557993 2414 if (!USES_GUC(dev_priv))
ab309a6a
MW
2415 return -ENODEV;
2416
db557993
MW
2417 i915_guc_log_info(m, dev_priv);
2418
2419 if (!USES_GUC_SUBMISSION(dev_priv))
2420 return 0;
2421
ab309a6a 2422 GEM_BUG_ON(!guc->execbuf_client);
a8b9370f 2423
db557993 2424 seq_printf(m, "\nDoorbell map:\n");
abddffdf 2425 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
db557993 2426 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
9636f6db 2427
334636c6
CW
2428 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2429 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
e78c9175
CW
2430 if (guc->preempt_client) {
2431 seq_printf(m, "\nGuC preempt client @ %p:\n",
2432 guc->preempt_client);
2433 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2434 }
8b417c26
DG
2435
2436 /* Add more as required ... */
2437
2438 return 0;
2439}
2440
a8b9370f 2441static int i915_guc_stage_pool(struct seq_file *m, void *data)
4c7e77fc 2442{
36cdd013 2443 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a8b9370f
OM
2444 const struct intel_guc *guc = &dev_priv->guc;
2445 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
5afc8b49 2446 struct intel_guc_client *client = guc->execbuf_client;
a8b9370f
OM
2447 unsigned int tmp;
2448 int index;
4c7e77fc 2449
ab309a6a
MW
2450 if (!USES_GUC_SUBMISSION(dev_priv))
2451 return -ENODEV;
4c7e77fc 2452
a8b9370f
OM
2453 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2454 struct intel_engine_cs *engine;
2455
2456 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2457 continue;
2458
2459 seq_printf(m, "GuC stage descriptor %u:\n", index);
2460 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2461 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2462 seq_printf(m, "\tPriority: %d\n", desc->priority);
2463 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2464 seq_printf(m, "\tEngines used: 0x%x\n",
2465 desc->engines_used);
2466 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2467 desc->db_trigger_phy,
2468 desc->db_trigger_cpu,
2469 desc->db_trigger_uk);
2470 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2471 desc->process_desc);
9a09485d 2472 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
a8b9370f
OM
2473 desc->wq_addr, desc->wq_size);
2474 seq_putc(m, '\n');
2475
2476 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2477 u32 guc_engine_id = engine->guc_id;
2478 struct guc_execlist_context *lrc =
2479 &desc->lrc[guc_engine_id];
2480
2481 seq_printf(m, "\t%s LRC:\n", engine->name);
2482 seq_printf(m, "\t\tContext desc: 0x%x\n",
2483 lrc->context_desc);
2484 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2485 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2486 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2487 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2488 seq_putc(m, '\n');
2489 }
2490 }
2491
2492 return 0;
2493}
2494
4c7e77fc
AD
2495static int i915_guc_log_dump(struct seq_file *m, void *data)
2496{
ac58d2ab
DCS
2497 struct drm_info_node *node = m->private;
2498 struct drm_i915_private *dev_priv = node_to_i915(node);
2499 bool dump_load_err = !!node->info_ent->data;
2500 struct drm_i915_gem_object *obj = NULL;
2501 u32 *log;
2502 int i = 0;
4c7e77fc 2503
ab309a6a
MW
2504 if (!HAS_GUC(dev_priv))
2505 return -ENODEV;
2506
ac58d2ab
DCS
2507 if (dump_load_err)
2508 obj = dev_priv->guc.load_err_log;
2509 else if (dev_priv->guc.log.vma)
2510 obj = dev_priv->guc.log.vma->obj;
4c7e77fc 2511
ac58d2ab
DCS
2512 if (!obj)
2513 return 0;
4c7e77fc 2514
ac58d2ab
DCS
2515 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2516 if (IS_ERR(log)) {
2517 DRM_DEBUG("Failed to pin object\n");
2518 seq_puts(m, "(log data unaccessible)\n");
2519 return PTR_ERR(log);
4c7e77fc
AD
2520 }
2521
ac58d2ab
DCS
2522 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2523 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2524 *(log + i), *(log + i + 1),
2525 *(log + i + 2), *(log + i + 3));
2526
4c7e77fc
AD
2527 seq_putc(m, '\n');
2528
ac58d2ab
DCS
2529 i915_gem_object_unpin_map(obj);
2530
4c7e77fc
AD
2531 return 0;
2532}
2533
4977a287 2534static int i915_guc_log_level_get(void *data, u64 *val)
685534ef 2535{
bcc36d8a 2536 struct drm_i915_private *dev_priv = data;
685534ef 2537
86aa8247 2538 if (!USES_GUC(dev_priv))
ab309a6a
MW
2539 return -ENODEV;
2540
50935ac7 2541 *val = intel_guc_log_get_level(&dev_priv->guc.log);
685534ef
SAK
2542
2543 return 0;
2544}
2545
4977a287 2546static int i915_guc_log_level_set(void *data, u64 val)
685534ef 2547{
bcc36d8a 2548 struct drm_i915_private *dev_priv = data;
685534ef 2549
86aa8247 2550 if (!USES_GUC(dev_priv))
ab309a6a
MW
2551 return -ENODEV;
2552
50935ac7 2553 return intel_guc_log_set_level(&dev_priv->guc.log, val);
685534ef
SAK
2554}
2555
4977a287
MW
2556DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2557 i915_guc_log_level_get, i915_guc_log_level_set,
685534ef
SAK
2558 "%lld\n");
2559
4977a287
MW
2560static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2561{
2562 struct drm_i915_private *dev_priv = inode->i_private;
2563
2564 if (!USES_GUC(dev_priv))
2565 return -ENODEV;
2566
2567 file->private_data = &dev_priv->guc.log;
2568
2569 return intel_guc_log_relay_open(&dev_priv->guc.log);
2570}
2571
2572static ssize_t
2573i915_guc_log_relay_write(struct file *filp,
2574 const char __user *ubuf,
2575 size_t cnt,
2576 loff_t *ppos)
2577{
2578 struct intel_guc_log *log = filp->private_data;
2579
2580 intel_guc_log_relay_flush(log);
2581
2582 return cnt;
2583}
2584
2585static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2586{
2587 struct drm_i915_private *dev_priv = inode->i_private;
2588
2589 intel_guc_log_relay_close(&dev_priv->guc.log);
2590
2591 return 0;
2592}
2593
2594static const struct file_operations i915_guc_log_relay_fops = {
2595 .owner = THIS_MODULE,
2596 .open = i915_guc_log_relay_open,
2597 .write = i915_guc_log_relay_write,
2598 .release = i915_guc_log_relay_release,
2599};
2600
5b7b3086
DP
2601static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2602{
2603 u8 val;
2604 static const char * const sink_status[] = {
2605 "inactive",
2606 "transition to active, capture and display",
2607 "active, display from RFB",
2608 "active, capture and display on sink device timings",
2609 "transition to inactive, capture and display, timing re-sync",
2610 "reserved",
2611 "reserved",
2612 "sink internal error",
2613 };
2614 struct drm_connector *connector = m->private;
7a72c78b 2615 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5b7b3086
DP
2616 struct intel_dp *intel_dp =
2617 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
7a72c78b
RV
2618 int ret;
2619
2620 if (!CAN_PSR(dev_priv)) {
2621 seq_puts(m, "PSR Unsupported\n");
2622 return -ENODEV;
2623 }
5b7b3086
DP
2624
2625 if (connector->status != connector_status_connected)
2626 return -ENODEV;
2627
7a72c78b
RV
2628 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2629
2630 if (ret == 1) {
5b7b3086
DP
2631 const char *str = "unknown";
2632
2633 val &= DP_PSR_SINK_STATE_MASK;
2634 if (val < ARRAY_SIZE(sink_status))
2635 str = sink_status[val];
2636 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2637 } else {
7a72c78b 2638 return ret;
5b7b3086
DP
2639 }
2640
2641 return 0;
2642}
2643DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2644
00b06296
VN
2645static void
2646psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2647{
2648 u32 val, psr_status;
b86bef20 2649
00b06296
VN
2650 if (dev_priv->psr.psr2_enabled) {
2651 static const char * const live_status[] = {
2652 "IDLE",
2653 "CAPTURE",
2654 "CAPTURE_FS",
2655 "SLEEP",
2656 "BUFON_FW",
2657 "ML_UP",
2658 "SU_STANDBY",
2659 "FAST_SLEEP",
2660 "DEEP_SLEEP",
2661 "BUF_ON",
2662 "TG_ON"
2663 };
2664 psr_status = I915_READ(EDP_PSR2_STATUS);
2665 val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
2666 EDP_PSR2_STATUS_STATE_SHIFT;
2667 if (val < ARRAY_SIZE(live_status)) {
2668 seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2669 psr_status, live_status[val]);
2670 return;
2671 }
2672 } else {
2673 static const char * const live_status[] = {
2674 "IDLE",
2675 "SRDONACK",
2676 "SRDENT",
2677 "BUFOFF",
2678 "BUFON",
2679 "AUXACK",
2680 "SRDOFFACK",
2681 "SRDENT_ON",
2682 };
2683 psr_status = I915_READ(EDP_PSR_STATUS);
2684 val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
2685 EDP_PSR_STATUS_STATE_SHIFT;
2686 if (val < ARRAY_SIZE(live_status)) {
2687 seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2688 psr_status, live_status[val]);
2689 return;
2690 }
2691 }
b86bef20 2692
00b06296 2693 seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
b86bef20
CW
2694}
2695
e91fd8c6
RV
/*
 * Report PSR (Panel Self Refresh) state: sink support, driver enable state,
 * busy frontbuffer bits, the HW enable bit, and (HSW/BDW only) the PSR
 * performance counter. Takes a runtime-PM reference and psr.lock while
 * reading PSR registers.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	bool enabled = false;
	bool sink_support;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	sink_support = dev_priv->psr.sink_support;
	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
	/* Without a PSR-capable sink there is nothing more to report. */
	if (!sink_support)
		return 0;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "PSR mode: %s\n",
		   dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);

	/* The enable bit lives in a different register for PSR1 vs PSR2. */
	if (dev_priv->psr.psr2_enabled)
		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
	else
		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}

	psr_source_status(dev_priv, m);
	mutex_unlock(&dev_priv->psr.lock);

	/* IRQ-debug timestamps are only maintained when the debug bit is set. */
	if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   dev_priv->psr.last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n",
			   dev_priv->psr.last_exit);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;
}
2753
54fd3149
DP
/*
 * debugfs write handler for the PSR debug knob. Applies @val via
 * intel_psr_set_debugfs_mode() under a modeset acquire context, using the
 * standard drm_modeset_backoff() retry dance on -EDEADLK.
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

	intel_runtime_pm_get(dev_priv);

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

retry:
	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
	if (ret == -EDEADLK) {
		/* Deadlock: drop and re-take the contended locks, retry. */
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	intel_runtime_pm_put(dev_priv);

	return ret;
}
2785
2786static int
2787i915_edp_psr_debug_get(void *data, u64 *val)
2788{
2789 struct drm_i915_private *dev_priv = data;
2790
2791 if (!CAN_PSR(dev_priv))
2792 return -ENODEV;
2793
2794 *val = READ_ONCE(dev_priv->psr.debug);
2795 return 0;
2796}
2797
/* debugfs attribute wiring the PSR debug get/set handlers; value is a u64. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2801
ec013e7f
JB
/*
 * Report the GPU energy counter in microjoules. Reads the scaling exponent
 * from MSR_RAPL_POWER_UNIT, then scales the raw MCH_SECP_NRG_STTS counter
 * (which is in units of 2^-units joules, per the conversion below).
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	/* rdmsrl_safe() fails on CPUs without this MSR; drop PM ref and bail. */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* Energy unit exponent lives in bits 12:8 of the MSR. */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}
2828
/*
 * Summarize runtime power-management state: GT idleness, IRQ state, the
 * runtime-PM usage count (when CONFIG_PM is built in) and the PCI device
 * power state. Note: a missing HAS_RUNTIME_PM only prints a warning; the
 * rest of the information is still dumped.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
2853
1da51581
ID
/*
 * Dump every power well with its use count, and beneath each well the use
 * count of each display power domain it serves. Holds power_domains->lock
 * so counts are coherent for the duration of the dump.
 */
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->desc->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->desc->domains)
			seq_printf(m, " %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
2881
b7cec66d
DL
/*
 * Report DMC/CSR firmware status: load state, path, version, the platform
 * DC-state transition counters (where the firmware version exposes them),
 * and a few CSR registers. Takes runtime PM for the register reads.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* No payload loaded: skip version/counters, still dump registers. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* DC counters only exist on firmware versions that expose them. */
	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2923
53f5e3ca
JB
2924static void intel_seq_print_mode(struct seq_file *m, int tabs,
2925 struct drm_display_mode *mode)
2926{
2927 int i;
2928
2929 for (i = 0; i < tabs; i++)
2930 seq_putc(m, '\t');
2931
2932 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2933 mode->base.id, mode->name,
2934 mode->vrefresh, mode->clock,
2935 mode->hdisplay, mode->hsync_start,
2936 mode->hsync_end, mode->htotal,
2937 mode->vdisplay, mode->vsync_start,
2938 mode->vsync_end, mode->vtotal,
2939 mode->type, mode->flags);
2940}
2941
2942static void intel_encoder_info(struct seq_file *m,
2943 struct intel_crtc *intel_crtc,
2944 struct intel_encoder *intel_encoder)
2945{
36cdd013
DW
2946 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2947 struct drm_device *dev = &dev_priv->drm;
53f5e3ca
JB
2948 struct drm_crtc *crtc = &intel_crtc->base;
2949 struct intel_connector *intel_connector;
2950 struct drm_encoder *encoder;
2951
2952 encoder = &intel_encoder->base;
2953 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
8e329a03 2954 encoder->base.id, encoder->name);
53f5e3ca
JB
2955 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2956 struct drm_connector *connector = &intel_connector->base;
2957 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2958 connector->base.id,
c23cc417 2959 connector->name,
53f5e3ca
JB
2960 drm_get_connector_status_name(connector->status));
2961 if (connector->status == connector_status_connected) {
2962 struct drm_display_mode *mode = &crtc->mode;
2963 seq_printf(m, ", mode:\n");
2964 intel_seq_print_mode(m, 2, mode);
2965 } else {
2966 seq_putc(m, '\n');
2967 }
2968 }
2969}
2970
/*
 * Dump a CRTC's primary-plane framebuffer (id, source offset, size) and then
 * every encoder driving the CRTC via intel_encoder_info().
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		/* src_x/src_y are 16.16 fixed point; >> 16 keeps whole pixels */
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
2989
2990static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2991{
2992 struct drm_display_mode *mode = panel->fixed_mode;
2993
2994 seq_printf(m, "\tfixed mode:\n");
2995 intel_seq_print_mode(m, 2, mode);
2996}
2997
/*
 * Dump DisplayPort connector details: DPCD revision, audio support, the
 * fixed panel mode for eDP, and the DP downstream-port debug info.
 */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	/* Only eDP connectors carry a fixed panel mode worth printing. */
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
3012
9a148a96
LY
3013static void intel_dp_mst_info(struct seq_file *m,
3014 struct intel_connector *intel_connector)
3015{
3016 struct intel_encoder *intel_encoder = intel_connector->encoder;
3017 struct intel_dp_mst_encoder *intel_mst =
3018 enc_to_mst(&intel_encoder->base);
3019 struct intel_digital_port *intel_dig_port = intel_mst->primary;
3020 struct intel_dp *intel_dp = &intel_dig_port->dp;
3021 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
3022 intel_connector->port);
3023
3024 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3025}
3026
53f5e3ca
JB
3027static void intel_hdmi_info(struct seq_file *m,
3028 struct intel_connector *intel_connector)
3029{
3030 struct intel_encoder *intel_encoder = intel_connector->encoder;
3031 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3032
742f491d 3033 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
53f5e3ca
JB
3034}
3035
/* LVDS connectors only report their fixed panel mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
3041
/*
 * Dump one connector: identity/status, physical info when connected,
 * connector-type-specific details (DP/eDP, LVDS, HDMI) and finally the
 * full probed mode list.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	/* No attached encoder: nothing type-specific (or modes) to print. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		/* MST connectors have a different info dump than SST. */
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
3091
3abc4e09
RF
/* Map a drm_plane_type to a short, fixed-width label for the plane dump. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
3109
/*
 * Render a rotation/reflection bitmask as human-readable text.
 *
 * NOTE(review): returns a pointer to a function-local static buffer, so the
 * result is overwritten by the next call and the function is not reentrant —
 * acceptable for single-threaded debugfs dumps, but do not cache the pointer.
 */
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
3129
/*
 * Dump every plane on a CRTC: type, CRTC-space position/size, source
 * position/size (16.16 fixed point rendered as decimal), pixel format and
 * rotation.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		/* ((x & 0xffff) * 15625) >> 10 renders the 16.16 fraction
		 * as a decimal fraction for the %d.%04u format below.
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}
3172
/*
 * Dump the scaler state of a CRTC: user mask, assigned scaler id, and the
 * in-use/mode state of each hardware scaler.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3200
53f5e3ca
JB
/*
 * Top-level display state dump: every CRTC (mode, cursor, scalers, planes,
 * underrun reporting) followed by every connector. Holds runtime PM across
 * the whole dump; each CRTC is locked individually, connectors under the
 * mode_config mutex.
 */
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		/* Per-CRTC lock keeps the state stable while we print it. */
		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
3261
1b36595f
CW
/*
 * Dump global GT state (awake/epoch, active request count, CS timestamp
 * frequency) and then the full intel_engine_dump() for every engine.
 * Holds runtime PM for the duration.
 */
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct drm_printer p;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s (epoch %u)\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "Global active requests: %d\n",
		   dev_priv->gt.active_requests);
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   dev_priv->info.cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
3286
79e9cd5f
LL
3287static int i915_rcs_topology(struct seq_file *m, void *unused)
3288{
3289 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3290 struct drm_printer p = drm_seq_file_printer(m);
3291
3292 intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3293
3294 return 0;
3295}
3296
c5418a8b
CW
3297static int i915_shrinker_info(struct seq_file *m, void *unused)
3298{
3299 struct drm_i915_private *i915 = node_to_i915(m->private);
3300
3301 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3302 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3303
3304 return 0;
3305}
3306
728e29d7
DV
/*
 * Dump every shared DPLL: identity, which CRTCs use it, active/on state and
 * the full tracked hardware register state. Registers that do not exist on a
 * given platform simply read as whatever the tracked state holds (0 when
 * unused). Holds all modeset locks to keep the state coherent.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf:    0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias:  0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
3355
1ed1ef9d 3356static int i915_wa_registers(struct seq_file *m, void *unused)
888b5995 3357{
548764bb 3358 struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
f4ecfbfc 3359 int i;
888b5995 3360
548764bb
CW
3361 seq_printf(m, "Workarounds applied: %d\n", wa->count);
3362 for (i = 0; i < wa->count; ++i)
3363 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3364 wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
888b5995
AS
3365
3366 return 0;
3367}
3368
d2d4f39b
KM
/* seq_file show callback: report whether IPC (Isoch Priority Control) is on. */
static int i915_ipc_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Isochronous Priority Control: %s\n",
		   yesno(dev_priv->ipc_enabled));
	return 0;
}
3377
/* open callback for the IPC debugfs file; rejects platforms without IPC. */
static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}
3387
/*
 * Write handler for the IPC debugfs file: parses a boolean, updates the
 * enable flag, invalidates the BIOS watermarks and reprograms IPC under a
 * runtime-PM reference.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	int ret;
	bool enable;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	intel_runtime_pm_get(dev_priv);
	if (!dev_priv->ipc_enabled && enable)
		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
	/* Watermarks from the BIOS can no longer be trusted after toggling. */
	dev_priv->wm.distrust_bios_wm = true;
	dev_priv->ipc_enabled = enable;
	intel_enable_ipc(dev_priv);
	intel_runtime_pm_put(dev_priv);

	return len;
}
3410
/* File operations for the read/write IPC status debugfs entry. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3419
c5511e44
DL
/*
 * Dump the display data-buffer (DDB) allocation per pipe: start/end/size
 * for every universal plane plus the cursor. Gen9+ only.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_universal_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			/* plane + 1: user-visible plane numbering is 1-based */
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
3457
/*
 * Report DRRS (Dynamic Refresh Rate Switching) state for one CRTC: the
 * connector(s) driven by it, the VBT-declared DRRS type, and — when DRRS is
 * active — the current refresh-rate state and busy frontbuffer bits.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
				   drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3532
/*
 * Walk every active CRTC and report its DRRS status; notes when no active
 * CRTC exists. Holds all modeset locks for a consistent snapshot.
 */
static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}
3556
11bed958
DA
/*
 * For every MST-capable DisplayPort source port, dump the full MST topology
 * as seen by the topology manager. MST "fake" connectors themselves are
 * skipped; only the SST connector of the physical port is considered.
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3587
/*
 * Write handler for the DP compliance "test active" debugfs file. Parses an
 * integer from userspace and sets compliance.test_active on every connected
 * SST DisplayPort connector; only the literal value 1 activates the flag,
 * anything else clears it.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* Copy and NUL-terminate the user buffer in one step. */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST connectors do not take part in SST compliance testing. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3646
3647static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3648{
e4006713
AS
3649 struct drm_i915_private *dev_priv = m->private;
3650 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3651 struct drm_connector *connector;
3f6a5e1e 3652 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3653 struct intel_dp *intel_dp;
3654
3f6a5e1e
DV
3655 drm_connector_list_iter_begin(dev, &conn_iter);
3656 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3657 struct intel_encoder *encoder;
3658
eb3394fa
TP
3659 if (connector->connector_type !=
3660 DRM_MODE_CONNECTOR_DisplayPort)
3661 continue;
3662
a874b6a3
ML
3663 encoder = to_intel_encoder(connector->encoder);
3664 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3665 continue;
3666
3667 if (encoder && connector->status == connector_status_connected) {
3668 intel_dp = enc_to_intel_dp(&encoder->base);
c1617abc 3669 if (intel_dp->compliance.test_active)
eb3394fa
TP
3670 seq_puts(m, "1");
3671 else
3672 seq_puts(m, "0");
3673 } else
3674 seq_puts(m, "0");
3675 }
3f6a5e1e 3676 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3677
3678 return 0;
3679}
3680
3681static int i915_displayport_test_active_open(struct inode *inode,
36cdd013 3682 struct file *file)
eb3394fa 3683{
36cdd013 3684 return single_open(file, i915_displayport_test_active_show,
e4006713 3685 inode->i_private);
eb3394fa
TP
3686}
3687
3688static const struct file_operations i915_displayport_test_active_fops = {
3689 .owner = THIS_MODULE,
3690 .open = i915_displayport_test_active_open,
3691 .read = seq_read,
3692 .llseek = seq_lseek,
3693 .release = single_release,
3694 .write = i915_displayport_test_active_write
3695};
3696
3697static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3698{
e4006713
AS
3699 struct drm_i915_private *dev_priv = m->private;
3700 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3701 struct drm_connector *connector;
3f6a5e1e 3702 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3703 struct intel_dp *intel_dp;
3704
3f6a5e1e
DV
3705 drm_connector_list_iter_begin(dev, &conn_iter);
3706 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3707 struct intel_encoder *encoder;
3708
eb3394fa
TP
3709 if (connector->connector_type !=
3710 DRM_MODE_CONNECTOR_DisplayPort)
3711 continue;
3712
a874b6a3
ML
3713 encoder = to_intel_encoder(connector->encoder);
3714 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3715 continue;
3716
3717 if (encoder && connector->status == connector_status_connected) {
3718 intel_dp = enc_to_intel_dp(&encoder->base);
b48a5ba9
MN
3719 if (intel_dp->compliance.test_type ==
3720 DP_TEST_LINK_EDID_READ)
3721 seq_printf(m, "%lx",
3722 intel_dp->compliance.test_data.edid);
611032bf
MN
3723 else if (intel_dp->compliance.test_type ==
3724 DP_TEST_LINK_VIDEO_PATTERN) {
3725 seq_printf(m, "hdisplay: %d\n",
3726 intel_dp->compliance.test_data.hdisplay);
3727 seq_printf(m, "vdisplay: %d\n",
3728 intel_dp->compliance.test_data.vdisplay);
3729 seq_printf(m, "bpc: %u\n",
3730 intel_dp->compliance.test_data.bpc);
3731 }
eb3394fa
TP
3732 } else
3733 seq_puts(m, "0");
3734 }
3f6a5e1e 3735 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3736
3737 return 0;
3738}
e4006713 3739DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
eb3394fa
TP
3740
3741static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3742{
e4006713
AS
3743 struct drm_i915_private *dev_priv = m->private;
3744 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3745 struct drm_connector *connector;
3f6a5e1e 3746 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3747 struct intel_dp *intel_dp;
3748
3f6a5e1e
DV
3749 drm_connector_list_iter_begin(dev, &conn_iter);
3750 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3751 struct intel_encoder *encoder;
3752
eb3394fa
TP
3753 if (connector->connector_type !=
3754 DRM_MODE_CONNECTOR_DisplayPort)
3755 continue;
3756
a874b6a3
ML
3757 encoder = to_intel_encoder(connector->encoder);
3758 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3759 continue;
3760
3761 if (encoder && connector->status == connector_status_connected) {
3762 intel_dp = enc_to_intel_dp(&encoder->base);
c1617abc 3763 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
eb3394fa
TP
3764 } else
3765 seq_puts(m, "0");
3766 }
3f6a5e1e 3767 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3768
3769 return 0;
3770}
e4006713 3771DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
eb3394fa 3772
97e94b22 3773static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
369a1342 3774{
36cdd013
DW
3775 struct drm_i915_private *dev_priv = m->private;
3776 struct drm_device *dev = &dev_priv->drm;
369a1342 3777 int level;
de38b95c
VS
3778 int num_levels;
3779
36cdd013 3780 if (IS_CHERRYVIEW(dev_priv))
de38b95c 3781 num_levels = 3;
36cdd013 3782 else if (IS_VALLEYVIEW(dev_priv))
de38b95c 3783 num_levels = 1;
04548cba
VS
3784 else if (IS_G4X(dev_priv))
3785 num_levels = 3;
de38b95c 3786 else
5db94019 3787 num_levels = ilk_wm_max_level(dev_priv) + 1;
369a1342
VS
3788
3789 drm_modeset_lock_all(dev);
3790
3791 for (level = 0; level < num_levels; level++) {
3792 unsigned int latency = wm[level];
3793
97e94b22
DL
3794 /*
3795 * - WM1+ latency values in 0.5us units
de38b95c 3796 * - latencies are in us on gen9/vlv/chv
97e94b22 3797 */
04548cba
VS
3798 if (INTEL_GEN(dev_priv) >= 9 ||
3799 IS_VALLEYVIEW(dev_priv) ||
3800 IS_CHERRYVIEW(dev_priv) ||
3801 IS_G4X(dev_priv))
97e94b22
DL
3802 latency *= 10;
3803 else if (level > 0)
369a1342
VS
3804 latency *= 5;
3805
3806 seq_printf(m, "WM%d %u (%u.%u usec)\n",
97e94b22 3807 level, wm[level], latency / 10, latency % 10);
369a1342
VS
3808 }
3809
3810 drm_modeset_unlock_all(dev);
3811}
3812
3813static int pri_wm_latency_show(struct seq_file *m, void *data)
3814{
36cdd013 3815 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3816 const uint16_t *latencies;
3817
36cdd013 3818 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3819 latencies = dev_priv->wm.skl_latency;
3820 else
36cdd013 3821 latencies = dev_priv->wm.pri_latency;
369a1342 3822
97e94b22 3823 wm_latency_show(m, latencies);
369a1342
VS
3824
3825 return 0;
3826}
3827
3828static int spr_wm_latency_show(struct seq_file *m, void *data)
3829{
36cdd013 3830 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3831 const uint16_t *latencies;
3832
36cdd013 3833 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3834 latencies = dev_priv->wm.skl_latency;
3835 else
36cdd013 3836 latencies = dev_priv->wm.spr_latency;
369a1342 3837
97e94b22 3838 wm_latency_show(m, latencies);
369a1342
VS
3839
3840 return 0;
3841}
3842
3843static int cur_wm_latency_show(struct seq_file *m, void *data)
3844{
36cdd013 3845 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3846 const uint16_t *latencies;
3847
36cdd013 3848 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3849 latencies = dev_priv->wm.skl_latency;
3850 else
36cdd013 3851 latencies = dev_priv->wm.cur_latency;
369a1342 3852
97e94b22 3853 wm_latency_show(m, latencies);
369a1342
VS
3854
3855 return 0;
3856}
3857
3858static int pri_wm_latency_open(struct inode *inode, struct file *file)
3859{
36cdd013 3860 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3861
04548cba 3862 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
369a1342
VS
3863 return -ENODEV;
3864
36cdd013 3865 return single_open(file, pri_wm_latency_show, dev_priv);
369a1342
VS
3866}
3867
3868static int spr_wm_latency_open(struct inode *inode, struct file *file)
3869{
36cdd013 3870 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3871
36cdd013 3872 if (HAS_GMCH_DISPLAY(dev_priv))
369a1342
VS
3873 return -ENODEV;
3874
36cdd013 3875 return single_open(file, spr_wm_latency_show, dev_priv);
369a1342
VS
3876}
3877
3878static int cur_wm_latency_open(struct inode *inode, struct file *file)
3879{
36cdd013 3880 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3881
36cdd013 3882 if (HAS_GMCH_DISPLAY(dev_priv))
369a1342
VS
3883 return -ENODEV;
3884
36cdd013 3885 return single_open(file, cur_wm_latency_show, dev_priv);
369a1342
VS
3886}
3887
3888static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
97e94b22 3889 size_t len, loff_t *offp, uint16_t wm[8])
369a1342
VS
3890{
3891 struct seq_file *m = file->private_data;
36cdd013
DW
3892 struct drm_i915_private *dev_priv = m->private;
3893 struct drm_device *dev = &dev_priv->drm;
97e94b22 3894 uint16_t new[8] = { 0 };
de38b95c 3895 int num_levels;
369a1342
VS
3896 int level;
3897 int ret;
3898 char tmp[32];
3899
36cdd013 3900 if (IS_CHERRYVIEW(dev_priv))
de38b95c 3901 num_levels = 3;
36cdd013 3902 else if (IS_VALLEYVIEW(dev_priv))
de38b95c 3903 num_levels = 1;
04548cba
VS
3904 else if (IS_G4X(dev_priv))
3905 num_levels = 3;
de38b95c 3906 else
5db94019 3907 num_levels = ilk_wm_max_level(dev_priv) + 1;
de38b95c 3908
369a1342
VS
3909 if (len >= sizeof(tmp))
3910 return -EINVAL;
3911
3912 if (copy_from_user(tmp, ubuf, len))
3913 return -EFAULT;
3914
3915 tmp[len] = '\0';
3916
97e94b22
DL
3917 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3918 &new[0], &new[1], &new[2], &new[3],
3919 &new[4], &new[5], &new[6], &new[7]);
369a1342
VS
3920 if (ret != num_levels)
3921 return -EINVAL;
3922
3923 drm_modeset_lock_all(dev);
3924
3925 for (level = 0; level < num_levels; level++)
3926 wm[level] = new[level];
3927
3928 drm_modeset_unlock_all(dev);
3929
3930 return len;
3931}
3932
3933
3934static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3935 size_t len, loff_t *offp)
3936{
3937 struct seq_file *m = file->private_data;
36cdd013 3938 struct drm_i915_private *dev_priv = m->private;
97e94b22 3939 uint16_t *latencies;
369a1342 3940
36cdd013 3941 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3942 latencies = dev_priv->wm.skl_latency;
3943 else
36cdd013 3944 latencies = dev_priv->wm.pri_latency;
97e94b22
DL
3945
3946 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3947}
3948
3949static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3950 size_t len, loff_t *offp)
3951{
3952 struct seq_file *m = file->private_data;
36cdd013 3953 struct drm_i915_private *dev_priv = m->private;
97e94b22 3954 uint16_t *latencies;
369a1342 3955
36cdd013 3956 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3957 latencies = dev_priv->wm.skl_latency;
3958 else
36cdd013 3959 latencies = dev_priv->wm.spr_latency;
97e94b22
DL
3960
3961 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3962}
3963
3964static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3965 size_t len, loff_t *offp)
3966{
3967 struct seq_file *m = file->private_data;
36cdd013 3968 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
3969 uint16_t *latencies;
3970
36cdd013 3971 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3972 latencies = dev_priv->wm.skl_latency;
3973 else
36cdd013 3974 latencies = dev_priv->wm.cur_latency;
369a1342 3975
97e94b22 3976 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3977}
3978
3979static const struct file_operations i915_pri_wm_latency_fops = {
3980 .owner = THIS_MODULE,
3981 .open = pri_wm_latency_open,
3982 .read = seq_read,
3983 .llseek = seq_lseek,
3984 .release = single_release,
3985 .write = pri_wm_latency_write
3986};
3987
3988static const struct file_operations i915_spr_wm_latency_fops = {
3989 .owner = THIS_MODULE,
3990 .open = spr_wm_latency_open,
3991 .read = seq_read,
3992 .llseek = seq_lseek,
3993 .release = single_release,
3994 .write = spr_wm_latency_write
3995};
3996
3997static const struct file_operations i915_cur_wm_latency_fops = {
3998 .owner = THIS_MODULE,
3999 .open = cur_wm_latency_open,
4000 .read = seq_read,
4001 .llseek = seq_lseek,
4002 .release = single_release,
4003 .write = cur_wm_latency_write
4004};
4005
647416f9
KC
4006static int
4007i915_wedged_get(void *data, u64 *val)
f3cd474b 4008{
36cdd013 4009 struct drm_i915_private *dev_priv = data;
f3cd474b 4010
d98c52cf 4011 *val = i915_terminally_wedged(&dev_priv->gpu_error);
f3cd474b 4012
647416f9 4013 return 0;
f3cd474b
CW
4014}
4015
647416f9
KC
4016static int
4017i915_wedged_set(void *data, u64 val)
f3cd474b 4018{
598b6b5a
CW
4019 struct drm_i915_private *i915 = data;
4020 struct intel_engine_cs *engine;
4021 unsigned int tmp;
d46c0517 4022
b8d24a06
MK
4023 /*
4024 * There is no safeguard against this debugfs entry colliding
4025 * with the hangcheck calling same i915_handle_error() in
4026 * parallel, causing an explosion. For now we assume that the
4027 * test harness is responsible enough not to inject gpu hangs
4028 * while it is writing to 'i915_wedged'
4029 */
4030
598b6b5a 4031 if (i915_reset_backoff(&i915->gpu_error))
b8d24a06
MK
4032 return -EAGAIN;
4033
598b6b5a
CW
4034 for_each_engine_masked(engine, i915, val, tmp) {
4035 engine->hangcheck.seqno = intel_engine_get_seqno(engine);
4036 engine->hangcheck.stalled = true;
4037 }
4038
ce800754
CW
4039 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
4040 "Manually set wedged engine mask = %llx", val);
d46c0517 4041
598b6b5a 4042 wait_on_bit(&i915->gpu_error.flags,
d3df42b7
CW
4043 I915_RESET_HANDOFF,
4044 TASK_UNINTERRUPTIBLE);
4045
647416f9 4046 return 0;
f3cd474b
CW
4047}
4048
647416f9
KC
4049DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4050 i915_wedged_get, i915_wedged_set,
3a3b4f98 4051 "%llu\n");
f3cd474b 4052
64486ae7
CW
4053static int
4054fault_irq_set(struct drm_i915_private *i915,
4055 unsigned long *irq,
4056 unsigned long val)
4057{
4058 int err;
4059
4060 err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4061 if (err)
4062 return err;
4063
4064 err = i915_gem_wait_for_idle(i915,
4065 I915_WAIT_LOCKED |
ec625fb9
CW
4066 I915_WAIT_INTERRUPTIBLE,
4067 MAX_SCHEDULE_TIMEOUT);
64486ae7
CW
4068 if (err)
4069 goto err_unlock;
4070
64486ae7
CW
4071 *irq = val;
4072 mutex_unlock(&i915->drm.struct_mutex);
4073
4074 /* Flush idle worker to disarm irq */
7c26240e 4075 drain_delayed_work(&i915->gt.idle_work);
64486ae7
CW
4076
4077 return 0;
4078
4079err_unlock:
4080 mutex_unlock(&i915->drm.struct_mutex);
4081 return err;
4082}
4083
094f9a54
CW
4084static int
4085i915_ring_missed_irq_get(void *data, u64 *val)
4086{
36cdd013 4087 struct drm_i915_private *dev_priv = data;
094f9a54
CW
4088
4089 *val = dev_priv->gpu_error.missed_irq_rings;
4090 return 0;
4091}
4092
4093static int
4094i915_ring_missed_irq_set(void *data, u64 val)
4095{
64486ae7 4096 struct drm_i915_private *i915 = data;
094f9a54 4097
64486ae7 4098 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
094f9a54
CW
4099}
4100
4101DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4102 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4103 "0x%08llx\n");
4104
4105static int
4106i915_ring_test_irq_get(void *data, u64 *val)
4107{
36cdd013 4108 struct drm_i915_private *dev_priv = data;
094f9a54
CW
4109
4110 *val = dev_priv->gpu_error.test_irq_rings;
4111
4112 return 0;
4113}
4114
4115static int
4116i915_ring_test_irq_set(void *data, u64 val)
4117{
64486ae7 4118 struct drm_i915_private *i915 = data;
094f9a54 4119
64486ae7 4120 val &= INTEL_INFO(i915)->ring_mask;
094f9a54 4121 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
094f9a54 4122
64486ae7 4123 return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
094f9a54
CW
4124}
4125
4126DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4127 i915_ring_test_irq_get, i915_ring_test_irq_set,
4128 "0x%08llx\n");
4129
b4a0b32d
CW
4130#define DROP_UNBOUND BIT(0)
4131#define DROP_BOUND BIT(1)
4132#define DROP_RETIRE BIT(2)
4133#define DROP_ACTIVE BIT(3)
4134#define DROP_FREED BIT(4)
4135#define DROP_SHRINK_ALL BIT(5)
4136#define DROP_IDLE BIT(6)
6b048706
CW
4137#define DROP_RESET_ACTIVE BIT(7)
4138#define DROP_RESET_SEQNO BIT(8)
fbbd37b3
CW
4139#define DROP_ALL (DROP_UNBOUND | \
4140 DROP_BOUND | \
4141 DROP_RETIRE | \
4142 DROP_ACTIVE | \
8eadc19b 4143 DROP_FREED | \
b4a0b32d 4144 DROP_SHRINK_ALL |\
6b048706
CW
4145 DROP_IDLE | \
4146 DROP_RESET_ACTIVE | \
4147 DROP_RESET_SEQNO)
647416f9
KC
4148static int
4149i915_drop_caches_get(void *data, u64 *val)
dd624afd 4150{
647416f9 4151 *val = DROP_ALL;
dd624afd 4152
647416f9 4153 return 0;
dd624afd
CW
4154}
4155
647416f9
KC
4156static int
4157i915_drop_caches_set(void *data, u64 val)
dd624afd 4158{
6b048706 4159 struct drm_i915_private *i915 = data;
00c26cf9 4160 int ret = 0;
dd624afd 4161
b4a0b32d
CW
4162 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4163 val, val & DROP_ALL);
dd624afd 4164
6b048706
CW
4165 if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
4166 i915_gem_set_wedged(i915);
4167
dd624afd
CW
4168 /* No need to check and wait for gpu resets, only libdrm auto-restarts
4169 * on ioctls on -EAGAIN. */
6b048706
CW
4170 if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
4171 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
dd624afd 4172 if (ret)
00c26cf9 4173 return ret;
dd624afd 4174
00c26cf9 4175 if (val & DROP_ACTIVE)
6b048706 4176 ret = i915_gem_wait_for_idle(i915,
00c26cf9 4177 I915_WAIT_INTERRUPTIBLE |
ec625fb9
CW
4178 I915_WAIT_LOCKED,
4179 MAX_SCHEDULE_TIMEOUT);
00c26cf9 4180
6b048706
CW
4181 if (val & DROP_RESET_SEQNO) {
4182 intel_runtime_pm_get(i915);
4183 ret = i915_gem_set_global_seqno(&i915->drm, 1);
4184 intel_runtime_pm_put(i915);
4185 }
4186
00c26cf9 4187 if (val & DROP_RETIRE)
6b048706 4188 i915_retire_requests(i915);
00c26cf9 4189
6b048706
CW
4190 mutex_unlock(&i915->drm.struct_mutex);
4191 }
4192
4193 if (val & DROP_RESET_ACTIVE &&
4194 i915_terminally_wedged(&i915->gpu_error)) {
4195 i915_handle_error(i915, ALL_ENGINES, 0, NULL);
4196 wait_on_bit(&i915->gpu_error.flags,
4197 I915_RESET_HANDOFF,
4198 TASK_UNINTERRUPTIBLE);
00c26cf9 4199 }
dd624afd 4200
d92a8cfc 4201 fs_reclaim_acquire(GFP_KERNEL);
21ab4e74 4202 if (val & DROP_BOUND)
6b048706 4203 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
4ad72b7f 4204
21ab4e74 4205 if (val & DROP_UNBOUND)
6b048706 4206 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
dd624afd 4207
8eadc19b 4208 if (val & DROP_SHRINK_ALL)
6b048706 4209 i915_gem_shrink_all(i915);
d92a8cfc 4210 fs_reclaim_release(GFP_KERNEL);
8eadc19b 4211
4dfacb0b
CW
4212 if (val & DROP_IDLE) {
4213 do {
6b048706
CW
4214 if (READ_ONCE(i915->gt.active_requests))
4215 flush_delayed_work(&i915->gt.retire_work);
4216 drain_delayed_work(&i915->gt.idle_work);
4217 } while (READ_ONCE(i915->gt.awake));
4dfacb0b 4218 }
b4a0b32d 4219
c9c70471 4220 if (val & DROP_FREED)
6b048706 4221 i915_gem_drain_freed_objects(i915);
fbbd37b3 4222
647416f9 4223 return ret;
dd624afd
CW
4224}
4225
647416f9
KC
4226DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4227 i915_drop_caches_get, i915_drop_caches_set,
4228 "0x%08llx\n");
dd624afd 4229
647416f9
KC
4230static int
4231i915_cache_sharing_get(void *data, u64 *val)
07b7ddd9 4232{
36cdd013 4233 struct drm_i915_private *dev_priv = data;
07b7ddd9 4234 u32 snpcr;
07b7ddd9 4235
36cdd013 4236 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
004777cb
DV
4237 return -ENODEV;
4238
c8c8fb33 4239 intel_runtime_pm_get(dev_priv);
22bcfc6a 4240
07b7ddd9 4241 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
c8c8fb33
PZ
4242
4243 intel_runtime_pm_put(dev_priv);
07b7ddd9 4244
647416f9 4245 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
07b7ddd9 4246
647416f9 4247 return 0;
07b7ddd9
JB
4248}
4249
647416f9
KC
4250static int
4251i915_cache_sharing_set(void *data, u64 val)
07b7ddd9 4252{
36cdd013 4253 struct drm_i915_private *dev_priv = data;
07b7ddd9 4254 u32 snpcr;
07b7ddd9 4255
36cdd013 4256 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
004777cb
DV
4257 return -ENODEV;
4258
647416f9 4259 if (val > 3)
07b7ddd9
JB
4260 return -EINVAL;
4261
c8c8fb33 4262 intel_runtime_pm_get(dev_priv);
647416f9 4263 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
07b7ddd9
JB
4264
4265 /* Update the cache sharing policy here as well */
4266 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4267 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4268 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4269 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4270
c8c8fb33 4271 intel_runtime_pm_put(dev_priv);
647416f9 4272 return 0;
07b7ddd9
JB
4273}
4274
647416f9
KC
4275DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4276 i915_cache_sharing_get, i915_cache_sharing_set,
4277 "%llu\n");
07b7ddd9 4278
36cdd013 4279static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4280 struct sseu_dev_info *sseu)
5d39525a 4281{
7aa0b14e
CW
4282#define SS_MAX 2
4283 const int ss_max = SS_MAX;
4284 u32 sig1[SS_MAX], sig2[SS_MAX];
5d39525a 4285 int ss;
5d39525a
JM
4286
4287 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4288 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4289 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4290 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4291
4292 for (ss = 0; ss < ss_max; ss++) {
4293 unsigned int eu_cnt;
4294
4295 if (sig1[ss] & CHV_SS_PG_ENABLE)
4296 /* skip disabled subslice */
4297 continue;
4298
f08a0c92 4299 sseu->slice_mask = BIT(0);
8cc76693 4300 sseu->subslice_mask[0] |= BIT(ss);
5d39525a
JM
4301 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4302 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4303 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4304 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
915490d5
ID
4305 sseu->eu_total += eu_cnt;
4306 sseu->eu_per_subslice = max_t(unsigned int,
4307 sseu->eu_per_subslice, eu_cnt);
5d39525a 4308 }
7aa0b14e 4309#undef SS_MAX
5d39525a
JM
4310}
4311
f8c3dcf9
RV
4312static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4313 struct sseu_dev_info *sseu)
4314{
c7fb3c6c 4315#define SS_MAX 6
f8c3dcf9 4316 const struct intel_device_info *info = INTEL_INFO(dev_priv);
c7fb3c6c 4317 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
f8c3dcf9 4318 int s, ss;
f8c3dcf9 4319
b3e7f866 4320 for (s = 0; s < info->sseu.max_slices; s++) {
f8c3dcf9
RV
4321 /*
4322 * FIXME: Valid SS Mask respects the spec and read
4323 * only valid bits for those registers, excluding reserverd
4324 * although this seems wrong because it would leave many
4325 * subslices without ACK.
4326 */
4327 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4328 GEN10_PGCTL_VALID_SS_MASK(s);
4329 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4330 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4331 }
4332
4333 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4334 GEN9_PGCTL_SSA_EU19_ACK |
4335 GEN9_PGCTL_SSA_EU210_ACK |
4336 GEN9_PGCTL_SSA_EU311_ACK;
4337 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4338 GEN9_PGCTL_SSB_EU19_ACK |
4339 GEN9_PGCTL_SSB_EU210_ACK |
4340 GEN9_PGCTL_SSB_EU311_ACK;
4341
b3e7f866 4342 for (s = 0; s < info->sseu.max_slices; s++) {
f8c3dcf9
RV
4343 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4344 /* skip disabled slice */
4345 continue;
4346
4347 sseu->slice_mask |= BIT(s);
8cc76693 4348 sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
f8c3dcf9 4349
b3e7f866 4350 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
f8c3dcf9
RV
4351 unsigned int eu_cnt;
4352
4353 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4354 /* skip disabled subslice */
4355 continue;
4356
4357 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4358 eu_mask[ss % 2]);
4359 sseu->eu_total += eu_cnt;
4360 sseu->eu_per_subslice = max_t(unsigned int,
4361 sseu->eu_per_subslice,
4362 eu_cnt);
4363 }
4364 }
c7fb3c6c 4365#undef SS_MAX
f8c3dcf9
RV
4366}
4367
36cdd013 4368static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4369 struct sseu_dev_info *sseu)
5d39525a 4370{
c7fb3c6c 4371#define SS_MAX 3
b3e7f866 4372 const struct intel_device_info *info = INTEL_INFO(dev_priv);
c7fb3c6c 4373 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
5d39525a 4374 int s, ss;
1c046bc1 4375
b3e7f866 4376 for (s = 0; s < info->sseu.max_slices; s++) {
1c046bc1
JM
4377 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4378 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4379 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4380 }
4381
5d39525a
JM
4382 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4383 GEN9_PGCTL_SSA_EU19_ACK |
4384 GEN9_PGCTL_SSA_EU210_ACK |
4385 GEN9_PGCTL_SSA_EU311_ACK;
4386 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4387 GEN9_PGCTL_SSB_EU19_ACK |
4388 GEN9_PGCTL_SSB_EU210_ACK |
4389 GEN9_PGCTL_SSB_EU311_ACK;
4390
b3e7f866 4391 for (s = 0; s < info->sseu.max_slices; s++) {
5d39525a
JM
4392 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4393 /* skip disabled slice */
4394 continue;
4395
f08a0c92 4396 sseu->slice_mask |= BIT(s);
1c046bc1 4397
f8c3dcf9 4398 if (IS_GEN9_BC(dev_priv))
8cc76693
LL
4399 sseu->subslice_mask[s] =
4400 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
1c046bc1 4401
b3e7f866 4402 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
5d39525a
JM
4403 unsigned int eu_cnt;
4404
cc3f90f0 4405 if (IS_GEN9_LP(dev_priv)) {
57ec171e
ID
4406 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4407 /* skip disabled subslice */
4408 continue;
1c046bc1 4409
8cc76693 4410 sseu->subslice_mask[s] |= BIT(ss);
57ec171e 4411 }
1c046bc1 4412
5d39525a
JM
4413 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4414 eu_mask[ss%2]);
915490d5
ID
4415 sseu->eu_total += eu_cnt;
4416 sseu->eu_per_subslice = max_t(unsigned int,
4417 sseu->eu_per_subslice,
4418 eu_cnt);
5d39525a
JM
4419 }
4420 }
c7fb3c6c 4421#undef SS_MAX
5d39525a
JM
4422}
4423
36cdd013 4424static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4425 struct sseu_dev_info *sseu)
91bedd34 4426{
91bedd34 4427 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
36cdd013 4428 int s;
91bedd34 4429
f08a0c92 4430 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
91bedd34 4431
f08a0c92 4432 if (sseu->slice_mask) {
43b67998
ID
4433 sseu->eu_per_subslice =
4434 INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
8cc76693
LL
4435 for (s = 0; s < fls(sseu->slice_mask); s++) {
4436 sseu->subslice_mask[s] =
4437 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4438 }
57ec171e
ID
4439 sseu->eu_total = sseu->eu_per_subslice *
4440 sseu_subslice_total(sseu);
91bedd34
ŁD
4441
4442 /* subtract fused off EU(s) from enabled slice(s) */
795b38b3 4443 for (s = 0; s < fls(sseu->slice_mask); s++) {
43b67998
ID
4444 u8 subslice_7eu =
4445 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
91bedd34 4446
915490d5 4447 sseu->eu_total -= hweight8(subslice_7eu);
91bedd34
ŁD
4448 }
4449 }
4450}
4451
615d8908
ID
4452static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4453 const struct sseu_dev_info *sseu)
4454{
4455 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4456 const char *type = is_available_info ? "Available" : "Enabled";
8cc76693 4457 int s;
615d8908 4458
c67ba538
ID
4459 seq_printf(m, " %s Slice Mask: %04x\n", type,
4460 sseu->slice_mask);
615d8908 4461 seq_printf(m, " %s Slice Total: %u\n", type,
f08a0c92 4462 hweight8(sseu->slice_mask));
615d8908 4463 seq_printf(m, " %s Subslice Total: %u\n", type,
57ec171e 4464 sseu_subslice_total(sseu));
8cc76693
LL
4465 for (s = 0; s < fls(sseu->slice_mask); s++) {
4466 seq_printf(m, " %s Slice%i subslices: %u\n", type,
4467 s, hweight8(sseu->subslice_mask[s]));
4468 }
615d8908
ID
4469 seq_printf(m, " %s EU Total: %u\n", type,
4470 sseu->eu_total);
4471 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4472 sseu->eu_per_subslice);
4473
4474 if (!is_available_info)
4475 return;
4476
4477 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4478 if (HAS_POOLED_EU(dev_priv))
4479 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4480
4481 seq_printf(m, " Has Slice Power Gating: %s\n",
4482 yesno(sseu->has_slice_pg));
4483 seq_printf(m, " Has Subslice Power Gating: %s\n",
4484 yesno(sseu->has_subslice_pg));
4485 seq_printf(m, " Has EU Power Gating: %s\n",
4486 yesno(sseu->has_eu_pg));
4487}
4488
3873218f
JM
4489static int i915_sseu_status(struct seq_file *m, void *unused)
4490{
36cdd013 4491 struct drm_i915_private *dev_priv = node_to_i915(m->private);
915490d5 4492 struct sseu_dev_info sseu;
3873218f 4493
36cdd013 4494 if (INTEL_GEN(dev_priv) < 8)
3873218f
JM
4495 return -ENODEV;
4496
4497 seq_puts(m, "SSEU Device Info\n");
615d8908 4498 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
3873218f 4499
7f992aba 4500 seq_puts(m, "SSEU Device Status\n");
915490d5 4501 memset(&sseu, 0, sizeof(sseu));
8cc76693
LL
4502 sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4503 sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4504 sseu.max_eus_per_subslice =
4505 INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
238010ed
DW
4506
4507 intel_runtime_pm_get(dev_priv);
4508
36cdd013 4509 if (IS_CHERRYVIEW(dev_priv)) {
915490d5 4510 cherryview_sseu_device_status(dev_priv, &sseu);
36cdd013 4511 } else if (IS_BROADWELL(dev_priv)) {
915490d5 4512 broadwell_sseu_device_status(dev_priv, &sseu);
f8c3dcf9 4513 } else if (IS_GEN9(dev_priv)) {
915490d5 4514 gen9_sseu_device_status(dev_priv, &sseu);
f8c3dcf9
RV
4515 } else if (INTEL_GEN(dev_priv) >= 10) {
4516 gen10_sseu_device_status(dev_priv, &sseu);
7f992aba 4517 }
238010ed
DW
4518
4519 intel_runtime_pm_put(dev_priv);
4520
615d8908 4521 i915_print_sseu_info(m, false, &sseu);
7f992aba 4522
3873218f
JM
4523 return 0;
4524}
4525
6d794d42
BW
4526static int i915_forcewake_open(struct inode *inode, struct file *file)
4527{
d7a133d8 4528 struct drm_i915_private *i915 = inode->i_private;
6d794d42 4529
d7a133d8 4530 if (INTEL_GEN(i915) < 6)
6d794d42
BW
4531 return 0;
4532
d7a133d8
CW
4533 intel_runtime_pm_get(i915);
4534 intel_uncore_forcewake_user_get(i915);
6d794d42
BW
4535
4536 return 0;
4537}
4538
c43b5634 4539static int i915_forcewake_release(struct inode *inode, struct file *file)
6d794d42 4540{
d7a133d8 4541 struct drm_i915_private *i915 = inode->i_private;
6d794d42 4542
d7a133d8 4543 if (INTEL_GEN(i915) < 6)
6d794d42
BW
4544 return 0;
4545
d7a133d8
CW
4546 intel_uncore_forcewake_user_put(i915);
4547 intel_runtime_pm_put(i915);
6d794d42
BW
4548
4549 return 0;
4550}
4551
4552static const struct file_operations i915_forcewake_fops = {
4553 .owner = THIS_MODULE,
4554 .open = i915_forcewake_open,
4555 .release = i915_forcewake_release,
4556};
4557
317eaa95
L
4558static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4559{
4560 struct drm_i915_private *dev_priv = m->private;
4561 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4562
4563 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4564 seq_printf(m, "Detected: %s\n",
4565 yesno(delayed_work_pending(&hotplug->reenable_work)));
4566
4567 return 0;
4568}
4569
4570static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4571 const char __user *ubuf, size_t len,
4572 loff_t *offp)
4573{
4574 struct seq_file *m = file->private_data;
4575 struct drm_i915_private *dev_priv = m->private;
4576 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4577 unsigned int new_threshold;
4578 int i;
4579 char *newline;
4580 char tmp[16];
4581
4582 if (len >= sizeof(tmp))
4583 return -EINVAL;
4584
4585 if (copy_from_user(tmp, ubuf, len))
4586 return -EFAULT;
4587
4588 tmp[len] = '\0';
4589
4590 /* Strip newline, if any */
4591 newline = strchr(tmp, '\n');
4592 if (newline)
4593 *newline = '\0';
4594
4595 if (strcmp(tmp, "reset") == 0)
4596 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4597 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4598 return -EINVAL;
4599
4600 if (new_threshold > 0)
4601 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4602 new_threshold);
4603 else
4604 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4605
4606 spin_lock_irq(&dev_priv->irq_lock);
4607 hotplug->hpd_storm_threshold = new_threshold;
4608 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4609 for_each_hpd_pin(i)
4610 hotplug->stats[i].count = 0;
4611 spin_unlock_irq(&dev_priv->irq_lock);
4612
4613 /* Re-enable hpd immediately if we were in an irq storm */
4614 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4615
4616 return len;
4617}
4618
/* seq_file plumbing for i915_hpd_storm_ctl. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4632
35954e88
R
4633static int i915_drrs_ctl_set(void *data, u64 val)
4634{
4635 struct drm_i915_private *dev_priv = data;
4636 struct drm_device *dev = &dev_priv->drm;
4637 struct intel_crtc *intel_crtc;
4638 struct intel_encoder *encoder;
4639 struct intel_dp *intel_dp;
4640
4641 if (INTEL_GEN(dev_priv) < 7)
4642 return -ENODEV;
4643
4644 drm_modeset_lock_all(dev);
4645 for_each_intel_crtc(dev, intel_crtc) {
4646 if (!intel_crtc->base.state->active ||
4647 !intel_crtc->config->has_drrs)
4648 continue;
4649
4650 for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
4651 if (encoder->type != INTEL_OUTPUT_EDP)
4652 continue;
4653
4654 DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4655 val ? "en" : "dis", val);
4656
4657 intel_dp = enc_to_intel_dp(&encoder->base);
4658 if (val)
4659 intel_edp_drrs_enable(intel_dp,
4660 intel_crtc->config);
4661 else
4662 intel_edp_drrs_disable(intel_dp,
4663 intel_crtc->config);
4664 }
4665 }
4666 drm_modeset_unlock_all(dev);
4667
4668 return 0;
4669}
4670
4671DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4672
/*
 * Writing a truthy value re-arms FIFO underrun reporting on every
 * active pipe (and resets the FBC underrun state); any other value is
 * accepted as a no-op. Returns @cnt on success or a negative errno.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		/* Per-crtc lock, interruptible so userspace can bail out. */
		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			/*
			 * Wait for any in-flight commit on this crtc to fully
			 * land (hw programmed, flip completed) before re-arming.
			 */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		/* Unlock before propagating any error from the waits above. */
		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4726
/* Write-only control: echo 1 > i915_fifo_underrun_reset to re-arm. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4733
/*
 * Read-only informational debugfs entries, registered in bulk via
 * drm_debugfs_create_files(). The optional fourth field is passed to
 * the show callback as its data argument (used here to select the GuC
 * load-error log dump).
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2017263e 4784
/*
 * Writable debugfs entries with dedicated file_operations; each is
 * created individually by i915_debugfs_register(). The error-state
 * entries are only built when error capture is configured in.
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4814
1dac891c 4815int i915_debugfs_register(struct drm_i915_private *dev_priv)
2017263e 4816{
91c8a326 4817 struct drm_minor *minor = dev_priv->drm.primary;
b05eeb0f 4818 struct dentry *ent;
6cc42152 4819 int i;
f3cd474b 4820
b05eeb0f
NT
4821 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4822 minor->debugfs_root, to_i915(minor->dev),
4823 &i915_forcewake_fops);
4824 if (!ent)
4825 return -ENOMEM;
6a9c308d 4826
34b9674c 4827 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
b05eeb0f
NT
4828 ent = debugfs_create_file(i915_debugfs_files[i].name,
4829 S_IRUGO | S_IWUSR,
4830 minor->debugfs_root,
4831 to_i915(minor->dev),
34b9674c 4832 i915_debugfs_files[i].fops);
b05eeb0f
NT
4833 if (!ent)
4834 return -ENOMEM;
34b9674c 4835 }
40633219 4836
27c202ad
BG
4837 return drm_debugfs_create_files(i915_debugfs_list,
4838 I915_DEBUGFS_ENTRIES,
2017263e
BG
4839 minor->debugfs_root, minor);
4840}
4841
aa7471d2
JN
4842struct dpcd_block {
4843 /* DPCD dump start address. */
4844 unsigned int offset;
4845 /* DPCD dump end address, inclusive. If unset, .size will be used. */
4846 unsigned int end;
4847 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4848 size_t size;
4849 /* Only valid for eDP. */
4850 bool edp;
4851};
4852
4853static const struct dpcd_block i915_dpcd_debug[] = {
4854 { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
4855 { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
4856 { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
4857 { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
4858 { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
4859 { .offset = DP_SET_POWER },
4860 { .offset = DP_EDP_DPCD_REV },
4861 { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
4862 { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
4863 { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
4864};
4865
4866static int i915_dpcd_show(struct seq_file *m, void *data)
4867{
4868 struct drm_connector *connector = m->private;
4869 struct intel_dp *intel_dp =
4870 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4871 uint8_t buf[16];
4872 ssize_t err;
4873 int i;
4874
5c1a8875
MK
4875 if (connector->status != connector_status_connected)
4876 return -ENODEV;
4877
aa7471d2
JN
4878 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4879 const struct dpcd_block *b = &i915_dpcd_debug[i];
4880 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4881
4882 if (b->edp &&
4883 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4884 continue;
4885
4886 /* low tech for now */
4887 if (WARN_ON(size > sizeof(buf)))
4888 continue;
4889
4890 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4891 if (err <= 0) {
4892 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4893 size, b->offset, err);
4894 continue;
4895 }
4896
4897 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
b3f9d7d7 4898 }
aa7471d2
JN
4899
4900 return 0;
4901}
e4006713 4902DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
aa7471d2 4903
ecbd6781
DW
4904static int i915_panel_show(struct seq_file *m, void *data)
4905{
4906 struct drm_connector *connector = m->private;
4907 struct intel_dp *intel_dp =
4908 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4909
4910 if (connector->status != connector_status_connected)
4911 return -ENODEV;
4912
4913 seq_printf(m, "Panel power up delay: %d\n",
4914 intel_dp->panel_power_up_delay);
4915 seq_printf(m, "Panel power down delay: %d\n",
4916 intel_dp->panel_power_down_delay);
4917 seq_printf(m, "Backlight on delay: %d\n",
4918 intel_dp->backlight_on_delay);
4919 seq_printf(m, "Backlight off delay: %d\n",
4920 intel_dp->backlight_off_delay);
4921
4922 return 0;
4923}
e4006713 4924DEFINE_SHOW_ATTRIBUTE(i915_panel);
ecbd6781 4925
aa7471d2
JN
4926/**
4927 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4928 * @connector: pointer to a registered drm_connector
4929 *
4930 * Cleanup will be done by drm_connector_unregister() through a call to
4931 * drm_debugfs_connector_remove().
4932 *
4933 * Returns 0 on success, negative error codes on error.
4934 */
4935int i915_debugfs_connector_add(struct drm_connector *connector)
4936{
4937 struct dentry *root = connector->debugfs_entry;
4938
4939 /* The connector must have been registered beforehands. */
4940 if (!root)
4941 return -ENODEV;
4942
4943 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4944 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
ecbd6781
DW
4945 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4946 connector, &i915_dpcd_fops);
4947
5b7b3086 4948 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
ecbd6781
DW
4949 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4950 connector, &i915_panel_fops);
5b7b3086
DP
4951 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4952 connector, &i915_psr_sink_status_fops);
4953 }
aa7471d2
JN
4954
4955 return 0;
4956}