]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/gpu/drm/i915/i915_debugfs.c
drm/i915/perf: More documentation hooked to i915.rst
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / i915 / i915_debugfs.c
CommitLineData
2017263e
BG
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
29#include <linux/seq_file.h>
b2c88f5b 30#include <linux/circ_buf.h>
926321d5 31#include <linux/ctype.h>
f3cd474b 32#include <linux/debugfs.h>
5a0e3ad6 33#include <linux/slab.h>
2d1a8a48 34#include <linux/export.h>
6d2b8885 35#include <linux/list_sort.h>
ec013e7f 36#include <asm/msr-index.h>
760285e7 37#include <drm/drmP.h>
4e5359cd 38#include "intel_drv.h"
e5c65260 39#include "intel_ringbuffer.h"
760285e7 40#include <drm/i915_drm.h>
2017263e
BG
41#include "i915_drv.h"
42
36cdd013
DW
43static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
44{
45 return to_i915(node->minor->dev);
46}
47
497666d8
DL
48/* As the drm_debugfs_init() routines are called before dev->dev_private is
49 * allocated we need to hook into the minor for release. */
50static int
51drm_add_fake_info_node(struct drm_minor *minor,
52 struct dentry *ent,
53 const void *key)
54{
55 struct drm_info_node *node;
56
57 node = kmalloc(sizeof(*node), GFP_KERNEL);
58 if (node == NULL) {
59 debugfs_remove(ent);
60 return -ENOMEM;
61 }
62
63 node->minor = minor;
64 node->dent = ent;
36cdd013 65 node->info_ent = (void *)key;
497666d8
DL
66
67 mutex_lock(&minor->debugfs_lock);
68 list_add(&node->list, &minor->debugfs_list);
69 mutex_unlock(&minor->debugfs_lock);
70
71 return 0;
72}
73
70d39fe4
CW
/* debugfs entry: dump the device generation, platform name, PCH type and
 * every device-info feature flag as a "flag: yes/no" line. */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
	/* Expand one seq_printf per flag; the helper macro is strictly local. */
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	return 0;
}
2017263e 88
/* Status-string flag: '*' while the object is still active on the GPU. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
93
a7363de7 94static char get_pin_flag(struct drm_i915_gem_object *obj)
be12a86b
TU
95{
96 return obj->pin_display ? 'p' : ' ';
97}
98
a7363de7 99static char get_tiling_flag(struct drm_i915_gem_object *obj)
a6172a80 100{
3e510a8e 101 switch (i915_gem_object_get_tiling(obj)) {
0206e353 102 default:
be12a86b
TU
103 case I915_TILING_NONE: return ' ';
104 case I915_TILING_X: return 'X';
105 case I915_TILING_Y: return 'Y';
0206e353 106 }
a6172a80
CW
107}
108
a7363de7 109static char get_global_flag(struct drm_i915_gem_object *obj)
be12a86b 110{
275f039d 111 return !list_empty(&obj->userfault_link) ? 'g' : ' ';
be12a86b
TU
112}
113
a7363de7 114static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
1d693bcc 115{
a4f5ea64 116 return obj->mm.mapping ? 'M' : ' ';
1d693bcc
BW
117}
118
ca1543be
TU
119static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
120{
121 u64 size = 0;
122 struct i915_vma *vma;
123
1c7f4bca 124 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3272db53 125 if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
ca1543be
TU
126 size += vma->node.size;
127 }
128
129 return size;
130}
131
37811fcc
CW
132static void
133describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
134{
b4716185 135 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
e2f80391 136 struct intel_engine_cs *engine;
1d693bcc 137 struct i915_vma *vma;
faf5bf0a 138 unsigned int frontbuffer_bits;
d7f46fc4
BW
139 int pin_count = 0;
140
188c1ab7
CW
141 lockdep_assert_held(&obj->base.dev->struct_mutex);
142
d07f0e59 143 seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
37811fcc 144 &obj->base,
be12a86b 145 get_active_flag(obj),
37811fcc
CW
146 get_pin_flag(obj),
147 get_tiling_flag(obj),
1d693bcc 148 get_global_flag(obj),
be12a86b 149 get_pin_mapped_flag(obj),
a05a5862 150 obj->base.size / 1024,
37811fcc 151 obj->base.read_domains,
d07f0e59 152 obj->base.write_domain,
36cdd013 153 i915_cache_level_str(dev_priv, obj->cache_level),
a4f5ea64
CW
154 obj->mm.dirty ? " dirty" : "",
155 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
37811fcc
CW
156 if (obj->base.name)
157 seq_printf(m, " (name: %d)", obj->base.name);
1c7f4bca 158 list_for_each_entry(vma, &obj->vma_list, obj_link) {
20dfbde4 159 if (i915_vma_is_pinned(vma))
d7f46fc4 160 pin_count++;
ba0635ff
DC
161 }
162 seq_printf(m, " (pinned x %d)", pin_count);
cc98b413
CW
163 if (obj->pin_display)
164 seq_printf(m, " (display)");
1c7f4bca 165 list_for_each_entry(vma, &obj->vma_list, obj_link) {
15717de2
CW
166 if (!drm_mm_node_allocated(&vma->node))
167 continue;
168
8d2fdc3f 169 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
3272db53 170 i915_vma_is_ggtt(vma) ? "g" : "pp",
8d2fdc3f 171 vma->node.start, vma->node.size);
3272db53 172 if (i915_vma_is_ggtt(vma))
596c5923 173 seq_printf(m, ", type: %u", vma->ggtt_view.type);
49ef5294
CW
174 if (vma->fence)
175 seq_printf(m, " , fence: %d%s",
176 vma->fence->id,
177 i915_gem_active_isset(&vma->last_fence) ? "*" : "");
596c5923 178 seq_puts(m, ")");
1d693bcc 179 }
c1ad11fc 180 if (obj->stolen)
440fd528 181 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
27c01aae 182
d07f0e59 183 engine = i915_gem_object_last_write_engine(obj);
27c01aae
CW
184 if (engine)
185 seq_printf(m, " (%s)", engine->name);
186
faf5bf0a
CW
187 frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
188 if (frontbuffer_bits)
189 seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
37811fcc
CW
190}
191
6d2b8885
CW
192static int obj_rank_by_stolen(void *priv,
193 struct list_head *A, struct list_head *B)
194{
195 struct drm_i915_gem_object *a =
b25cb2f8 196 container_of(A, struct drm_i915_gem_object, obj_exec_link);
6d2b8885 197 struct drm_i915_gem_object *b =
b25cb2f8 198 container_of(B, struct drm_i915_gem_object, obj_exec_link);
6d2b8885 199
2d05fa16
RV
200 if (a->stolen->start < b->stolen->start)
201 return -1;
202 if (a->stolen->start > b->stolen->start)
203 return 1;
204 return 0;
6d2b8885
CW
205}
206
/* debugfs entry: list every GEM object backed by stolen memory, sorted by
 * stolen offset, followed by object-count and size totals. Objects are
 * temporarily threaded onto a local list via obj_exec_link (safe because
 * struct_mutex is held and nothing is executing). */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}
	/* Unbound objects contribute to the object total only (no GTT size). */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		/* Restore obj_exec_link to an empty state for later users. */
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
255
/* Per-client (or kernel) GEM accounting, accumulated by per_file_stats()
 * and per_file_ctx_stats(). All size fields are in bytes. */
struct file_stats {
	struct drm_i915_file_private *file_priv;	/* owner filter for ppgtt VMAs */
	unsigned long count;		/* number of objects seen */
	u64 total, unbound;		/* total size / size with no VMA bound */
	u64 global, shared;		/* GGTT-bound size / named or dma-buf exported */
	u64 active, inactive;		/* bound VMA size, busy vs idle */
};
263
/* idr_for_each() callback: fold one GEM object into a struct file_stats.
 * Always returns 0 so the iteration never aborts early. */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/* Only count ppgtt VMAs that belong to this client;
			 * another process's VM bindings are not ours. */
			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
298
b0da1b79
CW
/* Emit one summary line for @stats under label @name; silent when empty.
 * NOTE: macro evaluates @stats many times - pass a plain lvalue only. */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
493018dc
BV
311
/* Accumulate every object in every engine's batch-buffer pool into one
 * file_stats summary and print it under the "[k]batch pool" label. */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			/* id 0: these are kernel-owned, not client objects. */
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
334
15da9565
CW
335static int per_file_ctx_stats(int id, void *ptr, void *data)
336{
337 struct i915_gem_context *ctx = ptr;
338 int n;
339
340 for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
341 if (ctx->engine[n].state)
bf3783e5 342 per_file_stats(0, ctx->engine[n].state->obj, data);
dca33ecc 343 if (ctx->engine[n].ring)
57e88531 344 per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
15da9565
CW
345 }
346
347 return 0;
348}
349
/* Sum the kernel context plus every client's contexts into one file_stats
 * line ("[k]contexts"). Takes struct_mutex for the context walk;
 * NOTE(review): the filelist walk appears to rely on the caller holding
 * filelist_mutex - confirm against i915_gem_object_info(). */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
371
/* debugfs entry: global GEM memory overview - totals for unbound, bound,
 * purgeable, mapped and display-pinned objects, the GGTT size, the batch
 * pool and context summaries, and finally a per-client breakdown. */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count;
	u64 size, mapped_size, purgeable_size, dpy_size;
	struct drm_i915_gem_object *obj;
	struct drm_file *file;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* purgeable/mapped counters deliberately keep accumulating across
	 * both lists; only size/count and the display stats restart here. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		size += obj->base.size;
		++count;

		if (obj->pin_display) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u display objects (pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/* filelist_mutex protects dev->filelist; struct_mutex is re-taken
	 * inside the loop for the request-list peek (lock order matters). */
	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_gem_request *request;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		mutex_lock(&dev->struct_mutex);
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   client_list);
		rcu_read_lock();
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
481
/* debugfs entry: describe every bound GEM object (or, when the info node
 * carries non-NULL ->data, only display-pinned ones) plus totals. */
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	/* The info node's ->data doubles as the "display only" filter flag. */
	bool show_pin_display_only = !!node->info_ent->data;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (show_pin_display_only && !obj->pin_display)
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
516
4e5359cd
SF
517static int i915_gem_pageflip_info(struct seq_file *m, void *data)
518{
36cdd013
DW
519 struct drm_i915_private *dev_priv = node_to_i915(m->private);
520 struct drm_device *dev = &dev_priv->drm;
4e5359cd 521 struct intel_crtc *crtc;
8a270ebf
DV
522 int ret;
523
524 ret = mutex_lock_interruptible(&dev->struct_mutex);
525 if (ret)
526 return ret;
4e5359cd 527
d3fcc808 528 for_each_intel_crtc(dev, crtc) {
9db4a9c7
JB
529 const char pipe = pipe_name(crtc->pipe);
530 const char plane = plane_name(crtc->plane);
51cbaf01 531 struct intel_flip_work *work;
4e5359cd 532
5e2d7afc 533 spin_lock_irq(&dev->event_lock);
5a21b665
DV
534 work = crtc->flip_work;
535 if (work == NULL) {
9db4a9c7 536 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
4e5359cd
SF
537 pipe, plane);
538 } else {
5a21b665
DV
539 u32 pending;
540 u32 addr;
541
542 pending = atomic_read(&work->pending);
543 if (pending) {
544 seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
545 pipe, plane);
546 } else {
547 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
548 pipe, plane);
549 }
550 if (work->flip_queued_req) {
24327f83 551 struct intel_engine_cs *engine = work->flip_queued_req->engine;
5a21b665 552
312c3c47 553 seq_printf(m, "Flip queued on %s at seqno %x, last submitted seqno %x [current breadcrumb %x], completed? %d\n",
5a21b665 554 engine->name,
24327f83 555 work->flip_queued_req->global_seqno,
312c3c47 556 intel_engine_last_submit(engine),
1b7744e7 557 intel_engine_get_seqno(engine),
f69a02c9 558 i915_gem_request_completed(work->flip_queued_req));
5a21b665
DV
559 } else
560 seq_printf(m, "Flip not associated with any ring\n");
561 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
562 work->flip_queued_vblank,
563 work->flip_ready_vblank,
564 intel_crtc_get_vblank_counter(crtc));
565 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
566
36cdd013 567 if (INTEL_GEN(dev_priv) >= 4)
5a21b665
DV
568 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
569 else
570 addr = I915_READ(DSPADDR(crtc->plane));
571 seq_printf(m, "Current scanout address 0x%08x\n", addr);
572
573 if (work->pending_flip_obj) {
574 seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
575 seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
4e5359cd
SF
576 }
577 }
5e2d7afc 578 spin_unlock_irq(&dev->event_lock);
4e5359cd
SF
579 }
580
8a270ebf
DV
581 mutex_unlock(&dev->struct_mutex);
582
4e5359cd
SF
583 return 0;
584}
585
493018dc
BV
/* debugfs entry: dump every engine's batch-buffer pool, bucket by bucket,
 * describing each cached object and printing a grand total. */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First pass: count the bucket for the header line. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			/* Second pass: describe each object in the bucket. */
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
630
1b36595f
CW
/* Print one request as "<prefix><global seqno> [ctx:fence seqno] prio=N
 * @ <age>ms: <timeline name>". */
static void print_request(struct seq_file *m,
			  struct drm_i915_gem_request *rq,
			  const char *prefix)
{
	seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
		   rq->priotree.priority,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   rq->timeline->common->name);
}
641
2017263e
BG
/* debugfs entry: list outstanding requests on each engine's timeline,
 * or "No requests" when every engine is idle. */
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_request *req;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret, any;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_engine(engine, dev_priv, id) {
		int count;

		/* Walk the timeline twice: once to count, once to print. */
		count = 0;
		list_for_each_entry(req, &engine->timeline->requests, link)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", engine->name, count);
		list_for_each_entry(req, &engine->timeline->requests, link)
			print_request(m, req, "    ");

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}
678
/* Print an engine's current hardware seqno followed by every thread
 * currently parked in its breadcrumbs wait tree. */
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *rb;

	seq_printf(m, "Current sequence (%s): %x\n",
		   engine->name, intel_engine_get_seqno(engine));

	/* irq-off: the breadcrumbs lock is also taken from interrupt context. */
	spin_lock_irq(&b->lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = container_of(rb, typeof(*w), node);

		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->lock);
}
697
2017263e
BG
/* debugfs entry: per-engine seqno/waiter dump for every engine. */
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		i915_ring_seqno_info(m, engine);

	return 0;
}
709
710
/* debugfs entry: raw dump of the interrupt registers (IER/IIR/IMR and
 * friends), with a per-platform register layout: Cherryview, gen8+,
 * Valleyview, pre-PCH-split, then the Ironlake+ PCH-split default.
 * Holds a runtime-pm wakeref for the duration; pipe registers are only
 * read while the corresponding power domain can be acquired. */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/* Reading PIPESTAT of a powered-down pipe would hang. */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	for_each_engine(engine, dev_priv, id) {
		if (INTEL_GEN(dev_priv) >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
		i915_ring_seqno_info(m, engine);
	}
	intel_runtime_pm_put(dev_priv);

	return 0;
}
911
a6172a80
CW
/* debugfs entry: for each hardware fence register, print its pin count and
 * describe the VMA's object currently occupying it (or "unused"). */
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
938
98a2f411
CW
939#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
940
d5442303
DV
941static ssize_t
942i915_error_state_write(struct file *filp,
943 const char __user *ubuf,
944 size_t cnt,
945 loff_t *ppos)
946{
edc3d884 947 struct i915_error_state_file_priv *error_priv = filp->private_data;
d5442303
DV
948
949 DRM_DEBUG_DRIVER("Resetting error state\n");
12ff05e7 950 i915_destroy_error_state(error_priv->i915);
d5442303
DV
951
952 return cnt;
953}
954
955static int i915_error_state_open(struct inode *inode, struct file *file)
956{
36cdd013 957 struct drm_i915_private *dev_priv = inode->i_private;
d5442303 958 struct i915_error_state_file_priv *error_priv;
d5442303
DV
959
960 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
961 if (!error_priv)
962 return -ENOMEM;
963
12ff05e7 964 error_priv->i915 = dev_priv;
d5442303 965
36cdd013 966 i915_error_state_get(&dev_priv->drm, error_priv);
d5442303 967
edc3d884
MK
968 file->private_data = error_priv;
969
970 return 0;
d5442303
DV
971}
972
973static int i915_error_state_release(struct inode *inode, struct file *file)
974{
edc3d884 975 struct i915_error_state_file_priv *error_priv = file->private_data;
d5442303 976
95d5bfb3 977 i915_error_state_put(error_priv);
d5442303
DV
978 kfree(error_priv);
979
edc3d884
MK
980 return 0;
981}
982
4dc955f7
MK
983static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
984 size_t count, loff_t *pos)
985{
986 struct i915_error_state_file_priv *error_priv = file->private_data;
987 struct drm_i915_error_state_buf error_str;
988 loff_t tmp_pos = 0;
989 ssize_t ret_count = 0;
990 int ret;
991
12ff05e7
TU
992 ret = i915_error_state_buf_init(&error_str, error_priv->i915,
993 count, *pos);
4dc955f7
MK
994 if (ret)
995 return ret;
edc3d884 996
fc16b48b 997 ret = i915_error_state_to_str(&error_str, error_priv);
edc3d884
MK
998 if (ret)
999 goto out;
1000
edc3d884
MK
1001 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
1002 error_str.buf,
1003 error_str.bytes);
1004
1005 if (ret_count < 0)
1006 ret = ret_count;
1007 else
1008 *pos = error_str.start + ret_count;
1009out:
4dc955f7 1010 i915_error_state_buf_release(&error_str);
edc3d884 1011 return ret ?: ret_count;
d5442303
DV
1012}
1013
1014static const struct file_operations i915_error_state_fops = {
1015 .owner = THIS_MODULE,
1016 .open = i915_error_state_open,
edc3d884 1017 .read = i915_error_state_read,
d5442303
DV
1018 .write = i915_error_state_write,
1019 .llseek = default_llseek,
1020 .release = i915_error_state_release,
1021};
1022
98a2f411
CW
1023#endif
1024
647416f9
KC
1025static int
1026i915_next_seqno_get(void *data, u64 *val)
40633219 1027{
36cdd013 1028 struct drm_i915_private *dev_priv = data;
40633219 1029
4c266edb 1030 *val = 1 + atomic_read(&dev_priv->gt.global_timeline.seqno);
647416f9 1031 return 0;
40633219
MK
1032}
1033
647416f9
KC
1034static int
1035i915_next_seqno_set(void *data, u64 val)
1036{
36cdd013
DW
1037 struct drm_i915_private *dev_priv = data;
1038 struct drm_device *dev = &dev_priv->drm;
40633219
MK
1039 int ret;
1040
40633219
MK
1041 ret = mutex_lock_interruptible(&dev->struct_mutex);
1042 if (ret)
1043 return ret;
1044
73cb9701 1045 ret = i915_gem_set_global_seqno(dev, val);
40633219
MK
1046 mutex_unlock(&dev->struct_mutex);
1047
647416f9 1048 return ret;
40633219
MK
1049}
1050
647416f9
KC
1051DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1052 i915_next_seqno_get, i915_next_seqno_set,
3a3b4f98 1053 "0x%llx\n");
40633219 1054
adb4bd12 1055static int i915_frequency_info(struct seq_file *m, void *unused)
f97108d1 1056{
36cdd013
DW
1057 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1058 struct drm_device *dev = &dev_priv->drm;
c8c8fb33
PZ
1059 int ret = 0;
1060
1061 intel_runtime_pm_get(dev_priv);
3b8d8d91 1062
36cdd013 1063 if (IS_GEN5(dev_priv)) {
3b8d8d91
JB
1064 u16 rgvswctl = I915_READ16(MEMSWCTL);
1065 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1066
1067 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1068 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1069 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1070 MEMSTAT_VID_SHIFT);
1071 seq_printf(m, "Current P-state: %d\n",
1072 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
36cdd013 1073 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
666a4537
WB
1074 u32 freq_sts;
1075
1076 mutex_lock(&dev_priv->rps.hw_lock);
1077 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1078 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1079 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1080
1081 seq_printf(m, "actual GPU freq: %d MHz\n",
1082 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1083
1084 seq_printf(m, "current GPU freq: %d MHz\n",
1085 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
1086
1087 seq_printf(m, "max GPU freq: %d MHz\n",
1088 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1089
1090 seq_printf(m, "min GPU freq: %d MHz\n",
1091 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
1092
1093 seq_printf(m, "idle GPU freq: %d MHz\n",
1094 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
1095
1096 seq_printf(m,
1097 "efficient (RPe) frequency: %d MHz\n",
1098 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
1099 mutex_unlock(&dev_priv->rps.hw_lock);
36cdd013 1100 } else if (INTEL_GEN(dev_priv) >= 6) {
35040562
BP
1101 u32 rp_state_limits;
1102 u32 gt_perf_status;
1103 u32 rp_state_cap;
0d8f9491 1104 u32 rpmodectl, rpinclimit, rpdeclimit;
8e8c06cd 1105 u32 rpstat, cagf, reqf;
ccab5c82
JB
1106 u32 rpupei, rpcurup, rpprevup;
1107 u32 rpdownei, rpcurdown, rpprevdown;
9dd3c605 1108 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
3b8d8d91
JB
1109 int max_freq;
1110
35040562 1111 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
cc3f90f0 1112 if (IS_GEN9_LP(dev_priv)) {
35040562
BP
1113 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1114 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1115 } else {
1116 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1117 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1118 }
1119
3b8d8d91 1120 /* RPSTAT1 is in the GT power well */
d1ebd816
BW
1121 ret = mutex_lock_interruptible(&dev->struct_mutex);
1122 if (ret)
c8c8fb33 1123 goto out;
d1ebd816 1124
59bad947 1125 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
3b8d8d91 1126
8e8c06cd 1127 reqf = I915_READ(GEN6_RPNSWREQ);
36cdd013 1128 if (IS_GEN9(dev_priv))
60260a5b
AG
1129 reqf >>= 23;
1130 else {
1131 reqf &= ~GEN6_TURBO_DISABLE;
36cdd013 1132 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
60260a5b
AG
1133 reqf >>= 24;
1134 else
1135 reqf >>= 25;
1136 }
7c59a9c1 1137 reqf = intel_gpu_freq(dev_priv, reqf);
8e8c06cd 1138
0d8f9491
CW
1139 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1140 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1141 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1142
ccab5c82 1143 rpstat = I915_READ(GEN6_RPSTAT1);
d6cda9c7
AG
1144 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1145 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1146 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1147 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1148 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1149 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
36cdd013 1150 if (IS_GEN9(dev_priv))
60260a5b 1151 cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
36cdd013 1152 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
f82855d3
BW
1153 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
1154 else
1155 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
7c59a9c1 1156 cagf = intel_gpu_freq(dev_priv, cagf);
ccab5c82 1157
59bad947 1158 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
d1ebd816
BW
1159 mutex_unlock(&dev->struct_mutex);
1160
36cdd013 1161 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
9dd3c605
PZ
1162 pm_ier = I915_READ(GEN6_PMIER);
1163 pm_imr = I915_READ(GEN6_PMIMR);
1164 pm_isr = I915_READ(GEN6_PMISR);
1165 pm_iir = I915_READ(GEN6_PMIIR);
1166 pm_mask = I915_READ(GEN6_PMINTRMSK);
1167 } else {
1168 pm_ier = I915_READ(GEN8_GT_IER(2));
1169 pm_imr = I915_READ(GEN8_GT_IMR(2));
1170 pm_isr = I915_READ(GEN8_GT_ISR(2));
1171 pm_iir = I915_READ(GEN8_GT_IIR(2));
1172 pm_mask = I915_READ(GEN6_PMINTRMSK);
1173 }
0d8f9491 1174 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
9dd3c605 1175 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1800ad25 1176 seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
3b8d8d91 1177 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
3b8d8d91 1178 seq_printf(m, "Render p-state ratio: %d\n",
36cdd013 1179 (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
3b8d8d91
JB
1180 seq_printf(m, "Render p-state VID: %d\n",
1181 gt_perf_status & 0xff);
1182 seq_printf(m, "Render p-state limit: %d\n",
1183 rp_state_limits & 0xff);
0d8f9491
CW
1184 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1185 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1186 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1187 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
8e8c06cd 1188 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
f82855d3 1189 seq_printf(m, "CAGF: %dMHz\n", cagf);
d6cda9c7
AG
1190 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1191 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1192 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1193 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1194 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1195 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
d86ed34a
CW
1196 seq_printf(m, "Up threshold: %d%%\n",
1197 dev_priv->rps.up_threshold);
1198
d6cda9c7
AG
1199 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1200 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1201 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1202 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1203 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1204 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
d86ed34a
CW
1205 seq_printf(m, "Down threshold: %d%%\n",
1206 dev_priv->rps.down_threshold);
3b8d8d91 1207
cc3f90f0 1208 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
35040562 1209 rp_state_cap >> 16) & 0xff;
36cdd013 1210 max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
ef11bdb3 1211 GEN9_FREQ_SCALER : 1);
3b8d8d91 1212 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
7c59a9c1 1213 intel_gpu_freq(dev_priv, max_freq));
3b8d8d91
JB
1214
1215 max_freq = (rp_state_cap & 0xff00) >> 8;
36cdd013 1216 max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
ef11bdb3 1217 GEN9_FREQ_SCALER : 1);
3b8d8d91 1218 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
7c59a9c1 1219 intel_gpu_freq(dev_priv, max_freq));
3b8d8d91 1220
cc3f90f0 1221 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
35040562 1222 rp_state_cap >> 0) & 0xff;
36cdd013 1223 max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
ef11bdb3 1224 GEN9_FREQ_SCALER : 1);
3b8d8d91 1225 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
7c59a9c1 1226 intel_gpu_freq(dev_priv, max_freq));
31c77388 1227 seq_printf(m, "Max overclocked frequency: %dMHz\n",
7c59a9c1 1228 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
aed242ff 1229
d86ed34a
CW
1230 seq_printf(m, "Current freq: %d MHz\n",
1231 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
1232 seq_printf(m, "Actual freq: %d MHz\n", cagf);
aed242ff
CW
1233 seq_printf(m, "Idle freq: %d MHz\n",
1234 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
d86ed34a
CW
1235 seq_printf(m, "Min freq: %d MHz\n",
1236 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
29ecd78d
CW
1237 seq_printf(m, "Boost freq: %d MHz\n",
1238 intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
d86ed34a
CW
1239 seq_printf(m, "Max freq: %d MHz\n",
1240 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1241 seq_printf(m,
1242 "efficient (RPe) frequency: %d MHz\n",
1243 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
3b8d8d91 1244 } else {
267f0c90 1245 seq_puts(m, "no P-state info available\n");
3b8d8d91 1246 }
f97108d1 1247
1170f28c
MK
1248 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
1249 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1250 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1251
c8c8fb33
PZ
1252out:
1253 intel_runtime_pm_put(dev_priv);
1254 return ret;
f97108d1
JB
1255}
1256
d636951e
BW
1257static void i915_instdone_info(struct drm_i915_private *dev_priv,
1258 struct seq_file *m,
1259 struct intel_instdone *instdone)
1260{
f9e61372
BW
1261 int slice;
1262 int subslice;
1263
d636951e
BW
1264 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1265 instdone->instdone);
1266
1267 if (INTEL_GEN(dev_priv) <= 3)
1268 return;
1269
1270 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1271 instdone->slice_common);
1272
1273 if (INTEL_GEN(dev_priv) <= 6)
1274 return;
1275
f9e61372
BW
1276 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1277 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1278 slice, subslice, instdone->sampler[slice][subslice]);
1279
1280 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1281 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1282 slice, subslice, instdone->row[slice][subslice]);
d636951e
BW
1283}
1284
f654449a
CW
1285static int i915_hangcheck_info(struct seq_file *m, void *unused)
1286{
36cdd013 1287 struct drm_i915_private *dev_priv = node_to_i915(m->private);
e2f80391 1288 struct intel_engine_cs *engine;
666796da
TU
1289 u64 acthd[I915_NUM_ENGINES];
1290 u32 seqno[I915_NUM_ENGINES];
d636951e 1291 struct intel_instdone instdone;
c3232b18 1292 enum intel_engine_id id;
f654449a 1293
8af29b0c
CW
1294 if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
1295 seq_printf(m, "Wedged\n");
1296 if (test_bit(I915_RESET_IN_PROGRESS, &dev_priv->gpu_error.flags))
1297 seq_printf(m, "Reset in progress\n");
1298 if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
1299 seq_printf(m, "Waiter holding struct mutex\n");
1300 if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
1301 seq_printf(m, "struct_mutex blocked for reset\n");
1302
f654449a
CW
1303 if (!i915.enable_hangcheck) {
1304 seq_printf(m, "Hangcheck disabled\n");
1305 return 0;
1306 }
1307
ebbc7546
MK
1308 intel_runtime_pm_get(dev_priv);
1309
3b3f1650 1310 for_each_engine(engine, dev_priv, id) {
7e37f889 1311 acthd[id] = intel_engine_get_active_head(engine);
1b7744e7 1312 seqno[id] = intel_engine_get_seqno(engine);
ebbc7546
MK
1313 }
1314
3b3f1650 1315 intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
61642ff0 1316
ebbc7546
MK
1317 intel_runtime_pm_put(dev_priv);
1318
f654449a
CW
1319 if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
1320 seq_printf(m, "Hangcheck active, fires in %dms\n",
1321 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1322 jiffies));
1323 } else
1324 seq_printf(m, "Hangcheck inactive\n");
1325
3b3f1650 1326 for_each_engine(engine, dev_priv, id) {
33f53719
CW
1327 struct intel_breadcrumbs *b = &engine->breadcrumbs;
1328 struct rb_node *rb;
1329
e2f80391 1330 seq_printf(m, "%s:\n", engine->name);
14fd0d6d 1331 seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
cb399eab
CW
1332 engine->hangcheck.seqno, seqno[id],
1333 intel_engine_last_submit(engine));
3fe3b030 1334 seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
83348ba8
CW
1335 yesno(intel_engine_has_waiter(engine)),
1336 yesno(test_bit(engine->id,
3fe3b030
MK
1337 &dev_priv->gpu_error.missed_irq_rings)),
1338 yesno(engine->hangcheck.stalled));
1339
f6168e33 1340 spin_lock_irq(&b->lock);
33f53719
CW
1341 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
1342 struct intel_wait *w = container_of(rb, typeof(*w), node);
1343
1344 seq_printf(m, "\t%s [%d] waiting for %x\n",
1345 w->tsk->comm, w->tsk->pid, w->seqno);
1346 }
f6168e33 1347 spin_unlock_irq(&b->lock);
33f53719 1348
f654449a 1349 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
e2f80391 1350 (long long)engine->hangcheck.acthd,
c3232b18 1351 (long long)acthd[id]);
3fe3b030
MK
1352 seq_printf(m, "\taction = %s(%d) %d ms ago\n",
1353 hangcheck_action_to_str(engine->hangcheck.action),
1354 engine->hangcheck.action,
1355 jiffies_to_msecs(jiffies -
1356 engine->hangcheck.action_timestamp));
61642ff0 1357
e2f80391 1358 if (engine->id == RCS) {
d636951e 1359 seq_puts(m, "\tinstdone read =\n");
61642ff0 1360
d636951e 1361 i915_instdone_info(dev_priv, m, &instdone);
61642ff0 1362
d636951e 1363 seq_puts(m, "\tinstdone accu =\n");
61642ff0 1364
d636951e
BW
1365 i915_instdone_info(dev_priv, m,
1366 &engine->hangcheck.instdone);
61642ff0 1367 }
f654449a
CW
1368 }
1369
1370 return 0;
1371}
1372
4d85529d 1373static int ironlake_drpc_info(struct seq_file *m)
f97108d1 1374{
36cdd013 1375 struct drm_i915_private *dev_priv = node_to_i915(m->private);
616fdb5a
BW
1376 u32 rgvmodectl, rstdbyctl;
1377 u16 crstandvid;
616fdb5a 1378
c8c8fb33 1379 intel_runtime_pm_get(dev_priv);
616fdb5a
BW
1380
1381 rgvmodectl = I915_READ(MEMMODECTL);
1382 rstdbyctl = I915_READ(RSTDBYCTL);
1383 crstandvid = I915_READ16(CRSTANDVID);
1384
c8c8fb33 1385 intel_runtime_pm_put(dev_priv);
f97108d1 1386
742f491d 1387 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
f97108d1
JB
1388 seq_printf(m, "Boost freq: %d\n",
1389 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1390 MEMMODE_BOOST_FREQ_SHIFT);
1391 seq_printf(m, "HW control enabled: %s\n",
742f491d 1392 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
f97108d1 1393 seq_printf(m, "SW control enabled: %s\n",
742f491d 1394 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
f97108d1 1395 seq_printf(m, "Gated voltage change: %s\n",
742f491d 1396 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
f97108d1
JB
1397 seq_printf(m, "Starting frequency: P%d\n",
1398 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
7648fa99 1399 seq_printf(m, "Max P-state: P%d\n",
f97108d1 1400 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
7648fa99
JB
1401 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1402 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1403 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1404 seq_printf(m, "Render standby enabled: %s\n",
742f491d 1405 yesno(!(rstdbyctl & RCX_SW_EXIT)));
267f0c90 1406 seq_puts(m, "Current RS state: ");
88271da3
JB
1407 switch (rstdbyctl & RSX_STATUS_MASK) {
1408 case RSX_STATUS_ON:
267f0c90 1409 seq_puts(m, "on\n");
88271da3
JB
1410 break;
1411 case RSX_STATUS_RC1:
267f0c90 1412 seq_puts(m, "RC1\n");
88271da3
JB
1413 break;
1414 case RSX_STATUS_RC1E:
267f0c90 1415 seq_puts(m, "RC1E\n");
88271da3
JB
1416 break;
1417 case RSX_STATUS_RS1:
267f0c90 1418 seq_puts(m, "RS1\n");
88271da3
JB
1419 break;
1420 case RSX_STATUS_RS2:
267f0c90 1421 seq_puts(m, "RS2 (RC6)\n");
88271da3
JB
1422 break;
1423 case RSX_STATUS_RS3:
267f0c90 1424 seq_puts(m, "RC3 (RC6+)\n");
88271da3
JB
1425 break;
1426 default:
267f0c90 1427 seq_puts(m, "unknown\n");
88271da3
JB
1428 break;
1429 }
f97108d1
JB
1430
1431 return 0;
1432}
1433
f65367b5 1434static int i915_forcewake_domains(struct seq_file *m, void *data)
669ab5aa 1435{
36cdd013 1436 struct drm_i915_private *dev_priv = node_to_i915(m->private);
b2cff0db 1437 struct intel_uncore_forcewake_domain *fw_domain;
b2cff0db
CW
1438
1439 spin_lock_irq(&dev_priv->uncore.lock);
33c582c1 1440 for_each_fw_domain(fw_domain, dev_priv) {
b2cff0db 1441 seq_printf(m, "%s.wake_count = %u\n",
33c582c1 1442 intel_uncore_forcewake_domain_to_str(fw_domain->id),
b2cff0db
CW
1443 fw_domain->wake_count);
1444 }
1445 spin_unlock_irq(&dev_priv->uncore.lock);
669ab5aa 1446
b2cff0db
CW
1447 return 0;
1448}
1449
1450static int vlv_drpc_info(struct seq_file *m)
1451{
36cdd013 1452 struct drm_i915_private *dev_priv = node_to_i915(m->private);
6b312cd3 1453 u32 rpmodectl1, rcctl1, pw_status;
669ab5aa 1454
d46c0517
ID
1455 intel_runtime_pm_get(dev_priv);
1456
6b312cd3 1457 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
669ab5aa
D
1458 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1459 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1460
d46c0517
ID
1461 intel_runtime_pm_put(dev_priv);
1462
669ab5aa
D
1463 seq_printf(m, "Video Turbo Mode: %s\n",
1464 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1465 seq_printf(m, "Turbo enabled: %s\n",
1466 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1467 seq_printf(m, "HW control enabled: %s\n",
1468 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1469 seq_printf(m, "SW control enabled: %s\n",
1470 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1471 GEN6_RP_MEDIA_SW_MODE));
1472 seq_printf(m, "RC6 Enabled: %s\n",
1473 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1474 GEN6_RC_CTL_EI_MODE(1))));
1475 seq_printf(m, "Render Power Well: %s\n",
6b312cd3 1476 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
669ab5aa 1477 seq_printf(m, "Media Power Well: %s\n",
6b312cd3 1478 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
669ab5aa 1479
9cc19be5
ID
1480 seq_printf(m, "Render RC6 residency since boot: %u\n",
1481 I915_READ(VLV_GT_RENDER_RC6));
1482 seq_printf(m, "Media RC6 residency since boot: %u\n",
1483 I915_READ(VLV_GT_MEDIA_RC6));
1484
f65367b5 1485 return i915_forcewake_domains(m, NULL);
669ab5aa
D
1486}
1487
4d85529d
BW
1488static int gen6_drpc_info(struct seq_file *m)
1489{
36cdd013
DW
1490 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1491 struct drm_device *dev = &dev_priv->drm;
ecd8faea 1492 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
f2dd7578 1493 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
93b525dc 1494 unsigned forcewake_count;
aee56cff 1495 int count = 0, ret;
4d85529d
BW
1496
1497 ret = mutex_lock_interruptible(&dev->struct_mutex);
1498 if (ret)
1499 return ret;
c8c8fb33 1500 intel_runtime_pm_get(dev_priv);
4d85529d 1501
907b28c5 1502 spin_lock_irq(&dev_priv->uncore.lock);
b2cff0db 1503 forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
907b28c5 1504 spin_unlock_irq(&dev_priv->uncore.lock);
93b525dc
DV
1505
1506 if (forcewake_count) {
267f0c90
DL
1507 seq_puts(m, "RC information inaccurate because somebody "
1508 "holds a forcewake reference \n");
4d85529d
BW
1509 } else {
1510 /* NB: we cannot use forcewake, else we read the wrong values */
1511 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1512 udelay(10);
1513 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1514 }
1515
75aa3f63 1516 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
ed71f1b4 1517 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
4d85529d
BW
1518
1519 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1520 rcctl1 = I915_READ(GEN6_RC_CONTROL);
36cdd013 1521 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1522 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1523 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1524 }
4d85529d 1525 mutex_unlock(&dev->struct_mutex);
44cbd338
BW
1526 mutex_lock(&dev_priv->rps.hw_lock);
1527 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1528 mutex_unlock(&dev_priv->rps.hw_lock);
4d85529d 1529
c8c8fb33
PZ
1530 intel_runtime_pm_put(dev_priv);
1531
4d85529d
BW
1532 seq_printf(m, "Video Turbo Mode: %s\n",
1533 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1534 seq_printf(m, "HW control enabled: %s\n",
1535 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1536 seq_printf(m, "SW control enabled: %s\n",
1537 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1538 GEN6_RP_MEDIA_SW_MODE));
fff24e21 1539 seq_printf(m, "RC1e Enabled: %s\n",
4d85529d
BW
1540 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1541 seq_printf(m, "RC6 Enabled: %s\n",
1542 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
36cdd013 1543 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1544 seq_printf(m, "Render Well Gating Enabled: %s\n",
1545 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1546 seq_printf(m, "Media Well Gating Enabled: %s\n",
1547 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1548 }
4d85529d
BW
1549 seq_printf(m, "Deep RC6 Enabled: %s\n",
1550 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1551 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1552 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
267f0c90 1553 seq_puts(m, "Current RC state: ");
4d85529d
BW
1554 switch (gt_core_status & GEN6_RCn_MASK) {
1555 case GEN6_RC0:
1556 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
267f0c90 1557 seq_puts(m, "Core Power Down\n");
4d85529d 1558 else
267f0c90 1559 seq_puts(m, "on\n");
4d85529d
BW
1560 break;
1561 case GEN6_RC3:
267f0c90 1562 seq_puts(m, "RC3\n");
4d85529d
BW
1563 break;
1564 case GEN6_RC6:
267f0c90 1565 seq_puts(m, "RC6\n");
4d85529d
BW
1566 break;
1567 case GEN6_RC7:
267f0c90 1568 seq_puts(m, "RC7\n");
4d85529d
BW
1569 break;
1570 default:
267f0c90 1571 seq_puts(m, "Unknown\n");
4d85529d
BW
1572 break;
1573 }
1574
1575 seq_printf(m, "Core Power Down: %s\n",
1576 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
36cdd013 1577 if (INTEL_GEN(dev_priv) >= 9) {
f2dd7578
AG
1578 seq_printf(m, "Render Power Well: %s\n",
1579 (gen9_powergate_status &
1580 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1581 seq_printf(m, "Media Power Well: %s\n",
1582 (gen9_powergate_status &
1583 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1584 }
cce66a28
BW
1585
1586 /* Not exactly sure what this is */
1587 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1588 I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1589 seq_printf(m, "RC6 residency since boot: %u\n",
1590 I915_READ(GEN6_GT_GFX_RC6));
1591 seq_printf(m, "RC6+ residency since boot: %u\n",
1592 I915_READ(GEN6_GT_GFX_RC6p));
1593 seq_printf(m, "RC6++ residency since boot: %u\n",
1594 I915_READ(GEN6_GT_GFX_RC6pp));
1595
ecd8faea
BW
1596 seq_printf(m, "RC6 voltage: %dmV\n",
1597 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1598 seq_printf(m, "RC6+ voltage: %dmV\n",
1599 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1600 seq_printf(m, "RC6++ voltage: %dmV\n",
1601 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
f2dd7578 1602 return i915_forcewake_domains(m, NULL);
4d85529d
BW
1603}
1604
1605static int i915_drpc_info(struct seq_file *m, void *unused)
1606{
36cdd013 1607 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4d85529d 1608
36cdd013 1609 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
669ab5aa 1610 return vlv_drpc_info(m);
36cdd013 1611 else if (INTEL_GEN(dev_priv) >= 6)
4d85529d
BW
1612 return gen6_drpc_info(m);
1613 else
1614 return ironlake_drpc_info(m);
1615}
1616
9a851789
DV
1617static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1618{
36cdd013 1619 struct drm_i915_private *dev_priv = node_to_i915(m->private);
9a851789
DV
1620
1621 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1622 dev_priv->fb_tracking.busy_bits);
1623
1624 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1625 dev_priv->fb_tracking.flip_bits);
1626
1627 return 0;
1628}
1629
b5e50c3f
JB
1630static int i915_fbc_status(struct seq_file *m, void *unused)
1631{
36cdd013 1632 struct drm_i915_private *dev_priv = node_to_i915(m->private);
b5e50c3f 1633
36cdd013 1634 if (!HAS_FBC(dev_priv)) {
267f0c90 1635 seq_puts(m, "FBC unsupported on this chipset\n");
b5e50c3f
JB
1636 return 0;
1637 }
1638
36623ef8 1639 intel_runtime_pm_get(dev_priv);
25ad93fd 1640 mutex_lock(&dev_priv->fbc.lock);
36623ef8 1641
0e631adc 1642 if (intel_fbc_is_active(dev_priv))
267f0c90 1643 seq_puts(m, "FBC enabled\n");
2e8144a5
PZ
1644 else
1645 seq_printf(m, "FBC disabled: %s\n",
bf6189c6 1646 dev_priv->fbc.no_fbc_reason);
36623ef8 1647
0fc6a9dc
PZ
1648 if (intel_fbc_is_active(dev_priv) && INTEL_GEN(dev_priv) >= 7) {
1649 uint32_t mask = INTEL_GEN(dev_priv) >= 8 ?
1650 BDW_FBC_COMPRESSION_MASK :
1651 IVB_FBC_COMPRESSION_MASK;
31b9df10 1652 seq_printf(m, "Compressing: %s\n",
0fc6a9dc
PZ
1653 yesno(I915_READ(FBC_STATUS2) & mask));
1654 }
31b9df10 1655
25ad93fd 1656 mutex_unlock(&dev_priv->fbc.lock);
36623ef8
PZ
1657 intel_runtime_pm_put(dev_priv);
1658
b5e50c3f
JB
1659 return 0;
1660}
1661
da46f936
RV
1662static int i915_fbc_fc_get(void *data, u64 *val)
1663{
36cdd013 1664 struct drm_i915_private *dev_priv = data;
da46f936 1665
36cdd013 1666 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
da46f936
RV
1667 return -ENODEV;
1668
da46f936 1669 *val = dev_priv->fbc.false_color;
da46f936
RV
1670
1671 return 0;
1672}
1673
1674static int i915_fbc_fc_set(void *data, u64 val)
1675{
36cdd013 1676 struct drm_i915_private *dev_priv = data;
da46f936
RV
1677 u32 reg;
1678
36cdd013 1679 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
da46f936
RV
1680 return -ENODEV;
1681
25ad93fd 1682 mutex_lock(&dev_priv->fbc.lock);
da46f936
RV
1683
1684 reg = I915_READ(ILK_DPFC_CONTROL);
1685 dev_priv->fbc.false_color = val;
1686
1687 I915_WRITE(ILK_DPFC_CONTROL, val ?
1688 (reg | FBC_CTL_FALSE_COLOR) :
1689 (reg & ~FBC_CTL_FALSE_COLOR));
1690
25ad93fd 1691 mutex_unlock(&dev_priv->fbc.lock);
da46f936
RV
1692 return 0;
1693}
1694
1695DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
1696 i915_fbc_fc_get, i915_fbc_fc_set,
1697 "%llu\n");
1698
92d44621
PZ
1699static int i915_ips_status(struct seq_file *m, void *unused)
1700{
36cdd013 1701 struct drm_i915_private *dev_priv = node_to_i915(m->private);
92d44621 1702
36cdd013 1703 if (!HAS_IPS(dev_priv)) {
92d44621
PZ
1704 seq_puts(m, "not supported\n");
1705 return 0;
1706 }
1707
36623ef8
PZ
1708 intel_runtime_pm_get(dev_priv);
1709
0eaa53f0
RV
1710 seq_printf(m, "Enabled by kernel parameter: %s\n",
1711 yesno(i915.enable_ips));
1712
36cdd013 1713 if (INTEL_GEN(dev_priv) >= 8) {
0eaa53f0
RV
1714 seq_puts(m, "Currently: unknown\n");
1715 } else {
1716 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1717 seq_puts(m, "Currently: enabled\n");
1718 else
1719 seq_puts(m, "Currently: disabled\n");
1720 }
92d44621 1721
36623ef8
PZ
1722 intel_runtime_pm_put(dev_priv);
1723
92d44621
PZ
1724 return 0;
1725}
1726
4a9bef37
JB
1727static int i915_sr_status(struct seq_file *m, void *unused)
1728{
36cdd013 1729 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4a9bef37
JB
1730 bool sr_enabled = false;
1731
36623ef8 1732 intel_runtime_pm_get(dev_priv);
9c870d03 1733 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
36623ef8 1734
36cdd013 1735 if (HAS_PCH_SPLIT(dev_priv))
5ba2aaaa 1736 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
c0f86832 1737 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
36cdd013 1738 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
4a9bef37 1739 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
36cdd013 1740 else if (IS_I915GM(dev_priv))
4a9bef37 1741 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
36cdd013 1742 else if (IS_PINEVIEW(dev_priv))
4a9bef37 1743 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
36cdd013 1744 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
77b64555 1745 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4a9bef37 1746
9c870d03 1747 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
36623ef8
PZ
1748 intel_runtime_pm_put(dev_priv);
1749
08c4d7fc 1750 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
4a9bef37
JB
1751
1752 return 0;
1753}
1754
7648fa99
JB
1755static int i915_emon_status(struct seq_file *m, void *unused)
1756{
36cdd013
DW
1757 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1758 struct drm_device *dev = &dev_priv->drm;
7648fa99 1759 unsigned long temp, chipset, gfx;
de227ef0
CW
1760 int ret;
1761
36cdd013 1762 if (!IS_GEN5(dev_priv))
582be6b4
CW
1763 return -ENODEV;
1764
de227ef0
CW
1765 ret = mutex_lock_interruptible(&dev->struct_mutex);
1766 if (ret)
1767 return ret;
7648fa99
JB
1768
1769 temp = i915_mch_val(dev_priv);
1770 chipset = i915_chipset_val(dev_priv);
1771 gfx = i915_gfx_val(dev_priv);
de227ef0 1772 mutex_unlock(&dev->struct_mutex);
7648fa99
JB
1773
1774 seq_printf(m, "GMCH temp: %ld\n", temp);
1775 seq_printf(m, "Chipset power: %ld\n", chipset);
1776 seq_printf(m, "GFX power: %ld\n", gfx);
1777 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1778
1779 return 0;
1780}
1781
23b2f8bb
JB
/*
 * debugfs: print the GPU-frequency to effective CPU/ring-frequency mapping
 * table, as reported by the pcode mailbox.  Only meaningful on parts with
 * an LLC shared with the CPU.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int ret = 0;
	int gpu_freq, ia_freq;
	unsigned int max_gpu_freq, min_gpu_freq;

	if (!HAS_LLC(dev_priv)) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	/* Keep the device awake for the pcode reads below. */
	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq =
			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
		max_gpu_freq =
			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* Pcode returns the CPU/ring pair for the requested GT freq. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		/* ia_freq: bits 7:0 CPU, bits 15:8 ring, in 100 MHz units. */
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
				(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
				 GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
1832
44834a67
CW
1833static int i915_opregion(struct seq_file *m, void *unused)
1834{
36cdd013
DW
1835 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1836 struct drm_device *dev = &dev_priv->drm;
44834a67
CW
1837 struct intel_opregion *opregion = &dev_priv->opregion;
1838 int ret;
1839
1840 ret = mutex_lock_interruptible(&dev->struct_mutex);
1841 if (ret)
0d38f009 1842 goto out;
44834a67 1843
2455a8e4
JN
1844 if (opregion->header)
1845 seq_write(m, opregion->header, OPREGION_SIZE);
44834a67
CW
1846
1847 mutex_unlock(&dev->struct_mutex);
1848
0d38f009 1849out:
44834a67
CW
1850 return 0;
1851}
1852
ada8f955
JN
1853static int i915_vbt(struct seq_file *m, void *unused)
1854{
36cdd013 1855 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
ada8f955
JN
1856
1857 if (opregion->vbt)
1858 seq_write(m, opregion->vbt, opregion->vbt_size);
1859
1860 return 0;
1861}
1862
37811fcc
CW
/*
 * debugfs: describe every framebuffer known to the device — the fbdev
 * console framebuffer first (when fbdev emulation is compiled in),
 * followed by all user-created framebuffers.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	/* struct_mutex guards describe_obj() against concurrent GEM activity. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.depth,
			   fbdev_fb->base.bits_per_pixel,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, fbdev_fb->obj);
		seq_putc(m, '\n');
	}
#endif

	/* fb_lock protects the device framebuffer list during iteration. */
	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* Skip the fbdev framebuffer; it was printed above. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1912
/* Print the bookkeeping state (space/head/tail) of a context ringbuffer. */
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
		   ring->space, ring->head, ring->tail,
		   ring->last_retired_head);
}
1919
e76d3630
BW
/*
 * debugfs: list every GEM context with its owning process (if any), the
 * remap-slice flag, and per-engine context state/ringbuffer details.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		seq_printf(m, "HW context %u ", ctx->hw_id);
		if (ctx->pid) {
			struct task_struct *task;

			/* Take a task ref so ->comm stays valid while printing. */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' = slice remapping pending, 'r' = not. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce = &ctx->engine[engine->id];

			seq_printf(m, "%s: ", engine->name);
			/* 'I' = engine context initialised, 'i' = not. */
			seq_putc(m, ce->initialised ? 'I' : 'i');
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1972
/*
 * Dump the register-state page of one context's logical ring context (LRC)
 * for the given engine: binding address, then the first 0x600 bytes of the
 * LRC_STATE page as hex words.
 */
static void i915_dump_lrc_obj(struct seq_file *m,
			      struct i915_gem_context *ctx,
			      struct intel_engine_cs *engine)
{
	struct i915_vma *vma = ctx->engine[engine->id].state;
	struct page *page;
	int j;

	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);

	/* No backing state object allocated yet for this engine. */
	if (!vma) {
		seq_puts(m, "\tFake context\n");
		return;
	}

	if (vma->flags & I915_VMA_GLOBAL_BIND)
		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
			   i915_ggtt_offset(vma));

	/* Pin the pages so they can be safely kmapped below. */
	if (i915_gem_object_pin_pages(vma->obj)) {
		seq_puts(m, "\tFailed to get pages for context object\n\n");
		return;
	}

	page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
	if (page) {
		u32 *reg_state = kmap_atomic(page);

		/* Four dwords per output line, 0x600 bytes total. */
		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
			seq_printf(m,
				   "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
				   j * 4,
				   reg_state[j], reg_state[j + 1],
				   reg_state[j + 2], reg_state[j + 3]);
		}
		kunmap_atomic(reg_state);
	}

	i915_gem_object_unpin_pages(vma->obj);
	seq_putc(m, '\n');
}
2014
c0ab1ae9
BW
2015static int i915_dump_lrc(struct seq_file *m, void *unused)
2016{
36cdd013
DW
2017 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2018 struct drm_device *dev = &dev_priv->drm;
e2f80391 2019 struct intel_engine_cs *engine;
e2efd130 2020 struct i915_gem_context *ctx;
3b3f1650 2021 enum intel_engine_id id;
b4ac5afc 2022 int ret;
c0ab1ae9
BW
2023
2024 if (!i915.enable_execlists) {
2025 seq_printf(m, "Logical Ring Contexts are disabled\n");
2026 return 0;
2027 }
2028
2029 ret = mutex_lock_interruptible(&dev->struct_mutex);
2030 if (ret)
2031 return ret;
2032
e28e404c 2033 list_for_each_entry(ctx, &dev_priv->context_list, link)
3b3f1650 2034 for_each_engine(engine, dev_priv, id)
24f1d3cc 2035 i915_dump_lrc_obj(m, ctx, engine);
c0ab1ae9
BW
2036
2037 mutex_unlock(&dev->struct_mutex);
2038
2039 return 0;
2040}
2041
ea16a3cd
DV
2042static const char *swizzle_string(unsigned swizzle)
2043{
aee56cff 2044 switch (swizzle) {
ea16a3cd
DV
2045 case I915_BIT_6_SWIZZLE_NONE:
2046 return "none";
2047 case I915_BIT_6_SWIZZLE_9:
2048 return "bit9";
2049 case I915_BIT_6_SWIZZLE_9_10:
2050 return "bit9/bit10";
2051 case I915_BIT_6_SWIZZLE_9_11:
2052 return "bit9/bit11";
2053 case I915_BIT_6_SWIZZLE_9_10_11:
2054 return "bit9/bit10/bit11";
2055 case I915_BIT_6_SWIZZLE_9_17:
2056 return "bit9/bit17";
2057 case I915_BIT_6_SWIZZLE_9_10_17:
2058 return "bit9/bit10/bit17";
2059 case I915_BIT_6_SWIZZLE_UNKNOWN:
8a168ca7 2060 return "unknown";
ea16a3cd
DV
2061 }
2062
2063 return "bug";
2064}
2065
/*
 * debugfs: report the detected bit6 swizzle modes for X/Y tiling, plus the
 * raw memory-configuration registers they were derived from (which set of
 * registers exists depends on the hardware generation).
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		/* gen3/4: DRAM channel configuration registers. */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6+: per-channel DIMM config and arbitration registers. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2112
1c60fef5
BW
2113static int per_file_ctx(int id, void *ptr, void *data)
2114{
e2efd130 2115 struct i915_gem_context *ctx = ptr;
1c60fef5 2116 struct seq_file *m = data;
ae6c4806
DV
2117 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2118
2119 if (!ppgtt) {
2120 seq_printf(m, " no ppgtt for context %d\n",
2121 ctx->user_handle);
2122 return 0;
2123 }
1c60fef5 2124
f83d6518
OM
2125 if (i915_gem_context_is_default(ctx))
2126 seq_puts(m, " default context:\n");
2127 else
821d66dd 2128 seq_printf(m, " context %d:\n", ctx->user_handle);
1c60fef5
BW
2129 ppgtt->debug_dump(ppgtt, m);
2130
2131 return 0;
2132}
2133
36cdd013
DW
/*
 * Print the four page-directory-pointer (PDP) registers of every engine
 * for the aliasing PPGTT on gen8+.  No-op if no aliasing PPGTT exists.
 */
static void gen8_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i;

	if (!ppgtt)
		return;

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		for (i = 0; i < 4; i++) {
			/* Each 64-bit PDP is split across an upper/lower pair. */
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
2155
36cdd013
DW
/*
 * Print per-engine PPGTT registers (page-directory base, DCLV) for
 * gen6/gen7, plus the aliasing PPGTT's page-directory dump if present.
 */
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* gen6 has one global GFX_MODE; gen7 has one per ring (see below). */
	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
2188
/*
 * debugfs: dump PPGTT state — hardware registers first (gen-specific),
 * then the per-file, per-context PPGTTs of every open DRM client.
 *
 * Lock order: filelist_mutex (protects dev->filelist) is taken outside
 * struct_mutex; runtime-pm is held across the register reads.
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		/* Resolve the owning process name for the header line. */
		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		/* per_file_ctx() prints each context's PPGTT. */
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
2230
f5a4c67d
CW
2231static int count_irq_waiters(struct drm_i915_private *i915)
2232{
e2f80391 2233 struct intel_engine_cs *engine;
3b3f1650 2234 enum intel_engine_id id;
f5a4c67d 2235 int count = 0;
f5a4c67d 2236
3b3f1650 2237 for_each_engine(engine, i915, id)
688e6c72 2238 count += intel_engine_has_waiter(engine);
f5a4c67d
CW
2239
2240 return count;
2241}
2242
7466c291
CW
2243static const char *rps_power_to_str(unsigned int power)
2244{
2245 static const char * const strings[] = {
2246 [LOW_POWER] = "low power",
2247 [BETWEEN] = "mixed",
2248 [HIGH_POWER] = "high power",
2249 };
2250
2251 if (power >= ARRAY_SIZE(strings) || !strings[power])
2252 return "unknown";
2253
2254 return strings[power];
2255}
2256
1854d5ca
CW
/*
 * debugfs: summarise RPS (render power/frequency scaling) state — current
 * and limit frequencies, per-client boost counts, and the autotuning
 * up/down utilisation counters when the GPU is busy.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));

	/* filelist_mutex protects dev->filelist; client_lock the boost lists. */
	mutex_lock(&dev->filelist_mutex);
	spin_lock(&dev_priv->rps.client_lock);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		/* RCU keeps the task struct valid across pid_task(). */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts%s\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   file_priv->rps.boosts,
			   list_empty(&file_priv->rps.link) ? "" : ", active");
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
	spin_unlock(&dev_priv->rps.client_lock);
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    dev_priv->rps.enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Read the autotune counters atomically under forcewake. */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(dev_priv->rps.power));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   100 * rpup / rpupei,
			   dev_priv->rps.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   100 * rpdown / rpdownei,
			   dev_priv->rps.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2325
63573eb7
BW
2326static int i915_llc(struct seq_file *m, void *data)
2327{
36cdd013 2328 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3accaf7e 2329 const bool edram = INTEL_GEN(dev_priv) > 8;
63573eb7 2330
36cdd013 2331 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
3accaf7e
MK
2332 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2333 intel_uncore_edram_size(dev_priv)/1024/1024);
63573eb7
BW
2334
2335 return 0;
2336}
2337
fdf5d357
AD
/*
 * debugfs: report GuC firmware fetch/load status, versions, blob layout,
 * the decoded GUC_STATUS register, and the 16 scratch registers.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	u32 tmp, i;

	/* Nothing to report on parts without GuC firmware support. */
	if (!HAS_GUC_UCODE(dev_priv))
		return 0;

	seq_printf(m, "GuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n",
		   guc_fw->guc_fw_path);
	seq_printf(m, "\tfetch: %s\n",
		   intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
	seq_printf(m, "\tload: %s\n",
		   intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		   guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		   guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
	seq_printf(m, "\theader: offset is %d; size = %d\n",
		   guc_fw->header_offset, guc_fw->header_size);
	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
		   guc_fw->ucode_offset, guc_fw->ucode_size);
	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
		   guc_fw->rsa_offset, guc_fw->rsa_size);

	tmp = I915_READ(GUC_STATUS);

	/* Decode the individual status fields out of GUC_STATUS. */
	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	return 0;
}
2380
5aa1ee4b
AG
/*
 * Print GuC logging statistics: per-buffer (ISR/DPC/crash-dump) flush and
 * overflow counters, plus total flush interrupts and missed captures.
 */
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	seq_puts(m, "\nGuC logging stats:\n");

	seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_ISR_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);

	seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_DPC_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);

	seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);

	seq_printf(m, "\tTotal flush interrupt count: %u\n",
		   guc->log.flush_interrupt_count);

	seq_printf(m, "\tCapture miss count: %u\n",
		   guc->log.capture_miss_count);
}
2406
8b417c26
DG
/*
 * Print the state of one GuC client: priority/context/doorbell/workqueue
 * bookkeeping, failure counters, and per-engine submission counts.
 */
static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct i915_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint64_t tot = 0;

	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
		   client->priority, client->ctx_index, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
		   client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
		   client->wq_size, client->wq_offset, client->wq_tail);

	seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
	seq_printf(m, "\tLast submission result: %d\n", client->retcode);

	/* Per-engine submissions, summed into a grand total. */
	for_each_engine(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];
		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
			   submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}
2434
/*
 * debugfs: top-level GuC submission status — doorbell allocation map,
 * action counters, per-engine submission totals, the execbuf client's
 * details, and logging stats.
 */
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u64 total;

	/* No execbuf client means GuC submission is off (or unsupported). */
	if (!guc->execbuf_client) {
		seq_printf(m, "GuC submission %s\n",
			   HAS_GUC_SCHED(dev_priv) ?
			   "disabled" :
			   "not supported");
		return 0;
	}

	seq_printf(m, "Doorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);

	seq_printf(m, "GuC total action count: %llu\n", guc->action_count);
	seq_printf(m, "GuC action failure count: %u\n", guc->action_fail);
	seq_printf(m, "GuC last action command: 0x%x\n", guc->action_cmd);
	seq_printf(m, "GuC last action status: 0x%x\n", guc->action_status);
	seq_printf(m, "GuC last action error code: %d\n", guc->action_err);

	total = 0;
	seq_printf(m, "\nGuC submissions:\n");
	for_each_engine(engine, dev_priv, id) {
		u64 submissions = guc->submissions[id];
		total += submissions;
		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
			   engine->name, submissions, guc->last_seqno[id]);
	}
	seq_printf(m, "\t%s: %llu\n", "Total", total);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);

	i915_guc_log_info(m, dev_priv);

	/* Add more as required ... */

	return 0;
}
2480
4c7e77fc
AD
/*
 * debugfs: dump the raw GuC log buffer as hex, four 32-bit words per line.
 * Produces no output until the log buffer VMA has been allocated.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_i915_gem_object *obj;
	int i = 0, pg;

	if (!dev_priv->guc.log.vma)
		return 0;

	obj = dev_priv->guc.log.vma->obj;
	/* Map and print the buffer one page at a time. */
	for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
		u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));

		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
				   *(log + i), *(log + i + 1),
				   *(log + i + 2), *(log + i + 3));

		kunmap_atomic(log);
	}

	seq_putc(m, '\n');

	return 0;
}
2506
685534ef
SAK
2507static int i915_guc_log_control_get(void *data, u64 *val)
2508{
2509 struct drm_device *dev = data;
2510 struct drm_i915_private *dev_priv = to_i915(dev);
2511
2512 if (!dev_priv->guc.log.vma)
2513 return -EINVAL;
2514
2515 *val = i915.guc_log_level;
2516
2517 return 0;
2518}
2519
/*
 * debugfs write handler: change the GuC log verbosity level at runtime.
 * Fails with -EINVAL until a GuC log buffer has been allocated.
 */
static int i915_guc_log_control_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* The device must be awake while talking to the GuC. */
	intel_runtime_pm_get(dev_priv);
	ret = i915_guc_log_control(dev_priv, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* Expose the get/set handlers above as a simple integer debugfs file. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
			i915_guc_log_control_get, i915_guc_log_control_set,
			"%lld\n");
2544
e91fd8c6
RV
2545static int i915_edp_psr_status(struct seq_file *m, void *data)
2546{
36cdd013 2547 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a031d709 2548 u32 psrperf = 0;
a6cbdb8e
RV
2549 u32 stat[3];
2550 enum pipe pipe;
a031d709 2551 bool enabled = false;
e91fd8c6 2552
36cdd013 2553 if (!HAS_PSR(dev_priv)) {
3553a8ea
DL
2554 seq_puts(m, "PSR not supported\n");
2555 return 0;
2556 }
2557
c8c8fb33
PZ
2558 intel_runtime_pm_get(dev_priv);
2559
fa128fa6 2560 mutex_lock(&dev_priv->psr.lock);
a031d709
RV
2561 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
2562 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
2807cf69 2563 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
5755c78f 2564 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
fa128fa6
DV
2565 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2566 dev_priv->psr.busy_frontbuffer_bits);
2567 seq_printf(m, "Re-enable work scheduled: %s\n",
2568 yesno(work_busy(&dev_priv->psr.work.work)));
e91fd8c6 2569
36cdd013 2570 if (HAS_DDI(dev_priv))
443a389f 2571 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
3553a8ea
DL
2572 else {
2573 for_each_pipe(dev_priv, pipe) {
9c870d03
CW
2574 enum transcoder cpu_transcoder =
2575 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
2576 enum intel_display_power_domain power_domain;
2577
2578 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
2579 if (!intel_display_power_get_if_enabled(dev_priv,
2580 power_domain))
2581 continue;
2582
3553a8ea
DL
2583 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2584 VLV_EDP_PSR_CURR_STATE_MASK;
2585 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2586 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2587 enabled = true;
9c870d03
CW
2588
2589 intel_display_power_put(dev_priv, power_domain);
a6cbdb8e
RV
2590 }
2591 }
60e5ffe3
RV
2592
2593 seq_printf(m, "Main link in standby mode: %s\n",
2594 yesno(dev_priv->psr.link_standby));
2595
a6cbdb8e
RV
2596 seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2597
36cdd013 2598 if (!HAS_DDI(dev_priv))
a6cbdb8e
RV
2599 for_each_pipe(dev_priv, pipe) {
2600 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2601 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2602 seq_printf(m, " pipe %c", pipe_name(pipe));
2603 }
2604 seq_puts(m, "\n");
e91fd8c6 2605
05eec3c2
RV
2606 /*
2607 * VLV/CHV PSR has no kind of performance counter
2608 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2609 */
36cdd013 2610 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
443a389f 2611 psrperf = I915_READ(EDP_PSR_PERF_CNT) &
a031d709 2612 EDP_PSR_PERF_CNT_MASK;
a6cbdb8e
RV
2613
2614 seq_printf(m, "Performance_Counter: %u\n", psrperf);
2615 }
fa128fa6 2616 mutex_unlock(&dev_priv->psr.lock);
e91fd8c6 2617
c8c8fb33 2618 intel_runtime_pm_put(dev_priv);
e91fd8c6
RV
2619 return 0;
2620}
2621
d2e216d0
RV
/*
 * debugfs: read and print the sink CRC from the first active eDP panel.
 * Returns -ENODEV if no active eDP connector is found, or the error from
 * intel_dp_sink_crc() on read failure.
 */
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	/* Hold all modeset locks so the connector/crtc state is stable. */
	drm_modeset_lock_all(dev);
	for_each_intel_connector(dev, connector) {
		struct drm_crtc *crtc;

		if (!connector->base.state->best_encoder)
			continue;

		crtc = connector->base.state->crtc;
		if (!crtc->state->active)
			continue;

		/* Only eDP panels expose a sink CRC here. */
		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		/* First active eDP panel wins; stop after printing it. */
		goto out;
	}
	ret = -ENODEV;
out:
	drm_modeset_unlock_all(dev);
	return ret;
}
2661
ec013e7f
JB
/*
 * debugfs: report the accumulated GPU energy counter in microjoules,
 * scaling the MCH_SECP_NRG_STTS reading by the RAPL energy unit from
 * MSR_RAPL_POWER_UNIT.  gen6+ only.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u64 power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	/* Bits 12:8 of the MSR hold the energy-unit exponent (1/2^n J). */
	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}
2685
6455c870 2686static int i915_runtime_pm_status(struct seq_file *m, void *unused)
371db66a 2687{
36cdd013 2688 struct drm_i915_private *dev_priv = node_to_i915(m->private);
52a05c30 2689 struct pci_dev *pdev = dev_priv->drm.pdev;
371db66a 2690
a156e64d
CW
2691 if (!HAS_RUNTIME_PM(dev_priv))
2692 seq_puts(m, "Runtime power management not supported\n");
371db66a 2693
67d97da3 2694 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
371db66a 2695 seq_printf(m, "IRQs disabled: %s\n",
9df7575f 2696 yesno(!intel_irqs_enabled(dev_priv)));
0d804184 2697#ifdef CONFIG_PM
a6aaec8b 2698 seq_printf(m, "Usage count: %d\n",
36cdd013 2699 atomic_read(&dev_priv->drm.dev->power.usage_count));
0d804184
CW
2700#else
2701 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2702#endif
a156e64d 2703 seq_printf(m, "PCI device power state: %s [%d]\n",
52a05c30
DW
2704 pci_power_name(pdev->current_state),
2705 pdev->current_state);
371db66a 2706
ec013e7f
JB
2707 return 0;
2708}
2709
1da51581
ID
2710static int i915_power_domain_info(struct seq_file *m, void *unused)
2711{
36cdd013 2712 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1da51581
ID
2713 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2714 int i;
2715
2716 mutex_lock(&power_domains->lock);
2717
2718 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2719 for (i = 0; i < power_domains->power_well_count; i++) {
2720 struct i915_power_well *power_well;
2721 enum intel_display_power_domain power_domain;
2722
2723 power_well = &power_domains->power_wells[i];
2724 seq_printf(m, "%-25s %d\n", power_well->name,
2725 power_well->count);
2726
2727 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
2728 power_domain++) {
2729 if (!(BIT(power_domain) & power_well->domains))
2730 continue;
2731
2732 seq_printf(m, " %-23s %d\n",
9895ad03 2733 intel_display_power_domain_str(power_domain),
1da51581
ID
2734 power_domains->domain_use_count[power_domain]);
2735 }
2736 }
2737
2738 mutex_unlock(&power_domains->lock);
2739
2740 return 0;
2741}
2742
b7cec66d
DL
2743static int i915_dmc_info(struct seq_file *m, void *unused)
2744{
36cdd013 2745 struct drm_i915_private *dev_priv = node_to_i915(m->private);
b7cec66d
DL
2746 struct intel_csr *csr;
2747
36cdd013 2748 if (!HAS_CSR(dev_priv)) {
b7cec66d
DL
2749 seq_puts(m, "not supported\n");
2750 return 0;
2751 }
2752
2753 csr = &dev_priv->csr;
2754
6fb403de
MK
2755 intel_runtime_pm_get(dev_priv);
2756
b7cec66d
DL
2757 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2758 seq_printf(m, "path: %s\n", csr->fw_path);
2759
2760 if (!csr->dmc_payload)
6fb403de 2761 goto out;
b7cec66d
DL
2762
2763 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2764 CSR_VERSION_MINOR(csr->version));
2765
36cdd013 2766 if (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6)) {
8337206d
DL
2767 seq_printf(m, "DC3 -> DC5 count: %d\n",
2768 I915_READ(SKL_CSR_DC3_DC5_COUNT));
2769 seq_printf(m, "DC5 -> DC6 count: %d\n",
2770 I915_READ(SKL_CSR_DC5_DC6_COUNT));
36cdd013 2771 } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
16e11b99
MK
2772 seq_printf(m, "DC3 -> DC5 count: %d\n",
2773 I915_READ(BXT_CSR_DC3_DC5_COUNT));
8337206d
DL
2774 }
2775
6fb403de
MK
2776out:
2777 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2778 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2779 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2780
8337206d
DL
2781 intel_runtime_pm_put(dev_priv);
2782
b7cec66d
DL
2783 return 0;
2784}
2785
53f5e3ca
JB
2786static void intel_seq_print_mode(struct seq_file *m, int tabs,
2787 struct drm_display_mode *mode)
2788{
2789 int i;
2790
2791 for (i = 0; i < tabs; i++)
2792 seq_putc(m, '\t');
2793
2794 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2795 mode->base.id, mode->name,
2796 mode->vrefresh, mode->clock,
2797 mode->hdisplay, mode->hsync_start,
2798 mode->hsync_end, mode->htotal,
2799 mode->vdisplay, mode->vsync_start,
2800 mode->vsync_end, mode->vtotal,
2801 mode->type, mode->flags);
2802}
2803
2804static void intel_encoder_info(struct seq_file *m,
2805 struct intel_crtc *intel_crtc,
2806 struct intel_encoder *intel_encoder)
2807{
36cdd013
DW
2808 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2809 struct drm_device *dev = &dev_priv->drm;
53f5e3ca
JB
2810 struct drm_crtc *crtc = &intel_crtc->base;
2811 struct intel_connector *intel_connector;
2812 struct drm_encoder *encoder;
2813
2814 encoder = &intel_encoder->base;
2815 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
8e329a03 2816 encoder->base.id, encoder->name);
53f5e3ca
JB
2817 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2818 struct drm_connector *connector = &intel_connector->base;
2819 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2820 connector->base.id,
c23cc417 2821 connector->name,
53f5e3ca
JB
2822 drm_get_connector_status_name(connector->status));
2823 if (connector->status == connector_status_connected) {
2824 struct drm_display_mode *mode = &crtc->mode;
2825 seq_printf(m, ", mode:\n");
2826 intel_seq_print_mode(m, 2, mode);
2827 } else {
2828 seq_putc(m, '\n');
2829 }
2830 }
2831}
2832
2833static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2834{
36cdd013
DW
2835 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2836 struct drm_device *dev = &dev_priv->drm;
53f5e3ca
JB
2837 struct drm_crtc *crtc = &intel_crtc->base;
2838 struct intel_encoder *intel_encoder;
23a48d53
ML
2839 struct drm_plane_state *plane_state = crtc->primary->state;
2840 struct drm_framebuffer *fb = plane_state->fb;
53f5e3ca 2841
23a48d53 2842 if (fb)
5aa8a937 2843 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
23a48d53
ML
2844 fb->base.id, plane_state->src_x >> 16,
2845 plane_state->src_y >> 16, fb->width, fb->height);
5aa8a937
MR
2846 else
2847 seq_puts(m, "\tprimary plane disabled\n");
53f5e3ca
JB
2848 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2849 intel_encoder_info(m, intel_crtc, intel_encoder);
2850}
2851
2852static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2853{
2854 struct drm_display_mode *mode = panel->fixed_mode;
2855
2856 seq_printf(m, "\tfixed mode:\n");
2857 intel_seq_print_mode(m, 2, mode);
2858}
2859
2860static void intel_dp_info(struct seq_file *m,
2861 struct intel_connector *intel_connector)
2862{
2863 struct intel_encoder *intel_encoder = intel_connector->encoder;
2864 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2865
2866 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
742f491d 2867 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
b6dabe3b 2868 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
53f5e3ca 2869 intel_panel_info(m, &intel_connector->panel);
80209e5f
MK
2870
2871 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2872 &intel_dp->aux);
53f5e3ca
JB
2873}
2874
9a148a96
LY
2875static void intel_dp_mst_info(struct seq_file *m,
2876 struct intel_connector *intel_connector)
2877{
2878 struct intel_encoder *intel_encoder = intel_connector->encoder;
2879 struct intel_dp_mst_encoder *intel_mst =
2880 enc_to_mst(&intel_encoder->base);
2881 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2882 struct intel_dp *intel_dp = &intel_dig_port->dp;
2883 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2884 intel_connector->port);
2885
2886 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2887}
2888
53f5e3ca
JB
2889static void intel_hdmi_info(struct seq_file *m,
2890 struct intel_connector *intel_connector)
2891{
2892 struct intel_encoder *intel_encoder = intel_connector->encoder;
2893 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2894
742f491d 2895 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
53f5e3ca
JB
2896}
2897
2898static void intel_lvds_info(struct seq_file *m,
2899 struct intel_connector *intel_connector)
2900{
2901 intel_panel_info(m, &intel_connector->panel);
2902}
2903
2904static void intel_connector_info(struct seq_file *m,
2905 struct drm_connector *connector)
2906{
2907 struct intel_connector *intel_connector = to_intel_connector(connector);
2908 struct intel_encoder *intel_encoder = intel_connector->encoder;
f103fc7d 2909 struct drm_display_mode *mode;
53f5e3ca
JB
2910
2911 seq_printf(m, "connector %d: type %s, status: %s\n",
c23cc417 2912 connector->base.id, connector->name,
53f5e3ca
JB
2913 drm_get_connector_status_name(connector->status));
2914 if (connector->status == connector_status_connected) {
2915 seq_printf(m, "\tname: %s\n", connector->display_info.name);
2916 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2917 connector->display_info.width_mm,
2918 connector->display_info.height_mm);
2919 seq_printf(m, "\tsubpixel order: %s\n",
2920 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2921 seq_printf(m, "\tCEA rev: %d\n",
2922 connector->display_info.cea_rev);
2923 }
ee648a74
ML
2924
2925 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
2926 return;
2927
2928 switch (connector->connector_type) {
2929 case DRM_MODE_CONNECTOR_DisplayPort:
2930 case DRM_MODE_CONNECTOR_eDP:
9a148a96
LY
2931 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2932 intel_dp_mst_info(m, intel_connector);
2933 else
2934 intel_dp_info(m, intel_connector);
ee648a74
ML
2935 break;
2936 case DRM_MODE_CONNECTOR_LVDS:
2937 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
36cd7444 2938 intel_lvds_info(m, intel_connector);
ee648a74
ML
2939 break;
2940 case DRM_MODE_CONNECTOR_HDMIA:
2941 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2942 intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
2943 intel_hdmi_info(m, intel_connector);
2944 break;
2945 default:
2946 break;
36cd7444 2947 }
53f5e3ca 2948
f103fc7d
JB
2949 seq_printf(m, "\tmodes:\n");
2950 list_for_each_entry(mode, &connector->modes, head)
2951 intel_seq_print_mode(m, 2, mode);
53f5e3ca
JB
2952}
2953
36cdd013 2954static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
065f2ec2 2955{
065f2ec2
CW
2956 u32 state;
2957
2a307c2e 2958 if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
0b87c24e 2959 state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
065f2ec2 2960 else
5efb3e28 2961 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
065f2ec2
CW
2962
2963 return state;
2964}
2965
36cdd013
DW
2966static bool cursor_position(struct drm_i915_private *dev_priv,
2967 int pipe, int *x, int *y)
065f2ec2 2968{
065f2ec2
CW
2969 u32 pos;
2970
5efb3e28 2971 pos = I915_READ(CURPOS(pipe));
065f2ec2
CW
2972
2973 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
2974 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
2975 *x = -*x;
2976
2977 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
2978 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
2979 *y = -*y;
2980
36cdd013 2981 return cursor_active(dev_priv, pipe);
065f2ec2
CW
2982}
2983
3abc4e09
RF
2984static const char *plane_type(enum drm_plane_type type)
2985{
2986 switch (type) {
2987 case DRM_PLANE_TYPE_OVERLAY:
2988 return "OVL";
2989 case DRM_PLANE_TYPE_PRIMARY:
2990 return "PRI";
2991 case DRM_PLANE_TYPE_CURSOR:
2992 return "CUR";
2993 /*
2994 * Deliberately omitting default: to generate compiler warnings
2995 * when a new drm_plane_type gets added.
2996 */
2997 }
2998
2999 return "unknown";
3000}
3001
3002static const char *plane_rotation(unsigned int rotation)
3003{
3004 static char buf[48];
3005 /*
3006 * According to doc only one DRM_ROTATE_ is allowed but this
3007 * will print them all to visualize if the values are misused
3008 */
3009 snprintf(buf, sizeof(buf),
3010 "%s%s%s%s%s%s(0x%08x)",
31ad61e4
JL
3011 (rotation & DRM_ROTATE_0) ? "0 " : "",
3012 (rotation & DRM_ROTATE_90) ? "90 " : "",
3013 (rotation & DRM_ROTATE_180) ? "180 " : "",
3014 (rotation & DRM_ROTATE_270) ? "270 " : "",
3015 (rotation & DRM_REFLECT_X) ? "FLIPX " : "",
3016 (rotation & DRM_REFLECT_Y) ? "FLIPY " : "",
3abc4e09
RF
3017 rotation);
3018
3019 return buf;
3020}
3021
3022static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3023{
36cdd013
DW
3024 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3025 struct drm_device *dev = &dev_priv->drm;
3abc4e09
RF
3026 struct intel_plane *intel_plane;
3027
3028 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3029 struct drm_plane_state *state;
3030 struct drm_plane *plane = &intel_plane->base;
b3c11ac2 3031 struct drm_format_name_buf format_name;
3abc4e09
RF
3032
3033 if (!plane->state) {
3034 seq_puts(m, "plane->state is NULL!\n");
3035 continue;
3036 }
3037
3038 state = plane->state;
3039
90844f00 3040 if (state->fb) {
b3c11ac2 3041 drm_get_format_name(state->fb->pixel_format, &format_name);
90844f00 3042 } else {
b3c11ac2 3043 sprintf(format_name.str, "N/A");
90844f00
EE
3044 }
3045
3abc4e09
RF
3046 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3047 plane->base.id,
3048 plane_type(intel_plane->base.type),
3049 state->crtc_x, state->crtc_y,
3050 state->crtc_w, state->crtc_h,
3051 (state->src_x >> 16),
3052 ((state->src_x & 0xffff) * 15625) >> 10,
3053 (state->src_y >> 16),
3054 ((state->src_y & 0xffff) * 15625) >> 10,
3055 (state->src_w >> 16),
3056 ((state->src_w & 0xffff) * 15625) >> 10,
3057 (state->src_h >> 16),
3058 ((state->src_h & 0xffff) * 15625) >> 10,
b3c11ac2 3059 format_name.str,
3abc4e09
RF
3060 plane_rotation(state->rotation));
3061 }
3062}
3063
3064static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3065{
3066 struct intel_crtc_state *pipe_config;
3067 int num_scalers = intel_crtc->num_scalers;
3068 int i;
3069
3070 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3071
3072 /* Not all platformas have a scaler */
3073 if (num_scalers) {
3074 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3075 num_scalers,
3076 pipe_config->scaler_state.scaler_users,
3077 pipe_config->scaler_state.scaler_id);
3078
58415918 3079 for (i = 0; i < num_scalers; i++) {
3abc4e09
RF
3080 struct intel_scaler *sc =
3081 &pipe_config->scaler_state.scalers[i];
3082
3083 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3084 i, yesno(sc->in_use), sc->mode);
3085 }
3086 seq_puts(m, "\n");
3087 } else {
3088 seq_puts(m, "\tNo scalers available on this platform\n");
3089 }
3090}
3091
53f5e3ca
JB
3092static int i915_display_info(struct seq_file *m, void *unused)
3093{
36cdd013
DW
3094 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3095 struct drm_device *dev = &dev_priv->drm;
065f2ec2 3096 struct intel_crtc *crtc;
53f5e3ca
JB
3097 struct drm_connector *connector;
3098
b0e5ddf3 3099 intel_runtime_pm_get(dev_priv);
53f5e3ca
JB
3100 drm_modeset_lock_all(dev);
3101 seq_printf(m, "CRTC info\n");
3102 seq_printf(m, "---------\n");
d3fcc808 3103 for_each_intel_crtc(dev, crtc) {
065f2ec2 3104 bool active;
f77076c9 3105 struct intel_crtc_state *pipe_config;
065f2ec2 3106 int x, y;
53f5e3ca 3107
f77076c9
ML
3108 pipe_config = to_intel_crtc_state(crtc->base.state);
3109
3abc4e09 3110 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
065f2ec2 3111 crtc->base.base.id, pipe_name(crtc->pipe),
f77076c9 3112 yesno(pipe_config->base.active),
3abc4e09
RF
3113 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3114 yesno(pipe_config->dither), pipe_config->pipe_bpp);
3115
f77076c9 3116 if (pipe_config->base.active) {
065f2ec2
CW
3117 intel_crtc_info(m, crtc);
3118
36cdd013 3119 active = cursor_position(dev_priv, crtc->pipe, &x, &y);
57127efa 3120 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
4b0e333e 3121 yesno(crtc->cursor_base),
3dd512fb
MR
3122 x, y, crtc->base.cursor->state->crtc_w,
3123 crtc->base.cursor->state->crtc_h,
57127efa 3124 crtc->cursor_addr, yesno(active));
3abc4e09
RF
3125 intel_scaler_info(m, crtc);
3126 intel_plane_info(m, crtc);
a23dc658 3127 }
cace841c
DV
3128
3129 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3130 yesno(!crtc->cpu_fifo_underrun_disabled),
3131 yesno(!crtc->pch_fifo_underrun_disabled));
53f5e3ca
JB
3132 }
3133
3134 seq_printf(m, "\n");
3135 seq_printf(m, "Connector info\n");
3136 seq_printf(m, "--------------\n");
3137 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3138 intel_connector_info(m, connector);
3139 }
3140 drm_modeset_unlock_all(dev);
b0e5ddf3 3141 intel_runtime_pm_put(dev_priv);
53f5e3ca
JB
3142
3143 return 0;
3144}
3145
1b36595f
CW
3146static int i915_engine_info(struct seq_file *m, void *unused)
3147{
3148 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3149 struct intel_engine_cs *engine;
3b3f1650 3150 enum intel_engine_id id;
1b36595f 3151
9c870d03
CW
3152 intel_runtime_pm_get(dev_priv);
3153
3b3f1650 3154 for_each_engine(engine, dev_priv, id) {
1b36595f
CW
3155 struct intel_breadcrumbs *b = &engine->breadcrumbs;
3156 struct drm_i915_gem_request *rq;
3157 struct rb_node *rb;
3158 u64 addr;
3159
3160 seq_printf(m, "%s\n", engine->name);
3fe3b030 3161 seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
1b36595f 3162 intel_engine_get_seqno(engine),
cb399eab 3163 intel_engine_last_submit(engine),
1b36595f 3164 engine->hangcheck.seqno,
3fe3b030 3165 jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
1b36595f
CW
3166
3167 rcu_read_lock();
3168
3169 seq_printf(m, "\tRequests:\n");
3170
73cb9701
CW
3171 rq = list_first_entry(&engine->timeline->requests,
3172 struct drm_i915_gem_request, link);
3173 if (&rq->link != &engine->timeline->requests)
1b36595f
CW
3174 print_request(m, rq, "\t\tfirst ");
3175
73cb9701
CW
3176 rq = list_last_entry(&engine->timeline->requests,
3177 struct drm_i915_gem_request, link);
3178 if (&rq->link != &engine->timeline->requests)
1b36595f
CW
3179 print_request(m, rq, "\t\tlast ");
3180
3181 rq = i915_gem_find_active_request(engine);
3182 if (rq) {
3183 print_request(m, rq, "\t\tactive ");
3184 seq_printf(m,
3185 "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
3186 rq->head, rq->postfix, rq->tail,
3187 rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
3188 rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
3189 }
3190
3191 seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
3192 I915_READ(RING_START(engine->mmio_base)),
3193 rq ? i915_ggtt_offset(rq->ring->vma) : 0);
3194 seq_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
3195 I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
3196 rq ? rq->ring->head : 0);
3197 seq_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
3198 I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
3199 rq ? rq->ring->tail : 0);
3200 seq_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
3201 I915_READ(RING_CTL(engine->mmio_base)),
3202 I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
3203
3204 rcu_read_unlock();
3205
3206 addr = intel_engine_get_active_head(engine);
3207 seq_printf(m, "\tACTHD: 0x%08x_%08x\n",
3208 upper_32_bits(addr), lower_32_bits(addr));
3209 addr = intel_engine_get_last_batch_head(engine);
3210 seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
3211 upper_32_bits(addr), lower_32_bits(addr));
3212
3213 if (i915.enable_execlists) {
3214 u32 ptr, read, write;
20311bd3 3215 struct rb_node *rb;
1b36595f
CW
3216
3217 seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
3218 I915_READ(RING_EXECLIST_STATUS_LO(engine)),
3219 I915_READ(RING_EXECLIST_STATUS_HI(engine)));
3220
3221 ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
3222 read = GEN8_CSB_READ_PTR(ptr);
3223 write = GEN8_CSB_WRITE_PTR(ptr);
3224 seq_printf(m, "\tExeclist CSB read %d, write %d\n",
3225 read, write);
3226 if (read >= GEN8_CSB_ENTRIES)
3227 read = 0;
3228 if (write >= GEN8_CSB_ENTRIES)
3229 write = 0;
3230 if (read > write)
3231 write += GEN8_CSB_ENTRIES;
3232 while (read < write) {
3233 unsigned int idx = ++read % GEN8_CSB_ENTRIES;
3234
3235 seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
3236 idx,
3237 I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
3238 I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
3239 }
3240
3241 rcu_read_lock();
3242 rq = READ_ONCE(engine->execlist_port[0].request);
3243 if (rq)
3244 print_request(m, rq, "\t\tELSP[0] ");
3245 else
3246 seq_printf(m, "\t\tELSP[0] idle\n");
3247 rq = READ_ONCE(engine->execlist_port[1].request);
3248 if (rq)
3249 print_request(m, rq, "\t\tELSP[1] ");
3250 else
3251 seq_printf(m, "\t\tELSP[1] idle\n");
3252 rcu_read_unlock();
c8247c06 3253
663f71e7 3254 spin_lock_irq(&engine->timeline->lock);
20311bd3
CW
3255 for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
3256 rq = rb_entry(rb, typeof(*rq), priotree.node);
c8247c06
CW
3257 print_request(m, rq, "\t\tQ ");
3258 }
663f71e7 3259 spin_unlock_irq(&engine->timeline->lock);
1b36595f
CW
3260 } else if (INTEL_GEN(dev_priv) > 6) {
3261 seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
3262 I915_READ(RING_PP_DIR_BASE(engine)));
3263 seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
3264 I915_READ(RING_PP_DIR_BASE_READ(engine)));
3265 seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
3266 I915_READ(RING_PP_DIR_DCLV(engine)));
3267 }
3268
f6168e33 3269 spin_lock_irq(&b->lock);
1b36595f
CW
3270 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
3271 struct intel_wait *w = container_of(rb, typeof(*w), node);
3272
3273 seq_printf(m, "\t%s [%d] waiting for %x\n",
3274 w->tsk->comm, w->tsk->pid, w->seqno);
3275 }
f6168e33 3276 spin_unlock_irq(&b->lock);
1b36595f
CW
3277
3278 seq_puts(m, "\n");
3279 }
3280
9c870d03
CW
3281 intel_runtime_pm_put(dev_priv);
3282
1b36595f
CW
3283 return 0;
3284}
3285
e04934cf
BW
3286static int i915_semaphore_status(struct seq_file *m, void *unused)
3287{
36cdd013
DW
3288 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3289 struct drm_device *dev = &dev_priv->drm;
e2f80391 3290 struct intel_engine_cs *engine;
36cdd013 3291 int num_rings = INTEL_INFO(dev_priv)->num_rings;
c3232b18
DG
3292 enum intel_engine_id id;
3293 int j, ret;
e04934cf 3294
39df9190 3295 if (!i915.semaphores) {
e04934cf
BW
3296 seq_puts(m, "Semaphores are disabled\n");
3297 return 0;
3298 }
3299
3300 ret = mutex_lock_interruptible(&dev->struct_mutex);
3301 if (ret)
3302 return ret;
03872064 3303 intel_runtime_pm_get(dev_priv);
e04934cf 3304
36cdd013 3305 if (IS_BROADWELL(dev_priv)) {
e04934cf
BW
3306 struct page *page;
3307 uint64_t *seqno;
3308
51d545d0 3309 page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
e04934cf
BW
3310
3311 seqno = (uint64_t *)kmap_atomic(page);
3b3f1650 3312 for_each_engine(engine, dev_priv, id) {
e04934cf
BW
3313 uint64_t offset;
3314
e2f80391 3315 seq_printf(m, "%s\n", engine->name);
e04934cf
BW
3316
3317 seq_puts(m, " Last signal:");
3318 for (j = 0; j < num_rings; j++) {
c3232b18 3319 offset = id * I915_NUM_ENGINES + j;
e04934cf
BW
3320 seq_printf(m, "0x%08llx (0x%02llx) ",
3321 seqno[offset], offset * 8);
3322 }
3323 seq_putc(m, '\n');
3324
3325 seq_puts(m, " Last wait: ");
3326 for (j = 0; j < num_rings; j++) {
c3232b18 3327 offset = id + (j * I915_NUM_ENGINES);
e04934cf
BW
3328 seq_printf(m, "0x%08llx (0x%02llx) ",
3329 seqno[offset], offset * 8);
3330 }
3331 seq_putc(m, '\n');
3332
3333 }
3334 kunmap_atomic(seqno);
3335 } else {
3336 seq_puts(m, " Last signal:");
3b3f1650 3337 for_each_engine(engine, dev_priv, id)
e04934cf
BW
3338 for (j = 0; j < num_rings; j++)
3339 seq_printf(m, "0x%08x\n",
e2f80391 3340 I915_READ(engine->semaphore.mbox.signal[j]));
e04934cf
BW
3341 seq_putc(m, '\n');
3342 }
3343
03872064 3344 intel_runtime_pm_put(dev_priv);
e04934cf
BW
3345 mutex_unlock(&dev->struct_mutex);
3346 return 0;
3347}
3348
728e29d7
DV
3349static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3350{
36cdd013
DW
3351 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3352 struct drm_device *dev = &dev_priv->drm;
728e29d7
DV
3353 int i;
3354
3355 drm_modeset_lock_all(dev);
3356 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3357 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3358
3359 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
2dd66ebd
ML
3360 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3361 pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
728e29d7 3362 seq_printf(m, " tracked hardware state:\n");
3e369b76
ACO
3363 seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll);
3364 seq_printf(m, " dpll_md: 0x%08x\n",
3365 pll->config.hw_state.dpll_md);
3366 seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0);
3367 seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1);
3368 seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll);
728e29d7
DV
3369 }
3370 drm_modeset_unlock_all(dev);
3371
3372 return 0;
3373}
3374
1ed1ef9d 3375static int i915_wa_registers(struct seq_file *m, void *unused)
888b5995
AS
3376{
3377 int i;
3378 int ret;
e2f80391 3379 struct intel_engine_cs *engine;
36cdd013
DW
3380 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3381 struct drm_device *dev = &dev_priv->drm;
33136b06 3382 struct i915_workarounds *workarounds = &dev_priv->workarounds;
c3232b18 3383 enum intel_engine_id id;
888b5995 3384
888b5995
AS
3385 ret = mutex_lock_interruptible(&dev->struct_mutex);
3386 if (ret)
3387 return ret;
3388
3389 intel_runtime_pm_get(dev_priv);
3390
33136b06 3391 seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
3b3f1650 3392 for_each_engine(engine, dev_priv, id)
33136b06 3393 seq_printf(m, "HW whitelist count for %s: %d\n",
c3232b18 3394 engine->name, workarounds->hw_whitelist_count[id]);
33136b06 3395 for (i = 0; i < workarounds->count; ++i) {
f0f59a00
VS
3396 i915_reg_t addr;
3397 u32 mask, value, read;
2fa60f6d 3398 bool ok;
888b5995 3399
33136b06
AS
3400 addr = workarounds->reg[i].addr;
3401 mask = workarounds->reg[i].mask;
3402 value = workarounds->reg[i].value;
2fa60f6d
MK
3403 read = I915_READ(addr);
3404 ok = (value & mask) == (read & mask);
3405 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
f0f59a00 3406 i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
888b5995
AS
3407 }
3408
3409 intel_runtime_pm_put(dev_priv);
3410 mutex_unlock(&dev->struct_mutex);
3411
3412 return 0;
3413}
3414
c5511e44
DL
3415static int i915_ddb_info(struct seq_file *m, void *unused)
3416{
36cdd013
DW
3417 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3418 struct drm_device *dev = &dev_priv->drm;
c5511e44
DL
3419 struct skl_ddb_allocation *ddb;
3420 struct skl_ddb_entry *entry;
3421 enum pipe pipe;
3422 int plane;
3423
36cdd013 3424 if (INTEL_GEN(dev_priv) < 9)
2fcffe19
DL
3425 return 0;
3426
c5511e44
DL
3427 drm_modeset_lock_all(dev);
3428
3429 ddb = &dev_priv->wm.skl_hw.ddb;
3430
3431 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3432
3433 for_each_pipe(dev_priv, pipe) {
3434 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3435
8b364b41 3436 for_each_universal_plane(dev_priv, pipe, plane) {
c5511e44
DL
3437 entry = &ddb->plane[pipe][plane];
3438 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
3439 entry->start, entry->end,
3440 skl_ddb_entry_size(entry));
3441 }
3442
4969d33e 3443 entry = &ddb->plane[pipe][PLANE_CURSOR];
c5511e44
DL
3444 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3445 entry->end, skl_ddb_entry_size(entry));
3446 }
3447
3448 drm_modeset_unlock_all(dev);
3449
3450 return 0;
3451}
3452
a54746e3 3453static void drrs_status_per_crtc(struct seq_file *m,
36cdd013
DW
3454 struct drm_device *dev,
3455 struct intel_crtc *intel_crtc)
a54746e3 3456{
fac5e23e 3457 struct drm_i915_private *dev_priv = to_i915(dev);
a54746e3
VK
3458 struct i915_drrs *drrs = &dev_priv->drrs;
3459 int vrefresh = 0;
26875fe5 3460 struct drm_connector *connector;
a54746e3 3461
26875fe5
ML
3462 drm_for_each_connector(connector, dev) {
3463 if (connector->state->crtc != &intel_crtc->base)
3464 continue;
3465
3466 seq_printf(m, "%s:\n", connector->name);
a54746e3
VK
3467 }
3468
3469 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3470 seq_puts(m, "\tVBT: DRRS_type: Static");
3471 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3472 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3473 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3474 seq_puts(m, "\tVBT: DRRS_type: None");
3475 else
3476 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3477
3478 seq_puts(m, "\n\n");
3479
f77076c9 3480 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
a54746e3
VK
3481 struct intel_panel *panel;
3482
3483 mutex_lock(&drrs->mutex);
3484 /* DRRS Supported */
3485 seq_puts(m, "\tDRRS Supported: Yes\n");
3486
3487 /* disable_drrs() will make drrs->dp NULL */
3488 if (!drrs->dp) {
3489 seq_puts(m, "Idleness DRRS: Disabled");
3490 mutex_unlock(&drrs->mutex);
3491 return;
3492 }
3493
3494 panel = &drrs->dp->attached_connector->panel;
3495 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3496 drrs->busy_frontbuffer_bits);
3497
3498 seq_puts(m, "\n\t\t");
3499 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3500 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3501 vrefresh = panel->fixed_mode->vrefresh;
3502 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3503 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3504 vrefresh = panel->downclock_mode->vrefresh;
3505 } else {
3506 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3507 drrs->refresh_rate_type);
3508 mutex_unlock(&drrs->mutex);
3509 return;
3510 }
3511 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3512
3513 seq_puts(m, "\n\t\t");
3514 mutex_unlock(&drrs->mutex);
3515 } else {
3516 /* DRRS not supported. Print the VBT parameter*/
3517 seq_puts(m, "\tDRRS Supported : No");
3518 }
3519 seq_puts(m, "\n");
3520}
3521
3522static int i915_drrs_status(struct seq_file *m, void *unused)
3523{
36cdd013
DW
3524 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3525 struct drm_device *dev = &dev_priv->drm;
a54746e3
VK
3526 struct intel_crtc *intel_crtc;
3527 int active_crtc_cnt = 0;
3528
26875fe5 3529 drm_modeset_lock_all(dev);
a54746e3 3530 for_each_intel_crtc(dev, intel_crtc) {
f77076c9 3531 if (intel_crtc->base.state->active) {
a54746e3
VK
3532 active_crtc_cnt++;
3533 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3534
3535 drrs_status_per_crtc(m, dev, intel_crtc);
3536 }
a54746e3 3537 }
26875fe5 3538 drm_modeset_unlock_all(dev);
a54746e3
VK
3539
3540 if (!active_crtc_cnt)
3541 seq_puts(m, "No active crtc found\n");
3542
3543 return 0;
3544}
3545
07144428
DL
3546struct pipe_crc_info {
3547 const char *name;
36cdd013 3548 struct drm_i915_private *dev_priv;
07144428
DL
3549 enum pipe pipe;
3550};
3551
11bed958
DA
/*
 * debugfs show: dump the DisplayPort MST topology for every MST-capable
 * source port. Non-DP connectors and MST stream encoders are skipped so
 * each physical source port is dumped exactly once.
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;

	drm_modeset_lock_all(dev);
	drm_for_each_connector(connector, dev) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip fake MST stream encoders; we want the real port. */
		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_modeset_unlock_all(dev);
	return 0;
}
3580
07144428
DL
/*
 * Open handler for the per-pipe CRC debugfs file. Enforces single-open
 * semantics via pipe_crc->opened under pipe_crc->lock, and rejects pipes
 * the hardware doesn't have.
 */
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev_priv;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
		return -ENODEV;

	spin_lock_irq(&pipe_crc->lock);

	if (pipe_crc->opened) {
		spin_unlock_irq(&pipe_crc->lock);
		return -EBUSY; /* already open */
	}

	pipe_crc->opened = true;
	filep->private_data = inode->i_private;

	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
3604
/* Release handler: clear the single-open flag so the file can be reopened. */
static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev_priv;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->opened = false;
	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
3617
/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
/* account for the '\0' terminator appended by snprintf() */
#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
3622
/*
 * Number of CRC entries currently buffered in the circular buffer.
 * Caller must hold pipe_crc->lock.
 */
static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);
	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}
3629
/*
 * Read handler for the per-pipe CRC file. Emits one fixed-width text line
 * per CRC entry ("frame crc0..crc4"). Blocks until at least one entry is
 * available unless O_NONBLOCK is set. The buffer lock is dropped around
 * copy_to_user() since that may fault and sleep.
 */
static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_i915_private *dev_priv = info->dev_priv;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int n_entries;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	/* nothing to read */
	spin_lock_irq(&pipe_crc->lock);
	while (pipe_crc_data_count(pipe_crc) == 0) {
		int ret;

		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&pipe_crc->lock);
			return -EAGAIN;
		}

		/* Sleeps with the lock released, re-checking the count. */
		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
		if (ret) {
			spin_unlock_irq(&pipe_crc->lock);
			return ret;
		}
	}

	/* We now have one or more entries to read */
	n_entries = count / PIPE_CRC_LINE_LEN;

	bytes_read = 0;
	while (n_entries > 0) {
		struct intel_pipe_crc_entry *entry =
			&pipe_crc->entries[pipe_crc->tail];

		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
			break;

		/* tail advance relies on the buffer size being a power of 2 */
		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);

		/* copy_to_user() may fault/sleep: drop the spinlock first */
		spin_unlock_irq(&pipe_crc->lock);

		if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
			return -EFAULT;

		user_buf += PIPE_CRC_LINE_LEN;
		n_entries--;

		spin_lock_irq(&pipe_crc->lock);
	}

	spin_unlock_irq(&pipe_crc->lock);

	return bytes_read;
}
3705
/* File operations for the i915_pipe_X_crc debugfs files. */
static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};
3712
/* One entry per pipe; .dev_priv is filled in at file-creation time. */
static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{
		.name = "i915_pipe_A_crc",
		.pipe = PIPE_A,
	},
	{
		.name = "i915_pipe_B_crc",
		.pipe = PIPE_B,
	},
	{
		.name = "i915_pipe_C_crc",
		.pipe = PIPE_C,
	},
};
3727
/*
 * Create the debugfs CRC file for one pipe under @root and register it as
 * a fake info node so it is cleaned up with the minor.
 */
static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
				enum pipe pipe)
{
	struct drm_i915_private *dev_priv = to_i915(minor->dev);
	struct dentry *ent;
	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

	info->dev_priv = dev_priv;
	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
				  &i915_pipe_crc_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, info);
}
3743
e8dfcf78 3744static const char * const pipe_crc_sources[] = {
926321d5
DV
3745 "none",
3746 "plane1",
3747 "plane2",
3748 "pf",
5b3a856b 3749 "pipe",
3d099a05
DV
3750 "TV",
3751 "DP-B",
3752 "DP-C",
3753 "DP-D",
46a19188 3754 "auto",
926321d5
DV
3755};
3756
/* Map a CRC source enum value to its user-visible name. */
static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
	return pipe_crc_sources[source];
}
3762
bd9db02f 3763static int display_crc_ctl_show(struct seq_file *m, void *data)
926321d5 3764{
36cdd013 3765 struct drm_i915_private *dev_priv = m->private;
926321d5
DV
3766 int i;
3767
3768 for (i = 0; i < I915_MAX_PIPES; i++)
3769 seq_printf(m, "%c %s\n", pipe_name(i),
3770 pipe_crc_source_name(dev_priv->pipe_crc[i].source));
3771
3772 return 0;
3773}
3774
bd9db02f 3775static int display_crc_ctl_open(struct inode *inode, struct file *file)
926321d5 3776{
36cdd013 3777 return single_open(file, display_crc_ctl_show, inode->i_private);
926321d5
DV
3778}
3779
46a19188 3780static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
52f843f6
DV
3781 uint32_t *val)
3782{
46a19188
DV
3783 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3784 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
3785
3786 switch (*source) {
52f843f6
DV
3787 case INTEL_PIPE_CRC_SOURCE_PIPE:
3788 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
3789 break;
3790 case INTEL_PIPE_CRC_SOURCE_NONE:
3791 *val = 0;
3792 break;
3793 default:
3794 return -EINVAL;
3795 }
3796
3797 return 0;
3798}
3799
36cdd013
DW
3800static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
3801 enum pipe pipe,
46a19188
DV
3802 enum intel_pipe_crc_source *source)
3803{
36cdd013 3804 struct drm_device *dev = &dev_priv->drm;
46a19188
DV
3805 struct intel_encoder *encoder;
3806 struct intel_crtc *crtc;
26756809 3807 struct intel_digital_port *dig_port;
46a19188
DV
3808 int ret = 0;
3809
3810 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
3811
6e9f798d 3812 drm_modeset_lock_all(dev);
b2784e15 3813 for_each_intel_encoder(dev, encoder) {
46a19188
DV
3814 if (!encoder->base.crtc)
3815 continue;
3816
3817 crtc = to_intel_crtc(encoder->base.crtc);
3818
3819 if (crtc->pipe != pipe)
3820 continue;
3821
3822 switch (encoder->type) {
3823 case INTEL_OUTPUT_TVOUT:
3824 *source = INTEL_PIPE_CRC_SOURCE_TV;
3825 break;
cca0502b 3826 case INTEL_OUTPUT_DP:
46a19188 3827 case INTEL_OUTPUT_EDP:
26756809
DV
3828 dig_port = enc_to_dig_port(&encoder->base);
3829 switch (dig_port->port) {
3830 case PORT_B:
3831 *source = INTEL_PIPE_CRC_SOURCE_DP_B;
3832 break;
3833 case PORT_C:
3834 *source = INTEL_PIPE_CRC_SOURCE_DP_C;
3835 break;
3836 case PORT_D:
3837 *source = INTEL_PIPE_CRC_SOURCE_DP_D;
3838 break;
3839 default:
3840 WARN(1, "nonexisting DP port %c\n",
3841 port_name(dig_port->port));
3842 break;
3843 }
46a19188 3844 break;
6847d71b
PZ
3845 default:
3846 break;
46a19188
DV
3847 }
3848 }
6e9f798d 3849 drm_modeset_unlock_all(dev);
46a19188
DV
3850
3851 return ret;
3852}
3853
36cdd013 3854static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
46a19188
DV
3855 enum pipe pipe,
3856 enum intel_pipe_crc_source *source,
7ac0129b
DV
3857 uint32_t *val)
3858{
8d2f24ca
DV
3859 bool need_stable_symbols = false;
3860
46a19188 3861 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
36cdd013 3862 int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
46a19188
DV
3863 if (ret)
3864 return ret;
3865 }
3866
3867 switch (*source) {
7ac0129b
DV
3868 case INTEL_PIPE_CRC_SOURCE_PIPE:
3869 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
3870 break;
3871 case INTEL_PIPE_CRC_SOURCE_DP_B:
3872 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
8d2f24ca 3873 need_stable_symbols = true;
7ac0129b
DV
3874 break;
3875 case INTEL_PIPE_CRC_SOURCE_DP_C:
3876 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
8d2f24ca 3877 need_stable_symbols = true;
7ac0129b 3878 break;
2be57922 3879 case INTEL_PIPE_CRC_SOURCE_DP_D:
36cdd013 3880 if (!IS_CHERRYVIEW(dev_priv))
2be57922
VS
3881 return -EINVAL;
3882 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
3883 need_stable_symbols = true;
3884 break;
7ac0129b
DV
3885 case INTEL_PIPE_CRC_SOURCE_NONE:
3886 *val = 0;
3887 break;
3888 default:
3889 return -EINVAL;
3890 }
3891
8d2f24ca
DV
3892 /*
3893 * When the pipe CRC tap point is after the transcoders we need
3894 * to tweak symbol-level features to produce a deterministic series of
3895 * symbols for a given frame. We need to reset those features only once
3896 * a frame (instead of every nth symbol):
3897 * - DC-balance: used to ensure a better clock recovery from the data
3898 * link (SDVO)
3899 * - DisplayPort scrambling: used for EMI reduction
3900 */
3901 if (need_stable_symbols) {
3902 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3903
8d2f24ca 3904 tmp |= DC_BALANCE_RESET_VLV;
eb736679
VS
3905 switch (pipe) {
3906 case PIPE_A:
8d2f24ca 3907 tmp |= PIPE_A_SCRAMBLE_RESET;
eb736679
VS
3908 break;
3909 case PIPE_B:
8d2f24ca 3910 tmp |= PIPE_B_SCRAMBLE_RESET;
eb736679
VS
3911 break;
3912 case PIPE_C:
3913 tmp |= PIPE_C_SCRAMBLE_RESET;
3914 break;
3915 default:
3916 return -EINVAL;
3917 }
8d2f24ca
DV
3918 I915_WRITE(PORT_DFT2_G4X, tmp);
3919 }
3920
7ac0129b
DV
3921 return 0;
3922}
3923
36cdd013 3924static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
46a19188
DV
3925 enum pipe pipe,
3926 enum intel_pipe_crc_source *source,
4b79ebf7
DV
3927 uint32_t *val)
3928{
84093603
DV
3929 bool need_stable_symbols = false;
3930
46a19188 3931 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
36cdd013 3932 int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
46a19188
DV
3933 if (ret)
3934 return ret;
3935 }
3936
3937 switch (*source) {
4b79ebf7
DV
3938 case INTEL_PIPE_CRC_SOURCE_PIPE:
3939 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
3940 break;
3941 case INTEL_PIPE_CRC_SOURCE_TV:
36cdd013 3942 if (!SUPPORTS_TV(dev_priv))
4b79ebf7
DV
3943 return -EINVAL;
3944 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
3945 break;
3946 case INTEL_PIPE_CRC_SOURCE_DP_B:
36cdd013 3947 if (!IS_G4X(dev_priv))
4b79ebf7
DV
3948 return -EINVAL;
3949 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
84093603 3950 need_stable_symbols = true;
4b79ebf7
DV
3951 break;
3952 case INTEL_PIPE_CRC_SOURCE_DP_C:
36cdd013 3953 if (!IS_G4X(dev_priv))
4b79ebf7
DV
3954 return -EINVAL;
3955 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
84093603 3956 need_stable_symbols = true;
4b79ebf7
DV
3957 break;
3958 case INTEL_PIPE_CRC_SOURCE_DP_D:
36cdd013 3959 if (!IS_G4X(dev_priv))
4b79ebf7
DV
3960 return -EINVAL;
3961 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
84093603 3962 need_stable_symbols = true;
4b79ebf7
DV
3963 break;
3964 case INTEL_PIPE_CRC_SOURCE_NONE:
3965 *val = 0;
3966 break;
3967 default:
3968 return -EINVAL;
3969 }
3970
84093603
DV
3971 /*
3972 * When the pipe CRC tap point is after the transcoders we need
3973 * to tweak symbol-level features to produce a deterministic series of
3974 * symbols for a given frame. We need to reset those features only once
3975 * a frame (instead of every nth symbol):
3976 * - DC-balance: used to ensure a better clock recovery from the data
3977 * link (SDVO)
3978 * - DisplayPort scrambling: used for EMI reduction
3979 */
3980 if (need_stable_symbols) {
3981 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3982
36cdd013 3983 WARN_ON(!IS_G4X(dev_priv));
84093603
DV
3984
3985 I915_WRITE(PORT_DFT_I9XX,
3986 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
3987
3988 if (pipe == PIPE_A)
3989 tmp |= PIPE_A_SCRAMBLE_RESET;
3990 else
3991 tmp |= PIPE_B_SCRAMBLE_RESET;
3992
3993 I915_WRITE(PORT_DFT2_G4X, tmp);
3994 }
3995
4b79ebf7
DV
3996 return 0;
3997}
3998
36cdd013 3999static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
8d2f24ca
DV
4000 enum pipe pipe)
4001{
8d2f24ca
DV
4002 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
4003
eb736679
VS
4004 switch (pipe) {
4005 case PIPE_A:
8d2f24ca 4006 tmp &= ~PIPE_A_SCRAMBLE_RESET;
eb736679
VS
4007 break;
4008 case PIPE_B:
8d2f24ca 4009 tmp &= ~PIPE_B_SCRAMBLE_RESET;
eb736679
VS
4010 break;
4011 case PIPE_C:
4012 tmp &= ~PIPE_C_SCRAMBLE_RESET;
4013 break;
4014 default:
4015 return;
4016 }
8d2f24ca
DV
4017 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
4018 tmp &= ~DC_BALANCE_RESET_VLV;
4019 I915_WRITE(PORT_DFT2_G4X, tmp);
4020
4021}
4022
36cdd013 4023static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
84093603
DV
4024 enum pipe pipe)
4025{
84093603
DV
4026 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
4027
4028 if (pipe == PIPE_A)
4029 tmp &= ~PIPE_A_SCRAMBLE_RESET;
4030 else
4031 tmp &= ~PIPE_B_SCRAMBLE_RESET;
4032 I915_WRITE(PORT_DFT2_G4X, tmp);
4033
4034 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
4035 I915_WRITE(PORT_DFT_I9XX,
4036 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
4037 }
4038}
4039
46a19188 4040static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
5b3a856b
DV
4041 uint32_t *val)
4042{
46a19188
DV
4043 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
4044 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
4045
4046 switch (*source) {
5b3a856b
DV
4047 case INTEL_PIPE_CRC_SOURCE_PLANE1:
4048 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
4049 break;
4050 case INTEL_PIPE_CRC_SOURCE_PLANE2:
4051 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
4052 break;
5b3a856b
DV
4053 case INTEL_PIPE_CRC_SOURCE_PIPE:
4054 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
4055 break;
3d099a05 4056 case INTEL_PIPE_CRC_SOURCE_NONE:
5b3a856b
DV
4057 *val = 0;
4058 break;
3d099a05
DV
4059 default:
4060 return -EINVAL;
5b3a856b
DV
4061 }
4062
4063 return 0;
4064}
4065
36cdd013
DW
4066static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
4067 bool enable)
fabf6e51 4068{
36cdd013 4069 struct drm_device *dev = &dev_priv->drm;
98187836 4070 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
f77076c9 4071 struct intel_crtc_state *pipe_config;
c4e2d043
ML
4072 struct drm_atomic_state *state;
4073 int ret = 0;
fabf6e51
DV
4074
4075 drm_modeset_lock_all(dev);
c4e2d043
ML
4076 state = drm_atomic_state_alloc(dev);
4077 if (!state) {
4078 ret = -ENOMEM;
4079 goto out;
fabf6e51 4080 }
fabf6e51 4081
c4e2d043
ML
4082 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
4083 pipe_config = intel_atomic_get_crtc_state(state, crtc);
4084 if (IS_ERR(pipe_config)) {
4085 ret = PTR_ERR(pipe_config);
4086 goto out;
4087 }
fabf6e51 4088
c4e2d043
ML
4089 pipe_config->pch_pfit.force_thru = enable;
4090 if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
4091 pipe_config->pch_pfit.enabled != enable)
4092 pipe_config->base.connectors_changed = true;
1b509259 4093
c4e2d043
ML
4094 ret = drm_atomic_commit(state);
4095out:
c4e2d043 4096 WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
0853695c
CW
4097 drm_modeset_unlock_all(dev);
4098 drm_atomic_state_put(state);
fabf6e51
DV
4099}
4100
36cdd013 4101static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
fabf6e51
DV
4102 enum pipe pipe,
4103 enum intel_pipe_crc_source *source,
5b3a856b
DV
4104 uint32_t *val)
4105{
46a19188
DV
4106 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
4107 *source = INTEL_PIPE_CRC_SOURCE_PF;
4108
4109 switch (*source) {
5b3a856b
DV
4110 case INTEL_PIPE_CRC_SOURCE_PLANE1:
4111 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
4112 break;
4113 case INTEL_PIPE_CRC_SOURCE_PLANE2:
4114 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
4115 break;
4116 case INTEL_PIPE_CRC_SOURCE_PF:
36cdd013
DW
4117 if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
4118 hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
fabf6e51 4119
5b3a856b
DV
4120 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
4121 break;
3d099a05 4122 case INTEL_PIPE_CRC_SOURCE_NONE:
5b3a856b
DV
4123 *val = 0;
4124 break;
3d099a05
DV
4125 default:
4126 return -EINVAL;
5b3a856b
DV
4127 }
4128
4129 return 0;
4130}
4131
36cdd013
DW
4132static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
4133 enum pipe pipe,
926321d5
DV
4134 enum intel_pipe_crc_source source)
4135{
cc3da175 4136 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
b91eb5cc 4137 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
e129649b 4138 enum intel_display_power_domain power_domain;
432f3342 4139 u32 val = 0; /* shut up gcc */
5b3a856b 4140 int ret;
926321d5 4141
cc3da175
DL
4142 if (pipe_crc->source == source)
4143 return 0;
4144
ae676fcd
DL
4145 /* forbid changing the source without going back to 'none' */
4146 if (pipe_crc->source && source)
4147 return -EINVAL;
4148
e129649b
ID
4149 power_domain = POWER_DOMAIN_PIPE(pipe);
4150 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
9d8b0588
DV
4151 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
4152 return -EIO;
4153 }
4154
36cdd013 4155 if (IS_GEN2(dev_priv))
46a19188 4156 ret = i8xx_pipe_crc_ctl_reg(&source, &val);
36cdd013
DW
4157 else if (INTEL_GEN(dev_priv) < 5)
4158 ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
4159 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4160 ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
4161 else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
46a19188 4162 ret = ilk_pipe_crc_ctl_reg(&source, &val);
5b3a856b 4163 else
36cdd013 4164 ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
5b3a856b
DV
4165
4166 if (ret != 0)
e129649b 4167 goto out;
5b3a856b 4168
4b584369
DL
4169 /* none -> real source transition */
4170 if (source) {
4252fbc3
VS
4171 struct intel_pipe_crc_entry *entries;
4172
7cd6ccff
DL
4173 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
4174 pipe_name(pipe), pipe_crc_source_name(source));
4175
3cf54b34
VS
4176 entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
4177 sizeof(pipe_crc->entries[0]),
4252fbc3 4178 GFP_KERNEL);
e129649b
ID
4179 if (!entries) {
4180 ret = -ENOMEM;
4181 goto out;
4182 }
e5f75aca 4183
8c740dce
PZ
4184 /*
4185 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
4186 * enabled and disabled dynamically based on package C states,
4187 * user space can't make reliable use of the CRCs, so let's just
4188 * completely disable it.
4189 */
4190 hsw_disable_ips(crtc);
4191
d538bbdf 4192 spin_lock_irq(&pipe_crc->lock);
64387b61 4193 kfree(pipe_crc->entries);
4252fbc3 4194 pipe_crc->entries = entries;
d538bbdf
DL
4195 pipe_crc->head = 0;
4196 pipe_crc->tail = 0;
4197 spin_unlock_irq(&pipe_crc->lock);
4b584369
DL
4198 }
4199
cc3da175 4200 pipe_crc->source = source;
926321d5 4201
926321d5
DV
4202 I915_WRITE(PIPE_CRC_CTL(pipe), val);
4203 POSTING_READ(PIPE_CRC_CTL(pipe));
4204
e5f75aca
DL
4205 /* real source -> none transition */
4206 if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
d538bbdf 4207 struct intel_pipe_crc_entry *entries;
98187836
VS
4208 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
4209 pipe);
d538bbdf 4210
7cd6ccff
DL
4211 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
4212 pipe_name(pipe));
4213
a33d7105 4214 drm_modeset_lock(&crtc->base.mutex, NULL);
f77076c9 4215 if (crtc->base.state->active)
0f0f74bc 4216 intel_wait_for_vblank(dev_priv, pipe);
a33d7105 4217 drm_modeset_unlock(&crtc->base.mutex);
bcf17ab2 4218
d538bbdf
DL
4219 spin_lock_irq(&pipe_crc->lock);
4220 entries = pipe_crc->entries;
e5f75aca 4221 pipe_crc->entries = NULL;
9ad6d99f
VS
4222 pipe_crc->head = 0;
4223 pipe_crc->tail = 0;
d538bbdf
DL
4224 spin_unlock_irq(&pipe_crc->lock);
4225
4226 kfree(entries);
84093603 4227
36cdd013
DW
4228 if (IS_G4X(dev_priv))
4229 g4x_undo_pipe_scramble_reset(dev_priv, pipe);
4230 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4231 vlv_undo_pipe_scramble_reset(dev_priv, pipe);
4232 else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
4233 hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
8c740dce
PZ
4234
4235 hsw_enable_ips(crtc);
e5f75aca
DL
4236 }
4237
e129649b
ID
4238 ret = 0;
4239
4240out:
4241 intel_display_power_put(dev_priv, power_domain);
4242
4243 return ret;
926321d5
DV
4244}
4245
/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */
/*
 * Split @buf in place into NUL-terminated whitespace-separated words.
 * Returns the word count, or -EINVAL if there are more than @max_words.
 */
static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}
4288
b94dec87
DL
4289enum intel_pipe_crc_object {
4290 PIPE_CRC_OBJECT_PIPE,
4291};
4292
e8dfcf78 4293static const char * const pipe_crc_objects[] = {
b94dec87
DL
4294 "pipe",
4295};
4296
4297static int
bd9db02f 4298display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
b94dec87
DL
4299{
4300 int i;
4301
4302 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
4303 if (!strcmp(buf, pipe_crc_objects[i])) {
bd9db02f 4304 *o = i;
b94dec87
DL
4305 return 0;
4306 }
4307
4308 return -EINVAL;
4309}
4310
bd9db02f 4311static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
926321d5
DV
4312{
4313 const char name = buf[0];
4314
4315 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
4316 return -EINVAL;
4317
4318 *pipe = name - 'A';
4319
4320 return 0;
4321}
4322
/*
 * Look @buf up in pipe_crc_sources[]; on a match store the matching enum
 * value in *s and return 0, otherwise return -EINVAL.
 */
static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
		if (!strcmp(buf, pipe_crc_sources[i])) {
			*s = i;
			return 0;
		}

	return -EINVAL;
}
4336
36cdd013
DW
4337static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
4338 char *buf, size_t len)
926321d5 4339{
b94dec87 4340#define N_WORDS 3
926321d5 4341 int n_words;
b94dec87 4342 char *words[N_WORDS];
926321d5 4343 enum pipe pipe;
b94dec87 4344 enum intel_pipe_crc_object object;
926321d5
DV
4345 enum intel_pipe_crc_source source;
4346
bd9db02f 4347 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
b94dec87
DL
4348 if (n_words != N_WORDS) {
4349 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
4350 N_WORDS);
4351 return -EINVAL;
4352 }
4353
bd9db02f 4354 if (display_crc_ctl_parse_object(words[0], &object) < 0) {
b94dec87 4355 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
926321d5
DV
4356 return -EINVAL;
4357 }
4358
bd9db02f 4359 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
b94dec87 4360 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
926321d5
DV
4361 return -EINVAL;
4362 }
4363
bd9db02f 4364 if (display_crc_ctl_parse_source(words[2], &source) < 0) {
b94dec87 4365 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
926321d5
DV
4366 return -EINVAL;
4367 }
4368
36cdd013 4369 return pipe_crc_set_source(dev_priv, pipe, source);
926321d5
DV
4370}
4371
bd9db02f
DL
4372static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
4373 size_t len, loff_t *offp)
926321d5
DV
4374{
4375 struct seq_file *m = file->private_data;
36cdd013 4376 struct drm_i915_private *dev_priv = m->private;
926321d5
DV
4377 char *tmpbuf;
4378 int ret;
4379
4380 if (len == 0)
4381 return 0;
4382
4383 if (len > PAGE_SIZE - 1) {
4384 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
4385 PAGE_SIZE);
4386 return -E2BIG;
4387 }
4388
4389 tmpbuf = kmalloc(len + 1, GFP_KERNEL);
4390 if (!tmpbuf)
4391 return -ENOMEM;
4392
4393 if (copy_from_user(tmpbuf, ubuf, len)) {
4394 ret = -EFAULT;
4395 goto out;
4396 }
4397 tmpbuf[len] = '\0';
4398
36cdd013 4399 ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
926321d5
DV
4400
4401out:
4402 kfree(tmpbuf);
4403 if (ret < 0)
4404 return ret;
4405
4406 *offp += len;
4407 return len;
4408}
4409
bd9db02f 4410static const struct file_operations i915_display_crc_ctl_fops = {
926321d5 4411 .owner = THIS_MODULE,
bd9db02f 4412 .open = display_crc_ctl_open,
926321d5
DV
4413 .read = seq_read,
4414 .llseek = seq_lseek,
4415 .release = single_release,
bd9db02f 4416 .write = display_crc_ctl_write
926321d5
DV
4417};
4418
eb3394fa 4419static ssize_t i915_displayport_test_active_write(struct file *file,
36cdd013
DW
4420 const char __user *ubuf,
4421 size_t len, loff_t *offp)
eb3394fa
TP
4422{
4423 char *input_buffer;
4424 int status = 0;
eb3394fa
TP
4425 struct drm_device *dev;
4426 struct drm_connector *connector;
4427 struct list_head *connector_list;
4428 struct intel_dp *intel_dp;
4429 int val = 0;
4430
9aaffa34 4431 dev = ((struct seq_file *)file->private_data)->private;
eb3394fa 4432
eb3394fa
TP
4433 connector_list = &dev->mode_config.connector_list;
4434
4435 if (len == 0)
4436 return 0;
4437
4438 input_buffer = kmalloc(len + 1, GFP_KERNEL);
4439 if (!input_buffer)
4440 return -ENOMEM;
4441
4442 if (copy_from_user(input_buffer, ubuf, len)) {
4443 status = -EFAULT;
4444 goto out;
4445 }
4446
4447 input_buffer[len] = '\0';
4448 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
4449
4450 list_for_each_entry(connector, connector_list, head) {
eb3394fa
TP
4451 if (connector->connector_type !=
4452 DRM_MODE_CONNECTOR_DisplayPort)
4453 continue;
4454
b8bb08ec 4455 if (connector->status == connector_status_connected &&
eb3394fa
TP
4456 connector->encoder != NULL) {
4457 intel_dp = enc_to_intel_dp(connector->encoder);
4458 status = kstrtoint(input_buffer, 10, &val);
4459 if (status < 0)
4460 goto out;
4461 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
4462 /* To prevent erroneous activation of the compliance
4463 * testing code, only accept an actual value of 1 here
4464 */
4465 if (val == 1)
4466 intel_dp->compliance_test_active = 1;
4467 else
4468 intel_dp->compliance_test_active = 0;
4469 }
4470 }
4471out:
4472 kfree(input_buffer);
4473 if (status < 0)
4474 return status;
4475
4476 *offp += len;
4477 return len;
4478}
4479
/*
 * Show the compliance-test-active flag ("1"/"0") for each DP connector;
 * disconnected or encoder-less connectors report "0".
 */
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			if (intel_dp->compliance_test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}

	return 0;
}
4505
/* Open handler wiring the file to its show routine with the drm_device. */
static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_active_show,
			   &dev_priv->drm);
}
4514
/* File operations for i915_dp_test_active (read/write). */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
4523
/*
 * Show the DP compliance test data (hex) for each connected DP connector;
 * others report "0".
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			seq_printf(m, "%lx", intel_dp->compliance_test_data);
		} else
			seq_puts(m, "0");
	}

	return 0;
}
/* Open handler wiring the file to its show routine with the drm_device. */
static int i915_displayport_test_data_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_data_show,
			   &dev_priv->drm);
}
4554
/* File operations for i915_dp_test_data (read-only). */
static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
4562
/*
 * Show the DP compliance test type (two hex digits) for each connected DP
 * connector; others report "0".
 */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			seq_printf(m, "%02lx", intel_dp->compliance_test_type);
		} else
			seq_puts(m, "0");
	}

	return 0;
}
4585
/* Open handler wiring the file to its show routine with the drm_device. */
static int i915_displayport_test_type_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_type_show,
			   &dev_priv->drm);
}
4594
/* File operations for i915_dp_test_type (read-only). */
static const struct file_operations i915_displayport_test_type_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
4602
97e94b22 4603static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
369a1342 4604{
36cdd013
DW
4605 struct drm_i915_private *dev_priv = m->private;
4606 struct drm_device *dev = &dev_priv->drm;
369a1342 4607 int level;
de38b95c
VS
4608 int num_levels;
4609
36cdd013 4610 if (IS_CHERRYVIEW(dev_priv))
de38b95c 4611 num_levels = 3;
36cdd013 4612 else if (IS_VALLEYVIEW(dev_priv))
de38b95c
VS
4613 num_levels = 1;
4614 else
5db94019 4615 num_levels = ilk_wm_max_level(dev_priv) + 1;
369a1342
VS
4616
4617 drm_modeset_lock_all(dev);
4618
4619 for (level = 0; level < num_levels; level++) {
4620 unsigned int latency = wm[level];
4621
97e94b22
DL
4622 /*
4623 * - WM1+ latency values in 0.5us units
de38b95c 4624 * - latencies are in us on gen9/vlv/chv
97e94b22 4625 */
36cdd013
DW
4626 if (INTEL_GEN(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) ||
4627 IS_CHERRYVIEW(dev_priv))
97e94b22
DL
4628 latency *= 10;
4629 else if (level > 0)
369a1342
VS
4630 latency *= 5;
4631
4632 seq_printf(m, "WM%d %u (%u.%u usec)\n",
97e94b22 4633 level, wm[level], latency / 10, latency % 10);
369a1342
VS
4634 }
4635
4636 drm_modeset_unlock_all(dev);
4637}
4638
4639static int pri_wm_latency_show(struct seq_file *m, void *data)
4640{
36cdd013 4641 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
4642 const uint16_t *latencies;
4643
36cdd013 4644 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
4645 latencies = dev_priv->wm.skl_latency;
4646 else
36cdd013 4647 latencies = dev_priv->wm.pri_latency;
369a1342 4648
97e94b22 4649 wm_latency_show(m, latencies);
369a1342
VS
4650
4651 return 0;
4652}
4653
4654static int spr_wm_latency_show(struct seq_file *m, void *data)
4655{
36cdd013 4656 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
4657 const uint16_t *latencies;
4658
36cdd013 4659 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
4660 latencies = dev_priv->wm.skl_latency;
4661 else
36cdd013 4662 latencies = dev_priv->wm.spr_latency;
369a1342 4663
97e94b22 4664 wm_latency_show(m, latencies);
369a1342
VS
4665
4666 return 0;
4667}
4668
4669static int cur_wm_latency_show(struct seq_file *m, void *data)
4670{
36cdd013 4671 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
4672 const uint16_t *latencies;
4673
36cdd013 4674 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
4675 latencies = dev_priv->wm.skl_latency;
4676 else
36cdd013 4677 latencies = dev_priv->wm.cur_latency;
369a1342 4678
97e94b22 4679 wm_latency_show(m, latencies);
369a1342
VS
4680
4681 return 0;
4682}
4683
4684static int pri_wm_latency_open(struct inode *inode, struct file *file)
4685{
36cdd013 4686 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 4687
36cdd013 4688 if (INTEL_GEN(dev_priv) < 5)
369a1342
VS
4689 return -ENODEV;
4690
36cdd013 4691 return single_open(file, pri_wm_latency_show, dev_priv);
369a1342
VS
4692}
4693
4694static int spr_wm_latency_open(struct inode *inode, struct file *file)
4695{
36cdd013 4696 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 4697
36cdd013 4698 if (HAS_GMCH_DISPLAY(dev_priv))
369a1342
VS
4699 return -ENODEV;
4700
36cdd013 4701 return single_open(file, spr_wm_latency_show, dev_priv);
369a1342
VS
4702}
4703
4704static int cur_wm_latency_open(struct inode *inode, struct file *file)
4705{
36cdd013 4706 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 4707
36cdd013 4708 if (HAS_GMCH_DISPLAY(dev_priv))
369a1342
VS
4709 return -ENODEV;
4710
36cdd013 4711 return single_open(file, cur_wm_latency_show, dev_priv);
369a1342
VS
4712}
4713
4714static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
97e94b22 4715 size_t len, loff_t *offp, uint16_t wm[8])
369a1342
VS
4716{
4717 struct seq_file *m = file->private_data;
36cdd013
DW
4718 struct drm_i915_private *dev_priv = m->private;
4719 struct drm_device *dev = &dev_priv->drm;
97e94b22 4720 uint16_t new[8] = { 0 };
de38b95c 4721 int num_levels;
369a1342
VS
4722 int level;
4723 int ret;
4724 char tmp[32];
4725
36cdd013 4726 if (IS_CHERRYVIEW(dev_priv))
de38b95c 4727 num_levels = 3;
36cdd013 4728 else if (IS_VALLEYVIEW(dev_priv))
de38b95c
VS
4729 num_levels = 1;
4730 else
5db94019 4731 num_levels = ilk_wm_max_level(dev_priv) + 1;
de38b95c 4732
369a1342
VS
4733 if (len >= sizeof(tmp))
4734 return -EINVAL;
4735
4736 if (copy_from_user(tmp, ubuf, len))
4737 return -EFAULT;
4738
4739 tmp[len] = '\0';
4740
97e94b22
DL
4741 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
4742 &new[0], &new[1], &new[2], &new[3],
4743 &new[4], &new[5], &new[6], &new[7]);
369a1342
VS
4744 if (ret != num_levels)
4745 return -EINVAL;
4746
4747 drm_modeset_lock_all(dev);
4748
4749 for (level = 0; level < num_levels; level++)
4750 wm[level] = new[level];
4751
4752 drm_modeset_unlock_all(dev);
4753
4754 return len;
4755}
4756
4757
4758static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
4759 size_t len, loff_t *offp)
4760{
4761 struct seq_file *m = file->private_data;
36cdd013 4762 struct drm_i915_private *dev_priv = m->private;
97e94b22 4763 uint16_t *latencies;
369a1342 4764
36cdd013 4765 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
4766 latencies = dev_priv->wm.skl_latency;
4767 else
36cdd013 4768 latencies = dev_priv->wm.pri_latency;
97e94b22
DL
4769
4770 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
4771}
4772
4773static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
4774 size_t len, loff_t *offp)
4775{
4776 struct seq_file *m = file->private_data;
36cdd013 4777 struct drm_i915_private *dev_priv = m->private;
97e94b22 4778 uint16_t *latencies;
369a1342 4779
36cdd013 4780 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
4781 latencies = dev_priv->wm.skl_latency;
4782 else
36cdd013 4783 latencies = dev_priv->wm.spr_latency;
97e94b22
DL
4784
4785 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
4786}
4787
4788static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4789 size_t len, loff_t *offp)
4790{
4791 struct seq_file *m = file->private_data;
36cdd013 4792 struct drm_i915_private *dev_priv = m->private;
97e94b22
DL
4793 uint16_t *latencies;
4794
36cdd013 4795 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
4796 latencies = dev_priv->wm.skl_latency;
4797 else
36cdd013 4798 latencies = dev_priv->wm.cur_latency;
369a1342 4799
97e94b22 4800 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
4801}
4802
4803static const struct file_operations i915_pri_wm_latency_fops = {
4804 .owner = THIS_MODULE,
4805 .open = pri_wm_latency_open,
4806 .read = seq_read,
4807 .llseek = seq_lseek,
4808 .release = single_release,
4809 .write = pri_wm_latency_write
4810};
4811
4812static const struct file_operations i915_spr_wm_latency_fops = {
4813 .owner = THIS_MODULE,
4814 .open = spr_wm_latency_open,
4815 .read = seq_read,
4816 .llseek = seq_lseek,
4817 .release = single_release,
4818 .write = spr_wm_latency_write
4819};
4820
4821static const struct file_operations i915_cur_wm_latency_fops = {
4822 .owner = THIS_MODULE,
4823 .open = cur_wm_latency_open,
4824 .read = seq_read,
4825 .llseek = seq_lseek,
4826 .release = single_release,
4827 .write = cur_wm_latency_write
4828};
4829
647416f9
KC
4830static int
4831i915_wedged_get(void *data, u64 *val)
f3cd474b 4832{
36cdd013 4833 struct drm_i915_private *dev_priv = data;
f3cd474b 4834
d98c52cf 4835 *val = i915_terminally_wedged(&dev_priv->gpu_error);
f3cd474b 4836
647416f9 4837 return 0;
f3cd474b
CW
4838}
4839
647416f9
KC
4840static int
4841i915_wedged_set(void *data, u64 val)
f3cd474b 4842{
36cdd013 4843 struct drm_i915_private *dev_priv = data;
d46c0517 4844
b8d24a06
MK
4845 /*
4846 * There is no safeguard against this debugfs entry colliding
4847 * with the hangcheck calling same i915_handle_error() in
4848 * parallel, causing an explosion. For now we assume that the
4849 * test harness is responsible enough not to inject gpu hangs
4850 * while it is writing to 'i915_wedged'
4851 */
4852
d98c52cf 4853 if (i915_reset_in_progress(&dev_priv->gpu_error))
b8d24a06
MK
4854 return -EAGAIN;
4855
c033666a 4856 i915_handle_error(dev_priv, val,
58174462 4857 "Manually setting wedged to %llu", val);
d46c0517 4858
647416f9 4859 return 0;
f3cd474b
CW
4860}
4861
647416f9
KC
4862DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4863 i915_wedged_get, i915_wedged_set,
3a3b4f98 4864 "%llu\n");
f3cd474b 4865
094f9a54
CW
4866static int
4867i915_ring_missed_irq_get(void *data, u64 *val)
4868{
36cdd013 4869 struct drm_i915_private *dev_priv = data;
094f9a54
CW
4870
4871 *val = dev_priv->gpu_error.missed_irq_rings;
4872 return 0;
4873}
4874
4875static int
4876i915_ring_missed_irq_set(void *data, u64 val)
4877{
36cdd013
DW
4878 struct drm_i915_private *dev_priv = data;
4879 struct drm_device *dev = &dev_priv->drm;
094f9a54
CW
4880 int ret;
4881
4882 /* Lock against concurrent debugfs callers */
4883 ret = mutex_lock_interruptible(&dev->struct_mutex);
4884 if (ret)
4885 return ret;
4886 dev_priv->gpu_error.missed_irq_rings = val;
4887 mutex_unlock(&dev->struct_mutex);
4888
4889 return 0;
4890}
4891
4892DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4893 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4894 "0x%08llx\n");
4895
4896static int
4897i915_ring_test_irq_get(void *data, u64 *val)
4898{
36cdd013 4899 struct drm_i915_private *dev_priv = data;
094f9a54
CW
4900
4901 *val = dev_priv->gpu_error.test_irq_rings;
4902
4903 return 0;
4904}
4905
4906static int
4907i915_ring_test_irq_set(void *data, u64 val)
4908{
36cdd013 4909 struct drm_i915_private *dev_priv = data;
094f9a54 4910
3a122c27 4911 val &= INTEL_INFO(dev_priv)->ring_mask;
094f9a54 4912 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
094f9a54 4913 dev_priv->gpu_error.test_irq_rings = val;
094f9a54
CW
4914
4915 return 0;
4916}
4917
4918DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4919 i915_ring_test_irq_get, i915_ring_test_irq_set,
4920 "0x%08llx\n");
4921
dd624afd
CW
4922#define DROP_UNBOUND 0x1
4923#define DROP_BOUND 0x2
4924#define DROP_RETIRE 0x4
4925#define DROP_ACTIVE 0x8
fbbd37b3
CW
4926#define DROP_FREED 0x10
4927#define DROP_ALL (DROP_UNBOUND | \
4928 DROP_BOUND | \
4929 DROP_RETIRE | \
4930 DROP_ACTIVE | \
4931 DROP_FREED)
647416f9
KC
4932static int
4933i915_drop_caches_get(void *data, u64 *val)
dd624afd 4934{
647416f9 4935 *val = DROP_ALL;
dd624afd 4936
647416f9 4937 return 0;
dd624afd
CW
4938}
4939
647416f9
KC
4940static int
4941i915_drop_caches_set(void *data, u64 val)
dd624afd 4942{
36cdd013
DW
4943 struct drm_i915_private *dev_priv = data;
4944 struct drm_device *dev = &dev_priv->drm;
647416f9 4945 int ret;
dd624afd 4946
2f9fe5ff 4947 DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
dd624afd
CW
4948
4949 /* No need to check and wait for gpu resets, only libdrm auto-restarts
4950 * on ioctls on -EAGAIN. */
4951 ret = mutex_lock_interruptible(&dev->struct_mutex);
4952 if (ret)
4953 return ret;
4954
4955 if (val & DROP_ACTIVE) {
22dd3bb9
CW
4956 ret = i915_gem_wait_for_idle(dev_priv,
4957 I915_WAIT_INTERRUPTIBLE |
4958 I915_WAIT_LOCKED);
dd624afd
CW
4959 if (ret)
4960 goto unlock;
4961 }
4962
4963 if (val & (DROP_RETIRE | DROP_ACTIVE))
c033666a 4964 i915_gem_retire_requests(dev_priv);
dd624afd 4965
21ab4e74
CW
4966 if (val & DROP_BOUND)
4967 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
4ad72b7f 4968
21ab4e74
CW
4969 if (val & DROP_UNBOUND)
4970 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
dd624afd
CW
4971
4972unlock:
4973 mutex_unlock(&dev->struct_mutex);
4974
fbbd37b3
CW
4975 if (val & DROP_FREED) {
4976 synchronize_rcu();
4977 flush_work(&dev_priv->mm.free_work);
4978 }
4979
647416f9 4980 return ret;
dd624afd
CW
4981}
4982
647416f9
KC
4983DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4984 i915_drop_caches_get, i915_drop_caches_set,
4985 "0x%08llx\n");
dd624afd 4986
647416f9
KC
4987static int
4988i915_max_freq_get(void *data, u64 *val)
358733e9 4989{
36cdd013 4990 struct drm_i915_private *dev_priv = data;
004777cb 4991
36cdd013 4992 if (INTEL_GEN(dev_priv) < 6)
004777cb
DV
4993 return -ENODEV;
4994
7c59a9c1 4995 *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
647416f9 4996 return 0;
358733e9
JB
4997}
4998
647416f9
KC
4999static int
5000i915_max_freq_set(void *data, u64 val)
358733e9 5001{
36cdd013 5002 struct drm_i915_private *dev_priv = data;
bc4d91f6 5003 u32 hw_max, hw_min;
647416f9 5004 int ret;
004777cb 5005
36cdd013 5006 if (INTEL_GEN(dev_priv) < 6)
004777cb 5007 return -ENODEV;
358733e9 5008
647416f9 5009 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
358733e9 5010
4fc688ce 5011 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
004777cb
DV
5012 if (ret)
5013 return ret;
5014
358733e9
JB
5015 /*
5016 * Turbo will still be enabled, but won't go above the set value.
5017 */
bc4d91f6 5018 val = intel_freq_opcode(dev_priv, val);
dd0a1aa1 5019
bc4d91f6
AG
5020 hw_max = dev_priv->rps.max_freq;
5021 hw_min = dev_priv->rps.min_freq;
dd0a1aa1 5022
b39fb297 5023 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
dd0a1aa1
JM
5024 mutex_unlock(&dev_priv->rps.hw_lock);
5025 return -EINVAL;
0a073b84
JB
5026 }
5027
b39fb297 5028 dev_priv->rps.max_freq_softlimit = val;
dd0a1aa1 5029
dc97997a 5030 intel_set_rps(dev_priv, val);
dd0a1aa1 5031
4fc688ce 5032 mutex_unlock(&dev_priv->rps.hw_lock);
358733e9 5033
647416f9 5034 return 0;
358733e9
JB
5035}
5036
647416f9
KC
5037DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
5038 i915_max_freq_get, i915_max_freq_set,
3a3b4f98 5039 "%llu\n");
358733e9 5040
647416f9
KC
5041static int
5042i915_min_freq_get(void *data, u64 *val)
1523c310 5043{
36cdd013 5044 struct drm_i915_private *dev_priv = data;
004777cb 5045
62e1baa1 5046 if (INTEL_GEN(dev_priv) < 6)
004777cb
DV
5047 return -ENODEV;
5048
7c59a9c1 5049 *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
647416f9 5050 return 0;
1523c310
JB
5051}
5052
647416f9
KC
5053static int
5054i915_min_freq_set(void *data, u64 val)
1523c310 5055{
36cdd013 5056 struct drm_i915_private *dev_priv = data;
bc4d91f6 5057 u32 hw_max, hw_min;
647416f9 5058 int ret;
004777cb 5059
62e1baa1 5060 if (INTEL_GEN(dev_priv) < 6)
004777cb 5061 return -ENODEV;
1523c310 5062
647416f9 5063 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
1523c310 5064
4fc688ce 5065 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
004777cb
DV
5066 if (ret)
5067 return ret;
5068
1523c310
JB
5069 /*
5070 * Turbo will still be enabled, but won't go below the set value.
5071 */
bc4d91f6 5072 val = intel_freq_opcode(dev_priv, val);
dd0a1aa1 5073
bc4d91f6
AG
5074 hw_max = dev_priv->rps.max_freq;
5075 hw_min = dev_priv->rps.min_freq;
dd0a1aa1 5076
36cdd013
DW
5077 if (val < hw_min ||
5078 val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
dd0a1aa1
JM
5079 mutex_unlock(&dev_priv->rps.hw_lock);
5080 return -EINVAL;
0a073b84 5081 }
dd0a1aa1 5082
b39fb297 5083 dev_priv->rps.min_freq_softlimit = val;
dd0a1aa1 5084
dc97997a 5085 intel_set_rps(dev_priv, val);
dd0a1aa1 5086
4fc688ce 5087 mutex_unlock(&dev_priv->rps.hw_lock);
1523c310 5088
647416f9 5089 return 0;
1523c310
JB
5090}
5091
647416f9
KC
5092DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
5093 i915_min_freq_get, i915_min_freq_set,
3a3b4f98 5094 "%llu\n");
1523c310 5095
647416f9
KC
5096static int
5097i915_cache_sharing_get(void *data, u64 *val)
07b7ddd9 5098{
36cdd013 5099 struct drm_i915_private *dev_priv = data;
07b7ddd9 5100 u32 snpcr;
07b7ddd9 5101
36cdd013 5102 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
004777cb
DV
5103 return -ENODEV;
5104
c8c8fb33 5105 intel_runtime_pm_get(dev_priv);
22bcfc6a 5106
07b7ddd9 5107 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
c8c8fb33
PZ
5108
5109 intel_runtime_pm_put(dev_priv);
07b7ddd9 5110
647416f9 5111 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
07b7ddd9 5112
647416f9 5113 return 0;
07b7ddd9
JB
5114}
5115
647416f9
KC
5116static int
5117i915_cache_sharing_set(void *data, u64 val)
07b7ddd9 5118{
36cdd013 5119 struct drm_i915_private *dev_priv = data;
07b7ddd9 5120 u32 snpcr;
07b7ddd9 5121
36cdd013 5122 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
004777cb
DV
5123 return -ENODEV;
5124
647416f9 5125 if (val > 3)
07b7ddd9
JB
5126 return -EINVAL;
5127
c8c8fb33 5128 intel_runtime_pm_get(dev_priv);
647416f9 5129 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
07b7ddd9
JB
5130
5131 /* Update the cache sharing policy here as well */
5132 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5133 snpcr &= ~GEN6_MBC_SNPCR_MASK;
5134 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
5135 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
5136
c8c8fb33 5137 intel_runtime_pm_put(dev_priv);
647416f9 5138 return 0;
07b7ddd9
JB
5139}
5140
647416f9
KC
5141DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
5142 i915_cache_sharing_get, i915_cache_sharing_set,
5143 "%llu\n");
07b7ddd9 5144
36cdd013 5145static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 5146 struct sseu_dev_info *sseu)
5d39525a 5147{
0a0b457f 5148 int ss_max = 2;
5d39525a
JM
5149 int ss;
5150 u32 sig1[ss_max], sig2[ss_max];
5151
5152 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
5153 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
5154 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
5155 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
5156
5157 for (ss = 0; ss < ss_max; ss++) {
5158 unsigned int eu_cnt;
5159
5160 if (sig1[ss] & CHV_SS_PG_ENABLE)
5161 /* skip disabled subslice */
5162 continue;
5163
f08a0c92 5164 sseu->slice_mask = BIT(0);
57ec171e 5165 sseu->subslice_mask |= BIT(ss);
5d39525a
JM
5166 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
5167 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
5168 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
5169 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
915490d5
ID
5170 sseu->eu_total += eu_cnt;
5171 sseu->eu_per_subslice = max_t(unsigned int,
5172 sseu->eu_per_subslice, eu_cnt);
5d39525a 5173 }
5d39525a
JM
5174}
5175
36cdd013 5176static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 5177 struct sseu_dev_info *sseu)
5d39525a 5178{
1c046bc1 5179 int s_max = 3, ss_max = 4;
5d39525a
JM
5180 int s, ss;
5181 u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
5182
1c046bc1 5183 /* BXT has a single slice and at most 3 subslices. */
cc3f90f0 5184 if (IS_GEN9_LP(dev_priv)) {
1c046bc1
JM
5185 s_max = 1;
5186 ss_max = 3;
5187 }
5188
5189 for (s = 0; s < s_max; s++) {
5190 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
5191 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
5192 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
5193 }
5194
5d39525a
JM
5195 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
5196 GEN9_PGCTL_SSA_EU19_ACK |
5197 GEN9_PGCTL_SSA_EU210_ACK |
5198 GEN9_PGCTL_SSA_EU311_ACK;
5199 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
5200 GEN9_PGCTL_SSB_EU19_ACK |
5201 GEN9_PGCTL_SSB_EU210_ACK |
5202 GEN9_PGCTL_SSB_EU311_ACK;
5203
5204 for (s = 0; s < s_max; s++) {
5205 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
5206 /* skip disabled slice */
5207 continue;
5208
f08a0c92 5209 sseu->slice_mask |= BIT(s);
1c046bc1 5210
36cdd013 5211 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
57ec171e
ID
5212 sseu->subslice_mask =
5213 INTEL_INFO(dev_priv)->sseu.subslice_mask;
1c046bc1 5214
5d39525a
JM
5215 for (ss = 0; ss < ss_max; ss++) {
5216 unsigned int eu_cnt;
5217
cc3f90f0 5218 if (IS_GEN9_LP(dev_priv)) {
57ec171e
ID
5219 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
5220 /* skip disabled subslice */
5221 continue;
1c046bc1 5222
57ec171e
ID
5223 sseu->subslice_mask |= BIT(ss);
5224 }
1c046bc1 5225
5d39525a
JM
5226 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
5227 eu_mask[ss%2]);
915490d5
ID
5228 sseu->eu_total += eu_cnt;
5229 sseu->eu_per_subslice = max_t(unsigned int,
5230 sseu->eu_per_subslice,
5231 eu_cnt);
5d39525a
JM
5232 }
5233 }
5234}
5235
36cdd013 5236static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 5237 struct sseu_dev_info *sseu)
91bedd34 5238{
91bedd34 5239 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
36cdd013 5240 int s;
91bedd34 5241
f08a0c92 5242 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
91bedd34 5243
f08a0c92 5244 if (sseu->slice_mask) {
57ec171e 5245 sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
43b67998
ID
5246 sseu->eu_per_subslice =
5247 INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
57ec171e
ID
5248 sseu->eu_total = sseu->eu_per_subslice *
5249 sseu_subslice_total(sseu);
91bedd34
ŁD
5250
5251 /* subtract fused off EU(s) from enabled slice(s) */
795b38b3 5252 for (s = 0; s < fls(sseu->slice_mask); s++) {
43b67998
ID
5253 u8 subslice_7eu =
5254 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
91bedd34 5255
915490d5 5256 sseu->eu_total -= hweight8(subslice_7eu);
91bedd34
ŁD
5257 }
5258 }
5259}
5260
615d8908
ID
5261static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
5262 const struct sseu_dev_info *sseu)
5263{
5264 struct drm_i915_private *dev_priv = node_to_i915(m->private);
5265 const char *type = is_available_info ? "Available" : "Enabled";
5266
c67ba538
ID
5267 seq_printf(m, " %s Slice Mask: %04x\n", type,
5268 sseu->slice_mask);
615d8908 5269 seq_printf(m, " %s Slice Total: %u\n", type,
f08a0c92 5270 hweight8(sseu->slice_mask));
615d8908 5271 seq_printf(m, " %s Subslice Total: %u\n", type,
57ec171e 5272 sseu_subslice_total(sseu));
c67ba538
ID
5273 seq_printf(m, " %s Subslice Mask: %04x\n", type,
5274 sseu->subslice_mask);
615d8908 5275 seq_printf(m, " %s Subslice Per Slice: %u\n", type,
57ec171e 5276 hweight8(sseu->subslice_mask));
615d8908
ID
5277 seq_printf(m, " %s EU Total: %u\n", type,
5278 sseu->eu_total);
5279 seq_printf(m, " %s EU Per Subslice: %u\n", type,
5280 sseu->eu_per_subslice);
5281
5282 if (!is_available_info)
5283 return;
5284
5285 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
5286 if (HAS_POOLED_EU(dev_priv))
5287 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
5288
5289 seq_printf(m, " Has Slice Power Gating: %s\n",
5290 yesno(sseu->has_slice_pg));
5291 seq_printf(m, " Has Subslice Power Gating: %s\n",
5292 yesno(sseu->has_subslice_pg));
5293 seq_printf(m, " Has EU Power Gating: %s\n",
5294 yesno(sseu->has_eu_pg));
5295}
5296
3873218f
JM
5297static int i915_sseu_status(struct seq_file *m, void *unused)
5298{
36cdd013 5299 struct drm_i915_private *dev_priv = node_to_i915(m->private);
915490d5 5300 struct sseu_dev_info sseu;
3873218f 5301
36cdd013 5302 if (INTEL_GEN(dev_priv) < 8)
3873218f
JM
5303 return -ENODEV;
5304
5305 seq_puts(m, "SSEU Device Info\n");
615d8908 5306 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
3873218f 5307
7f992aba 5308 seq_puts(m, "SSEU Device Status\n");
915490d5 5309 memset(&sseu, 0, sizeof(sseu));
238010ed
DW
5310
5311 intel_runtime_pm_get(dev_priv);
5312
36cdd013 5313 if (IS_CHERRYVIEW(dev_priv)) {
915490d5 5314 cherryview_sseu_device_status(dev_priv, &sseu);
36cdd013 5315 } else if (IS_BROADWELL(dev_priv)) {
915490d5 5316 broadwell_sseu_device_status(dev_priv, &sseu);
36cdd013 5317 } else if (INTEL_GEN(dev_priv) >= 9) {
915490d5 5318 gen9_sseu_device_status(dev_priv, &sseu);
7f992aba 5319 }
238010ed
DW
5320
5321 intel_runtime_pm_put(dev_priv);
5322
615d8908 5323 i915_print_sseu_info(m, false, &sseu);
7f992aba 5324
3873218f
JM
5325 return 0;
5326}
5327
6d794d42
BW
5328static int i915_forcewake_open(struct inode *inode, struct file *file)
5329{
36cdd013 5330 struct drm_i915_private *dev_priv = inode->i_private;
6d794d42 5331
36cdd013 5332 if (INTEL_GEN(dev_priv) < 6)
6d794d42
BW
5333 return 0;
5334
6daccb0b 5335 intel_runtime_pm_get(dev_priv);
59bad947 5336 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6d794d42
BW
5337
5338 return 0;
5339}
5340
c43b5634 5341static int i915_forcewake_release(struct inode *inode, struct file *file)
6d794d42 5342{
36cdd013 5343 struct drm_i915_private *dev_priv = inode->i_private;
6d794d42 5344
36cdd013 5345 if (INTEL_GEN(dev_priv) < 6)
6d794d42
BW
5346 return 0;
5347
59bad947 5348 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6daccb0b 5349 intel_runtime_pm_put(dev_priv);
6d794d42
BW
5350
5351 return 0;
5352}
5353
5354static const struct file_operations i915_forcewake_fops = {
5355 .owner = THIS_MODULE,
5356 .open = i915_forcewake_open,
5357 .release = i915_forcewake_release,
5358};
5359
5360static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
5361{
6d794d42
BW
5362 struct dentry *ent;
5363
5364 ent = debugfs_create_file("i915_forcewake_user",
8eb57294 5365 S_IRUSR,
36cdd013 5366 root, to_i915(minor->dev),
6d794d42 5367 &i915_forcewake_fops);
f3c5fe97
WY
5368 if (!ent)
5369 return -ENOMEM;
6d794d42 5370
8eb57294 5371 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
6d794d42
BW
5372}
5373
6a9c308d
DV
5374static int i915_debugfs_create(struct dentry *root,
5375 struct drm_minor *minor,
5376 const char *name,
5377 const struct file_operations *fops)
07b7ddd9 5378{
07b7ddd9
JB
5379 struct dentry *ent;
5380
6a9c308d 5381 ent = debugfs_create_file(name,
07b7ddd9 5382 S_IRUGO | S_IWUSR,
36cdd013 5383 root, to_i915(minor->dev),
6a9c308d 5384 fops);
f3c5fe97
WY
5385 if (!ent)
5386 return -ENOMEM;
07b7ddd9 5387
6a9c308d 5388 return drm_add_fake_info_node(minor, ent, fops);
07b7ddd9
JB
5389}
5390
06c5bf8c 5391static const struct drm_info_list i915_debugfs_list[] = {
311bd68e 5392 {"i915_capabilities", i915_capabilities, 0},
73aa808f 5393 {"i915_gem_objects", i915_gem_object_info, 0},
08c18323 5394 {"i915_gem_gtt", i915_gem_gtt_info, 0},
6da84829 5395 {"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
6d2b8885 5396 {"i915_gem_stolen", i915_gem_stolen_list_info },
4e5359cd 5397 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
2017263e
BG
5398 {"i915_gem_request", i915_gem_request_info, 0},
5399 {"i915_gem_seqno", i915_gem_seqno_info, 0},
a6172a80 5400 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
2017263e 5401 {"i915_gem_interrupt", i915_interrupt_info, 0},
493018dc 5402 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
8b417c26 5403 {"i915_guc_info", i915_guc_info, 0},
fdf5d357 5404 {"i915_guc_load_status", i915_guc_load_status_info, 0},
4c7e77fc 5405 {"i915_guc_log_dump", i915_guc_log_dump, 0},
adb4bd12 5406 {"i915_frequency_info", i915_frequency_info, 0},
f654449a 5407 {"i915_hangcheck_info", i915_hangcheck_info, 0},
f97108d1 5408 {"i915_drpc_info", i915_drpc_info, 0},
7648fa99 5409 {"i915_emon_status", i915_emon_status, 0},
23b2f8bb 5410 {"i915_ring_freq_table", i915_ring_freq_table, 0},
9a851789 5411 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
b5e50c3f 5412 {"i915_fbc_status", i915_fbc_status, 0},
92d44621 5413 {"i915_ips_status", i915_ips_status, 0},
4a9bef37 5414 {"i915_sr_status", i915_sr_status, 0},
44834a67 5415 {"i915_opregion", i915_opregion, 0},
ada8f955 5416 {"i915_vbt", i915_vbt, 0},
37811fcc 5417 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
e76d3630 5418 {"i915_context_status", i915_context_status, 0},
c0ab1ae9 5419 {"i915_dump_lrc", i915_dump_lrc, 0},
f65367b5 5420 {"i915_forcewake_domains", i915_forcewake_domains, 0},
ea16a3cd 5421 {"i915_swizzle_info", i915_swizzle_info, 0},
3cf17fc5 5422 {"i915_ppgtt_info", i915_ppgtt_info, 0},
63573eb7 5423 {"i915_llc", i915_llc, 0},
e91fd8c6 5424 {"i915_edp_psr_status", i915_edp_psr_status, 0},
d2e216d0 5425 {"i915_sink_crc_eDP1", i915_sink_crc, 0},
ec013e7f 5426 {"i915_energy_uJ", i915_energy_uJ, 0},
6455c870 5427 {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
1da51581 5428 {"i915_power_domain_info", i915_power_domain_info, 0},
b7cec66d 5429 {"i915_dmc_info", i915_dmc_info, 0},
53f5e3ca 5430 {"i915_display_info", i915_display_info, 0},
1b36595f 5431 {"i915_engine_info", i915_engine_info, 0},
e04934cf 5432 {"i915_semaphore_status", i915_semaphore_status, 0},
728e29d7 5433 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
11bed958 5434 {"i915_dp_mst_info", i915_dp_mst_info, 0},
1ed1ef9d 5435 {"i915_wa_registers", i915_wa_registers, 0},
c5511e44 5436 {"i915_ddb_info", i915_ddb_info, 0},
3873218f 5437 {"i915_sseu_status", i915_sseu_status, 0},
a54746e3 5438 {"i915_drrs_status", i915_drrs_status, 0},
1854d5ca 5439 {"i915_rps_boost_info", i915_rps_boost_info, 0},
2017263e 5440};
27c202ad 5441#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2017263e 5442
06c5bf8c 5443static const struct i915_debugfs_files {
34b9674c
DV
5444 const char *name;
5445 const struct file_operations *fops;
5446} i915_debugfs_files[] = {
5447 {"i915_wedged", &i915_wedged_fops},
5448 {"i915_max_freq", &i915_max_freq_fops},
5449 {"i915_min_freq", &i915_min_freq_fops},
5450 {"i915_cache_sharing", &i915_cache_sharing_fops},
094f9a54
CW
5451 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
5452 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
34b9674c 5453 {"i915_gem_drop_caches", &i915_drop_caches_fops},
98a2f411 5454#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
34b9674c 5455 {"i915_error_state", &i915_error_state_fops},
98a2f411 5456#endif
34b9674c 5457 {"i915_next_seqno", &i915_next_seqno_fops},
bd9db02f 5458 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
369a1342
VS
5459 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
5460 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
5461 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
da46f936 5462 {"i915_fbc_false_color", &i915_fbc_fc_fops},
eb3394fa
TP
5463 {"i915_dp_test_data", &i915_displayport_test_data_fops},
5464 {"i915_dp_test_type", &i915_displayport_test_type_fops},
685534ef
SAK
5465 {"i915_dp_test_active", &i915_displayport_test_active_fops},
5466 {"i915_guc_log_control", &i915_guc_log_control_fops}
34b9674c
DV
5467};
5468
36cdd013 5469void intel_display_crc_init(struct drm_i915_private *dev_priv)
07144428 5470{
b378360e 5471 enum pipe pipe;
07144428 5472
055e393f 5473 for_each_pipe(dev_priv, pipe) {
b378360e 5474 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
07144428 5475
d538bbdf
DL
5476 pipe_crc->opened = false;
5477 spin_lock_init(&pipe_crc->lock);
07144428
DL
5478 init_waitqueue_head(&pipe_crc->wq);
5479 }
5480}
5481
1dac891c 5482int i915_debugfs_register(struct drm_i915_private *dev_priv)
2017263e 5483{
91c8a326 5484 struct drm_minor *minor = dev_priv->drm.primary;
34b9674c 5485 int ret, i;
f3cd474b 5486
6d794d42 5487 ret = i915_forcewake_create(minor->debugfs_root, minor);
358733e9
JB
5488 if (ret)
5489 return ret;
6a9c308d 5490
07144428
DL
5491 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
5492 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
5493 if (ret)
5494 return ret;
5495 }
5496
34b9674c
DV
5497 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
5498 ret = i915_debugfs_create(minor->debugfs_root, minor,
5499 i915_debugfs_files[i].name,
5500 i915_debugfs_files[i].fops);
5501 if (ret)
5502 return ret;
5503 }
40633219 5504
27c202ad
BG
5505 return drm_debugfs_create_files(i915_debugfs_list,
5506 I915_DEBUGFS_ENTRIES,
2017263e
BG
5507 minor->debugfs_root, minor);
5508}
5509
1dac891c 5510void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
2017263e 5511{
91c8a326 5512 struct drm_minor *minor = dev_priv->drm.primary;
34b9674c
DV
5513 int i;
5514
27c202ad
BG
5515 drm_debugfs_remove_files(i915_debugfs_list,
5516 I915_DEBUGFS_ENTRIES, minor);
07144428 5517
36cdd013 5518 drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops,
6d794d42 5519 1, minor);
07144428 5520
e309a997 5521 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
07144428
DL
5522 struct drm_info_list *info_list =
5523 (struct drm_info_list *)&i915_pipe_crc_data[i];
5524
5525 drm_debugfs_remove_files(info_list, 1, minor);
5526 }
5527
34b9674c
DV
5528 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
5529 struct drm_info_list *info_list =
36cdd013 5530 (struct drm_info_list *)i915_debugfs_files[i].fops;
34b9674c
DV
5531
5532 drm_debugfs_remove_files(info_list, 1, minor);
5533 }
2017263e 5534}
aa7471d2
JN
5535
5536struct dpcd_block {
5537 /* DPCD dump start address. */
5538 unsigned int offset;
5539 /* DPCD dump end address, inclusive. If unset, .size will be used. */
5540 unsigned int end;
5541 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
5542 size_t size;
5543 /* Only valid for eDP. */
5544 bool edp;
5545};
5546
/*
 * DPCD register ranges dumped by i915_dpcd_show().  Each range must fit
 * in the 16-byte read buffer used there (enforced with a WARN_ON).
 */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
5559
5560static int i915_dpcd_show(struct seq_file *m, void *data)
5561{
5562 struct drm_connector *connector = m->private;
5563 struct intel_dp *intel_dp =
5564 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
5565 uint8_t buf[16];
5566 ssize_t err;
5567 int i;
5568
5c1a8875
MK
5569 if (connector->status != connector_status_connected)
5570 return -ENODEV;
5571
aa7471d2
JN
5572 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
5573 const struct dpcd_block *b = &i915_dpcd_debug[i];
5574 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
5575
5576 if (b->edp &&
5577 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
5578 continue;
5579
5580 /* low tech for now */
5581 if (WARN_ON(size > sizeof(buf)))
5582 continue;
5583
5584 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
5585 if (err <= 0) {
5586 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
5587 size, b->offset, err);
5588 continue;
5589 }
5590
5591 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
b3f9d7d7 5592 }
aa7471d2
JN
5593
5594 return 0;
5595}
5596
/* Open hook wiring the i915_dpcd file to i915_dpcd_show() via seq_file;
 * i_private carries the drm_connector installed at file creation time. */
static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}
5601
/* Read-only seq_file fops for the per-connector i915_dpcd debugfs file. */
static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
5609
ecbd6781
DW
5610static int i915_panel_show(struct seq_file *m, void *data)
5611{
5612 struct drm_connector *connector = m->private;
5613 struct intel_dp *intel_dp =
5614 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
5615
5616 if (connector->status != connector_status_connected)
5617 return -ENODEV;
5618
5619 seq_printf(m, "Panel power up delay: %d\n",
5620 intel_dp->panel_power_up_delay);
5621 seq_printf(m, "Panel power down delay: %d\n",
5622 intel_dp->panel_power_down_delay);
5623 seq_printf(m, "Backlight on delay: %d\n",
5624 intel_dp->backlight_on_delay);
5625 seq_printf(m, "Backlight off delay: %d\n",
5626 intel_dp->backlight_off_delay);
5627
5628 return 0;
5629}
5630
/* Open hook wiring the i915_panel_timings file to i915_panel_show();
 * i_private carries the drm_connector installed at file creation time. */
static int i915_panel_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_panel_show, inode->i_private);
}
5635
/* Read-only seq_file fops for the eDP i915_panel_timings debugfs file. */
static const struct file_operations i915_panel_fops = {
	.owner = THIS_MODULE,
	.open = i915_panel_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
5643
aa7471d2
JN
5644/**
5645 * i915_debugfs_connector_add - add i915 specific connector debugfs files
5646 * @connector: pointer to a registered drm_connector
5647 *
5648 * Cleanup will be done by drm_connector_unregister() through a call to
5649 * drm_debugfs_connector_remove().
5650 *
5651 * Returns 0 on success, negative error codes on error.
5652 */
5653int i915_debugfs_connector_add(struct drm_connector *connector)
5654{
5655 struct dentry *root = connector->debugfs_entry;
5656
5657 /* The connector must have been registered beforehands. */
5658 if (!root)
5659 return -ENODEV;
5660
5661 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5662 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
ecbd6781
DW
5663 debugfs_create_file("i915_dpcd", S_IRUGO, root,
5664 connector, &i915_dpcd_fops);
5665
5666 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5667 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
5668 connector, &i915_panel_fops);
aa7471d2
JN
5669
5670 return 0;
5671}