/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/sort.h>
#include "intel_drv.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
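/*
 * seq_print_param() below is fed by the PRINT_PARAM macro in
 * i915_capabilities(): each module parameter arrives with its type name
 * stringified (#T), so the __builtin_strcmp() chain is resolved at compile
 * time and only the seq_printf() matching the parameter's type is emitted.
 */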
static __always_inline void seq_print_param(struct seq_file *m,
					    const char *name,
					    const char *type,
					    const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
	else if (!__builtin_strcmp(type, "int"))
		seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
	else if (!__builtin_strcmp(type, "char *"))
		seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
	else
		BUILD_BUG();
}
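/*
 * Example usage (assuming the GPU is DRM minor 0; the debugfs path varies
 * with the minor number):
 *
 *   # cat /sys/kernel/debug/dri/0/i915_capabilities
 *
 * The output is the gen/platform/pch lines, one device-info flag per line,
 * and the current value of every i915.* module parameter.
 */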
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	kernel_param_lock(THIS_MODULE);
#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x);
	I915_PARAMS_FOR_EACH(PRINT_PARAM);
#undef PRINT_PARAM
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
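/*
 * The helpers below condense per-object state into the single-character
 * flags printed by describe_obj(): '*' active, 'p' pinned for display,
 * X/Y tiling, 'g' has an outstanding GGTT userfault mapping, 'M' has a
 * kernel mapping of its pages.
 */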
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_display ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return !list_empty(&obj->userfault_link) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size);
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
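/*
 * qsort-style comparator for sort(): orders objects by the start offset of
 * their stolen-memory allocation, so the stolen listing below comes out in
 * ascending address order.
 */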
static int obj_rank_by_stolen(const void *A, const void *B)
{
	const struct drm_i915_gem_object *a =
		*(const struct drm_i915_gem_object **)A;
	const struct drm_i915_gem_object *b =
		*(const struct drm_i915_gem_object **)B;

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
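/*
 * Accumulator threaded through the idr_for_each() callbacks below; all
 * sizes are in bytes.  file_priv identifies the client so that ppgtt VMAs
 * belonging to other clients' address spaces can be skipped.
 */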
struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
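/*
 * Context backing objects (per-engine register state and ringbuffers) are
 * not reachable through a file's object_idr, so they are accounted by
 * walking each context's engine array instead; see print_context_stats().
 */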
static int per_file_ctx_stats(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state->obj, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
	}

	return 0;
}
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count;
	u64 size, mapped_size, purgeable_size, dpy_size;
	struct drm_i915_gem_object *obj;
	struct drm_file *file;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		size += obj->base.size;
		++count;

		if (obj->pin_display) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u display objects (pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_gem_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
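/*
 * i915_gem_gtt_info() backs two debugfs nodes sharing one show function:
 * info_ent->data is NULL for the full bound-object list and non-NULL for
 * the variant restricted to display-pinned objects.
 */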
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	bool show_pin_display_only = !!node->info_ent->data;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (show_pin_display_only && !obj->pin_display)
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		work = crtc->flip_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 pending;
			u32 addr;

			pending = atomic_read(&work->pending);
			if (pending) {
				seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_req) {
				struct intel_engine_cs *engine = work->flip_queued_req->engine;

				seq_printf(m, "Flip queued on %s at seqno %x, last submitted seqno %x [current breadcrumb %x], completed? %d\n",
					   engine->name,
					   work->flip_queued_req->global_seqno,
					   intel_engine_last_submit(engine),
					   intel_engine_get_seqno(engine),
					   i915_gem_request_completed(work->flip_queued_req));
			} else
				seq_printf(m, "Flip not associated with any ring\n");
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   intel_crtc_get_vblank_counter(crtc));
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (INTEL_GEN(dev_priv) >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);

			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void print_request(struct seq_file *m,
			  struct drm_i915_gem_request *rq,
			  const char *prefix)
{
	seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
		   rq->priotree.priority,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   rq->timeline->common->name);
}
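/*
 * One request per line: "<prefix><global seqno> [<context hw id>:<fence
 * seqno>] prio=<priority> @ <age in ms>: <timeline name>".
 */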
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_request *req;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret, any;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_engine(engine, dev_priv, id) {
		int count;

		count = 0;
		list_for_each_entry(req, &engine->timeline->requests, link)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", engine->name, count);
		list_for_each_entry(req, &engine->timeline->requests, link)
			print_request(m, req, "    ");

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *rb;

	seq_printf(m, "Current sequence (%s): %x\n",
		   engine->name, intel_engine_get_seqno(engine));

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->rb_lock);
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		i915_ring_seqno_info(m, engine);

	return 0;
}
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	for_each_engine(engine, dev_priv, id) {
		if (INTEL_GEN(dev_priv) >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
		i915_ring_seqno_info(m, engine);
	}
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
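/*
 * The error/GPU-state nodes below exist only when the driver is built with
 * CONFIG_DRM_I915_CAPTURE_ERROR: i915_gpu_info captures a fresh snapshot on
 * open, while i915_error_state exposes the last recorded hang and is
 * cleared by writing to it.
 */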
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	if (!error)
		return 0;

	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;

	intel_runtime_pm_get(i915);
	gpu = i915_capture_gpu_state(i915);
	intel_runtime_pm_put(i915);
	if (!gpu)
		return -ENOMEM;

	file->private_data = gpu;
	return 0;
}
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	file->private_data = i915_first_error_state(inode->i_private);
	return 0;
}
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif
static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_global_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (IS_GEN9(dev_priv))
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		if (IS_GEN9(dev_priv))
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   dev_priv->rps.pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   dev_priv->rps.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   dev_priv->rps.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
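/*
 * INSTDONE is a progress/status snapshot of the render pipeline; from gen7
 * onwards the sampler and row portions are reported per slice/subslice,
 * which is why i915_instdone_info() below walks the topology rather than
 * printing a single register.
 */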
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine),
			   engine->timeline->inflight_seqnos);
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	for_each_fw_domain(fw_domain, i915, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rpmodectl1, rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
	unsigned forcewake_count;
	int count = 0;

	forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count);
	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return i915_forcewake_domains(m, NULL);
}
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int err;

	intel_runtime_pm_get(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		err = vlv_drpc_info(m);
	else if (INTEL_GEN(dev_priv) >= 6)
		err = gen6_drpc_info(m);
	else
		err = ironlake_drpc_info(m);

	intel_runtime_pm_put(dev_priv);

	return err;
}
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_FBC(dev_priv)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&dev_priv->fbc.lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n",
			   dev_priv->fbc.no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&dev_priv->fbc.lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_IPS(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev_priv))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int ret = 0;
	int gpu_freq, ia_freq;
	unsigned int max_gpu_freq, min_gpu_freq;

	if (!HAS_LLC(dev_priv)) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	if (IS_GEN9_BC(dev_priv)) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq =
			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
		max_gpu_freq =
			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
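/*
 * Each row of the table above maps one GPU frequency point to the CPU and
 * ring frequencies the pcode firmware pairs with it: the returned ia_freq
 * packs both values in 100 MHz units (low byte CPU, next byte ring), hence
 * the "* 100" when printing.
 */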
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

out:
	return 0;
}
static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, fbdev_fb->obj);
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
		   ring->space, ring->head, ring->tail);
}
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		seq_printf(m, "HW context %u ", ctx->hw_id);
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce = &ctx->engine[engine->id];

			seq_printf(m, "%s: ", engine->name);
			seq_putc(m, ce->initialised ? 'I' : 'i');
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_printf(m,
			   "\tvma hashtable size=%u (actual %lu), count=%u\n",
			   ctx->vma_lut.ht_size,
			   BIT(ctx->vma_lut.ht_bits),
			   ctx->vma_lut.ht_count);

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void i915_dump_lrc_obj(struct seq_file *m,
			      struct i915_gem_context *ctx,
			      struct intel_engine_cs *engine)
{
	struct i915_vma *vma = ctx->engine[engine->id].state;
	struct page *page;
	int j;

	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);

	if (!vma) {
		seq_puts(m, "\tFake context\n");
		return;
	}

	if (vma->flags & I915_VMA_GLOBAL_BIND)
		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
			   i915_ggtt_offset(vma));

	if (i915_gem_object_pin_pages(vma->obj)) {
		seq_puts(m, "\tFailed to get pages for context object\n\n");
		return;
	}

	page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
	if (page) {
		u32 *reg_state = kmap_atomic(page);

		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
			seq_printf(m,
				   "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
				   j * 4,
				   reg_state[j], reg_state[j + 1],
				   reg_state[j + 2], reg_state[j + 3]);
		}
		kunmap_atomic(reg_state);
	}

	i915_gem_object_unpin_pages(vma->obj);
	seq_putc(m, '\n');
}
static int i915_dump_lrc(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	if (!i915.enable_execlists) {
		seq_printf(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link)
		for_each_engine(engine, dev_priv, id)
			i915_dump_lrc_obj(m, ctx, engine);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
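/*
 * Report the bit6 swizzling mode used for X/Y tiling together with
 * the raw memory-controller registers it was derived from.
 */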
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
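/* Callback for idr_for_each(): print one file's ppgtt contexts. */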
static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	if (!ppgtt) {
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}
static void gen8_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i;

	if (!ppgtt)
		return;

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		for (i = 0; i < 4; i++) {
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));

			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
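/*
 * Top-level ppgtt dump: per-engine page-directory registers plus a
 * per-client walk of all contexts via per_file_ctx().
 */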
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
static int count_irq_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int count = 0;

	for_each_engine(engine, i915, id)
		count += intel_engine_has_waiter(engine);

	return count;
}
static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}
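/*
 * Summarise RPS state: current/min/max/boost frequencies, per-client
 * boost counts, and the up/down autotuning averages when active.
 */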
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));

	mutex_lock(&dev->filelist_mutex);
	spin_lock(&dev_priv->rps.client_lock);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts%s\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   file_priv->rps.boosts,
			   list_empty(&file_priv->rps.link) ? "" : ", active");
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
	spin_unlock(&dev_priv->rps.client_lock);
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    dev_priv->rps.enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(dev_priv->rps.power));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   dev_priv->rps.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   dev_priv->rps.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
		   intel_uncore_edram_size(dev_priv)/1024/1024);

	return 0;
}
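/* HuC/GuC firmware status: path, fetch/load state, versions, layout. */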
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;

	if (!HAS_HUC_UCODE(dev_priv))
		return 0;

	seq_puts(m, "HuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n", huc_fw->path);
	seq_printf(m, "\tfetch: %s\n",
		   intel_uc_fw_status_repr(huc_fw->fetch_status));
	seq_printf(m, "\tload: %s\n",
		   intel_uc_fw_status_repr(huc_fw->load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		   huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		   huc_fw->major_ver_found, huc_fw->minor_ver_found);
	seq_printf(m, "\theader: offset is %d; size = %d\n",
		   huc_fw->header_offset, huc_fw->header_size);
	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
		   huc_fw->ucode_offset, huc_fw->ucode_size);
	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
		   huc_fw->rsa_offset, huc_fw->rsa_size);

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
	u32 tmp;
	int i;

	if (!HAS_GUC_UCODE(dev_priv))
		return 0;

	seq_printf(m, "GuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n",
		   guc_fw->path);
	seq_printf(m, "\tfetch: %s\n",
		   intel_uc_fw_status_repr(guc_fw->fetch_status));
	seq_printf(m, "\tload: %s\n",
		   intel_uc_fw_status_repr(guc_fw->load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		   guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		   guc_fw->major_ver_found, guc_fw->minor_ver_found);
	seq_printf(m, "\theader: offset is %d; size = %d\n",
		   guc_fw->header_offset, guc_fw->header_size);
	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
		   guc_fw->ucode_offset, guc_fw->ucode_size);
	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
		   guc_fw->rsa_offset, guc_fw->rsa_size);

	intel_runtime_pm_get(dev_priv);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	seq_puts(m, "\nGuC logging stats:\n");

	seq_printf(m, "\tISR:   flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_ISR_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);

	seq_printf(m, "\tDPC:   flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_DPC_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);

	seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);

	seq_printf(m, "\tTotal flush interrupt count: %u\n",
		   guc->log.flush_interrupt_count);

	seq_printf(m, "\tCapture miss count: %u\n",
		   guc->log.capture_miss_count);
}
static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct i915_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u64 tot = 0;

	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
		client->priority, client->stage_id, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n",
		client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
		client->wq_size, client->wq_offset, client->wq_tail);

	seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);

	for_each_engine(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];
		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
				submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}
static bool check_guc_submission(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!guc->execbuf_client) {
		seq_printf(m, "GuC submission %s\n",
			   HAS_GUC_SCHED(dev_priv) ?
			   "disabled" :
			   "not supported");
		return false;
	}

	return true;
}
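/* Overview of GuC submission state: doorbells, execbuf client, log. */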
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!check_guc_submission(m))
		return 0;

	seq_printf(m, "Doorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);

	i915_guc_log_info(m, dev_priv);

	/* Add more as required ... */

	return 0;
}
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct i915_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!check_guc_submission(m))
		return 0;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
static int i915_guc_log_control_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	*val = i915.guc_log_level;

	return 0;
}

static int i915_guc_log_control_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	int ret;

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);
	ret = i915_guc_log_control(dev_priv, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
			i915_guc_log_control_get, i915_guc_log_control_set,
			"%lld\n");
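/* Decode the PSR2 live-status field into a human-readable string. */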
static const char *psr2_live_status(u32 val)
{
	static const char * const live_status[] = {
		"IDLE",
		"CAPTURE",
		"CAPTURE_FS",
		"SLEEP",
		"BUFON_FW",
		"ML_UP",
		"SU_STANDBY",
		"FAST_SLEEP",
		"DEEP_SLEEP",
		"BUF_ON",
		"TG_ON"
	};

	val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
	if (val < ARRAY_SIZE(live_status))
		return live_status[val];

	return "unknown";
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;

	if (!HAS_PSR(dev_priv)) {
		seq_puts(m, "PSR not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support)
			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
		else
			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
	} else {
		for_each_pipe(dev_priv, pipe) {
			enum transcoder cpu_transcoder =
				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain))
				continue;

			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;

			intel_display_power_put(dev_priv, power_domain);
		}
	}

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	if (!HAS_DDI(dev_priv))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/*
	 * VLV/CHV PSR has no kind of performance counter
	 * SKL+ Perf counter is reset to 0 every time DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	if (dev_priv->psr.psr2_support) {
		u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);

		seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
			   psr2, psr2_live_status(psr2));
	}
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_crtc *crtc;

		if (!connector->base.state->best_encoder)
			continue;

		crtc = connector->base.state->crtc;
		if (!crtc->state->active)
			continue;

		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_connector_list_iter_end(&conn_iter);
	drm_modeset_unlock_all(dev);
	return ret;
}
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u64 power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->domains)
			seq_printf(m, "  %-23s %d\n",
				 intel_display_power_domain_str(power_domain),
				 power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
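/*
 * DMC/CSR firmware info: load state, version and, where available,
 * the DC3->DC5 / DC5->DC6 residency counters.
 */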
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}

static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
					intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}

static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}

static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}

static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
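/*
 * Per-engine dump: seqnos, hangcheck, in-flight requests, ring
 * registers, execlist/CSB or PPGTT state, and breadcrumb waiters.
 */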
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s\n",
		   yesno(dev_priv->gt.awake));
	seq_printf(m, "Global active requests: %d\n",
		   dev_priv->gt.active_requests);

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct drm_i915_gem_request *rq;
		struct rb_node *rb;
		u64 addr;

		seq_printf(m, "%s\n", engine->name);
		seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
			   intel_engine_get_seqno(engine),
			   intel_engine_last_submit(engine),
			   engine->hangcheck.seqno,
			   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
			   engine->timeline->inflight_seqnos);

		rcu_read_lock();

		seq_printf(m, "\tRequests:\n");

		rq = list_first_entry(&engine->timeline->requests,
				      struct drm_i915_gem_request, link);
		if (&rq->link != &engine->timeline->requests)
			print_request(m, rq, "\t\tfirst  ");

		rq = list_last_entry(&engine->timeline->requests,
				     struct drm_i915_gem_request, link);
		if (&rq->link != &engine->timeline->requests)
			print_request(m, rq, "\t\tlast   ");

		rq = i915_gem_find_active_request(engine);
		if (rq) {
			print_request(m, rq, "\t\tactive ");
			seq_printf(m,
				   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
				   rq->head, rq->postfix, rq->tail,
				   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
				   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
		}

		seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
			   I915_READ(RING_START(engine->mmio_base)),
			   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
		seq_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
			   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
			   rq ? rq->ring->head : 0);
		seq_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
			   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
			   rq ? rq->ring->tail : 0);
		seq_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
			   I915_READ(RING_CTL(engine->mmio_base)),
			   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");

		rcu_read_unlock();

		addr = intel_engine_get_active_head(engine);
		seq_printf(m, "\tACTHD:  0x%08x_%08x\n",
			   upper_32_bits(addr), lower_32_bits(addr));
		addr = intel_engine_get_last_batch_head(engine);
		seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
			   upper_32_bits(addr), lower_32_bits(addr));

		if (i915.enable_execlists) {
			u32 ptr, read, write;
			unsigned int idx;

			seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
				   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
				   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

			ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
			read = GEN8_CSB_READ_PTR(ptr);
			write = GEN8_CSB_WRITE_PTR(ptr);
			seq_printf(m, "\tExeclist CSB read %d, write %d\n",
				   read, write);
			if (read >= GEN8_CSB_ENTRIES)
				read = 0;
			if (write >= GEN8_CSB_ENTRIES)
				write = 0;
			if (read > write)
				write += GEN8_CSB_ENTRIES;
			while (read < write) {
				idx = ++read % GEN8_CSB_ENTRIES;
				seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
					   idx,
					   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
					   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
			}

			rcu_read_lock();
			for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) {
				unsigned int count;

				rq = port_unpack(&engine->execlist_port[idx],
						 &count);
				if (rq) {
					seq_printf(m, "\t\tELSP[%d] count=%d, ",
						   idx, count);
					print_request(m, rq, "rq: ");
				} else {
					seq_printf(m, "\t\tELSP[%d] idle\n",
						   idx);
				}
			}
			rcu_read_unlock();

			spin_lock_irq(&engine->timeline->lock);
			for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
				struct i915_priolist *p =
					rb_entry(rb, typeof(*p), node);

				list_for_each_entry(rq, &p->requests,
						    priotree.link)
					print_request(m, rq, "\t\tQ ");
			}
			spin_unlock_irq(&engine->timeline->lock);
		} else if (INTEL_GEN(dev_priv) > 6) {
			seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
				   I915_READ(RING_PP_DIR_BASE(engine)));
			seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
				   I915_READ(RING_PP_DIR_BASE_READ(engine)));
			seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
				   I915_READ(RING_PP_DIR_DCLV(engine)));
		}

		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_puts(m, "\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_semaphore_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	int num_rings = INTEL_INFO(dev_priv)->num_rings;
	enum intel_engine_id id;
	int j, ret;

	if (!i915.semaphores) {
		seq_puts(m, "Semaphores are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_BROADWELL(dev_priv)) {
		struct page *page;
		uint64_t *seqno;

		page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);

		seqno = (uint64_t *)kmap_atomic(page);
		for_each_engine(engine, dev_priv, id) {
			uint64_t offset;

			seq_printf(m, "%s\n", engine->name);

			seq_puts(m, "  Last signal:");
			for (j = 0; j < num_rings; j++) {
				offset = id * I915_NUM_ENGINES + j;
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

			seq_puts(m, "  Last wait:  ");
			for (j = 0; j < num_rings; j++) {
				offset = id + (j * I915_NUM_ENGINES);
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');
		}
		kunmap_atomic(seqno);
	} else {
		seq_puts(m, "  Last signal:");
		for_each_engine(engine, dev_priv, id)
			for (j = 0; j < num_rings; j++)
				seq_printf(m, "0x%08x\n",
					   I915_READ(engine->semaphore.mbox.signal[j]));
		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	int i;
	int ret;
	struct intel_engine_cs *engine;
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_workarounds *workarounds = &dev_priv->workarounds;
	enum intel_engine_id id;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
	for_each_engine(engine, dev_priv, id)
		seq_printf(m, "HW whitelist count for %s: %d\n",
			   engine->name, workarounds->hw_whitelist_count[id]);
	for (i = 0; i < workarounds->count; ++i) {
		i915_reg_t addr;
		u32 mask, value, read;
		bool ok;

		addr = workarounds->reg[i].addr;
		mask = workarounds->reg[i].mask;
		value = workarounds->reg[i].value;
		read = I915_READ(addr);
		ok = (value & mask) == (read & mask);
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_universal_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}

		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter */
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}

static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_active_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};

static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_data_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_data_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_type_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_type_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_type_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
3971 static int pri_wm_latency_show(struct seq_file
*m
, void *data
)
3973 struct drm_i915_private
*dev_priv
= m
->private;
3974 const uint16_t *latencies
;
3976 if (INTEL_GEN(dev_priv
) >= 9)
3977 latencies
= dev_priv
->wm
.skl_latency
;
3979 latencies
= dev_priv
->wm
.pri_latency
;
3981 wm_latency_show(m
, latencies
);
3986 static int spr_wm_latency_show(struct seq_file
*m
, void *data
)
3988 struct drm_i915_private
*dev_priv
= m
->private;
3989 const uint16_t *latencies
;
3991 if (INTEL_GEN(dev_priv
) >= 9)
3992 latencies
= dev_priv
->wm
.skl_latency
;
3994 latencies
= dev_priv
->wm
.spr_latency
;
3996 wm_latency_show(m
, latencies
);
4001 static int cur_wm_latency_show(struct seq_file
*m
, void *data
)
4003 struct drm_i915_private
*dev_priv
= m
->private;
4004 const uint16_t *latencies
;
4006 if (INTEL_GEN(dev_priv
) >= 9)
4007 latencies
= dev_priv
->wm
.skl_latency
;
4009 latencies
= dev_priv
->wm
.cur_latency
;
4011 wm_latency_show(m
, latencies
);
4016 static int pri_wm_latency_open(struct inode
*inode
, struct file
*file
)
4018 struct drm_i915_private
*dev_priv
= inode
->i_private
;
4020 if (INTEL_GEN(dev_priv
) < 5 && !IS_G4X(dev_priv
))
4023 return single_open(file
, pri_wm_latency_show
, dev_priv
);
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}
static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}
static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
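/*
 * Usage sketch (assumption, not part of the original source): each latency
 * file prints one "WM<level> <raw> (<usec> usec)" line per watermark level
 * and accepts the same number of space-separated values back. Assuming
 * debugfs at /sys/kernel/debug and DRM minor 0:
 *
 *   # cat /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *   # echo "2 4 8 16 16 16 16 16" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *
 * Writes that do not supply exactly num_levels values are rejected with
 * -EINVAL, matching the sscanf() check in wm_latency_write().
 */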
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'.
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, "Manually setting wedged to %llu", val);

	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
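/*
 * Example (hypothetical, paths assumed): writing an engine mask to
 * i915_wedged injects a hang on those engines, e.g.
 * "echo 1 > /sys/kernel/debug/dri/0/i915_wedged" declares engine 0 hung;
 * reading the file back reports whether the GPU is terminally wedged.
 */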
static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	while (flush_delayed_work(&i915->gt.idle_work))
		;

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}
static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}
static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
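/*
 * Note: both ring-irq attributes funnel into fault_irq_set() above, which
 * idles the GPU before applying the new mask and then flushes the idle
 * worker so the breadcrumb interrupt is disarmed from a known state.
 * Writes to i915_ring_test_irq are clamped to the platform's ring_mask,
 * so bits for absent engines are silently dropped.
 */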
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_FREED 0x10
#define DROP_SHRINK_ALL 0x20
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE | \
		  DROP_FREED | \
		  DROP_SHRINK_ALL)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(dev_priv,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED);

		if (val & DROP_RETIRE)
			i915_gem_retire_requests(dev_priv);

		mutex_unlock(&dev->struct_mutex);
	}

	lockdep_set_current_reclaim_state(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(dev_priv);
	lockdep_clear_current_reclaim_state();

	if (val & DROP_FREED) {
		synchronize_rcu();
		i915_gem_drain_freed_objects(dev_priv);
	}

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
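/*
 * Usage sketch (assumption, not from the original source): the DROP_* bits
 * combine, so "echo 0x3f > /sys/kernel/debug/dri/0/i915_gem_drop_caches"
 * requests DROP_ALL, while "echo 0x4" only retires completed requests.
 */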
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	return 0;
}
static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_freq_softlimit = val;

	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	return 0;
}
static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min ||
	    val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
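/*
 * Example (hypothetical values, paths assumed): frequencies are exchanged
 * in MHz and translated to hardware opcodes via intel_freq_opcode(), so
 * "echo 900 > /sys/kernel/debug/dri/0/i915_max_freq" caps turbo at 900 MHz,
 * subject to the hw_min/hw_max bounds checked above.
 */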
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
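/*
 * Note: the value written through this attribute is the 2-bit snoop
 * control field of GEN6_MBCUNIT_SNPCR (hence the "val > 3" rejection in
 * i915_cache_sharing_set() above).
 */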
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
	int ss_max = 2;
	int ss;
	u32 sig1[ss_max], sig2[ss_max];

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
}
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
	int s_max = 3, ss_max = 4;
	int s, ss;
	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];

	/* BXT has a single slice and at most 3 subslices. */
	if (IS_GEN9_LP(dev_priv)) {
		s_max = 1;
		ss_max = 3;
	}

	for (s = 0; s < s_max; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask =
				INTEL_INFO(dev_priv)->sseu.subslice_mask;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask |= BIT(ss);
			}

			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
}
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
		sseu->eu_per_subslice =
				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	seq_printf(m, "  %s Subslice Mask: %04x\n", type,
		   sseu->subslice_mask);
	seq_printf(m, "  %s Subslice Per Slice: %u\n", type,
		   hweight8(sseu->subslice_mask));
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		gen9_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
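/*
 * Sample output shape (values illustrative, not from real hardware), as
 * produced by i915_print_sseu_info() above:
 *
 *   SSEU Device Info
 *     Available Slice Mask: 0001
 *     Available Slice Total: 1
 *     Available Subslice Total: 3
 *     ...
 *   SSEU Device Status
 *     Enabled Slice Mask: 0001
 *     ...
 */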
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 6)
		return 0;

	intel_runtime_pm_get(dev_priv);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 6)
		return 0;

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
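/*
 * Usage note: i915_forcewake_user has no read/write methods on purpose;
 * simply holding the file open pins all forcewake domains (and a runtime
 * PM reference). A hypothetical shell illustration, paths assumed:
 *
 *   # exec 3</sys/kernel/debug/dri/0/i915_forcewake_user   # grab
 *   # exec 3<&-                                            # release
 */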
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
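/*
 * Example (paths assumed): "echo 5 > i915_hpd_storm_ctl" sets the storm
 * threshold to 5, "echo 0" disables storm detection, and "echo reset"
 * restores HPD_STORM_DEFAULT_THRESHOLD, mirroring the parsing in
 * i915_hpd_storm_ctl_write() above.
 */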
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_control", &i915_guc_log_control_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	struct dentry *ent;
	int ret, i;

	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
				  minor->debugfs_root, to_i915(minor->dev),
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	ret = intel_pipe_crc_create(minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ent = debugfs_create_file(i915_debugfs_files[i].name,
					  S_IRUGO | S_IWUSR,
					  minor->debugfs_root,
					  to_i915(minor->dev),
					  i915_debugfs_files[i].fops);
		if (!ent)
			return -ENOMEM;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
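/*
 * All of the files registered above land in the DRM minor's debugfs root,
 * conventionally /sys/kernel/debug/dri/<minor> when debugfs is mounted at
 * its usual location (path assumed, not mandated by this file).
 */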
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}
static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}
static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
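/*
 * Example output line (illustrative bytes, not from real hardware): reading
 * i915_dpcd dumps each dpcd_block as "<offset>: <hex bytes>", e.g.
 *
 *   0000: 12 14 c4 81 01 01 01 81 02 02 06 00 00 00
 *
 * courtesy of the "%04x: %*ph\n" format in i915_dpcd_show() above.
 */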
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
static int i915_panel_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_panel_show, inode->i_private);
}
static const struct file_operations i915_panel_fops = {
	.owner = THIS_MODULE,
	.open = i915_panel_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);

	return 0;
}
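/*
 * The two per-connector files above appear under each connector's debugfs
 * directory, e.g. /sys/kernel/debug/dri/0/DP-1/i915_dpcd (path layout
 * assumed from DRM's per-connector debugfs conventions).
 */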