obj->last_read_seqno,
obj->last_write_seqno,
obj->last_fenced_seqno,
- i915_cache_level_str(obj->cache_level),
+ i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
I915_READ(VLV_IIR_RW));
seq_printf(m, "Display IMR:\t%08x\n",
I915_READ(VLV_IMR));
- for_each_pipe(pipe)
+ for_each_pipe(dev_priv, pipe)
seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
i, I915_READ(GEN8_GT_IER(i)));
}
- for_each_pipe(pipe) {
+ for_each_pipe(dev_priv, pipe) {
if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe))) {
seq_printf(m, "Pipe %c power disabled\n",
I915_READ(VLV_IIR_RW));
seq_printf(m, "Display IMR:\t%08x\n",
I915_READ(VLV_IMR));
- for_each_pipe(pipe)
+ for_each_pipe(dev_priv, pipe)
seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
I915_READ(IIR));
seq_printf(m, "Interrupt mask: %08x\n",
I915_READ(IMR));
- for_each_pipe(pipe)
+ for_each_pipe(dev_priv, pipe)
seq_printf(m, "Pipe %c stat: %08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
ssize_t ret_count = 0;
int ret;
- ret = i915_error_state_buf_init(&error_str, count, *pos);
+ ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
if (ret)
return ret;
u32 rpstat, cagf, reqf;
u32 rpupei, rpcurup, rpprevup;
u32 rpdownei, rpcurdown, rpprevdown;
+ u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
int max_freq;
/* RPSTAT1 is in the GT power well */
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
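+ /* Gen8 moved the RPS/PM interrupt registers into GT interrupt bank 2; pick the right set before dumping them. */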
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ pm_ier = I915_READ(GEN6_PMIER);
+ pm_imr = I915_READ(GEN6_PMIMR);
+ pm_isr = I915_READ(GEN6_PMISR);
+ pm_iir = I915_READ(GEN6_PMIIR);
+ pm_mask = I915_READ(GEN6_PMINTRMSK);
+ } else {
+ pm_ier = I915_READ(GEN8_GT_IER(2));
+ pm_imr = I915_READ(GEN8_GT_IMR(2));
+ pm_isr = I915_READ(GEN8_GT_ISR(2));
+ pm_iir = I915_READ(GEN8_GT_IIR(2));
+ pm_mask = I915_READ(GEN6_PMINTRMSK);
+ }
seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
- I915_READ(GEN6_PMIER),
- I915_READ(GEN6_PMIMR),
- I915_READ(GEN6_PMISR),
- I915_READ(GEN6_PMIIR),
- I915_READ(GEN6_PMINTRMSK));
+ pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & 0xff00) >> 8);
if (IS_VALLEYVIEW(dev))
return vlv_drpc_info(m);
- else if (IS_GEN6(dev) || IS_GEN7(dev))
+ else if (INTEL_INFO(dev)->gen >= 6)
return gen6_drpc_info(m);
else
return ironlake_drpc_info(m);
return 0;
}
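+/* Print a one-line summary of a logical ring context's ringbuffer state. */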
+static void describe_ctx_ringbuf(struct seq_file *m,
+ struct intel_ringbuffer *ringbuf)
+{
+ seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
+ ringbuf->space, ringbuf->head, ringbuf->tail,
+ ringbuf->last_retired_head);
+}
+
static int i915_context_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
}
list_for_each_entry(ctx, &dev_priv->context_list, link) {
- if (ctx->legacy_hw_ctx.rcs_state == NULL)
+ if (!i915.enable_execlists &&
+ ctx->legacy_hw_ctx.rcs_state == NULL)
continue;
seq_puts(m, "HW context ");
describe_ctx(m, ctx);
- for_each_ring(ring, dev_priv, i)
+ for_each_ring(ring, dev_priv, i) {
if (ring->default_context == ctx)
- seq_printf(m, "(default context %s) ", ring->name);
+ seq_printf(m, "(default context %s) ",
+ ring->name);
+ }
+
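+ /* With execlists enabled each engine has its own context object and ringbuffer, so describe them per ring. */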
+ if (i915.enable_execlists) {
+ seq_putc(m, '\n');
+ for_each_ring(ring, dev_priv, i) {
+ struct drm_i915_gem_object *ctx_obj =
+ ctx->engine[i].state;
+ struct intel_ringbuffer *ringbuf =
+ ctx->engine[i].ringbuf;
+
+ seq_printf(m, "%s: ", ring->name);
+ if (ctx_obj)
+ describe_obj(m, ctx_obj);
+ if (ringbuf)
+ describe_ctx_ringbuf(m, ringbuf);
+ seq_putc(m, '\n');
+ }
+ } else {
+ describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
+ }
+
+ seq_putc(m, '\n');
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int i915_dump_lrc(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ struct intel_context *ctx;
+ int ret, i;
+
+ if (!i915.enable_execlists) {
+ seq_printf(m, "Logical Ring Contexts are disabled\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ for_each_ring(ring, dev_priv, i) {
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
+
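+ /* Skip the per-ring default contexts; only dump the remaining LRCs. */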
+ if (ring->default_context == ctx)
+ continue;
+
+ if (ctx_obj) {
+ struct page *page = i915_gem_object_get_page(ctx_obj, 1);
+ uint32_t *reg_state = kmap_atomic(page);
+ int j;
+
+ seq_printf(m, "CONTEXT: %s %u\n", ring->name,
+ intel_execlists_ctx_id(ctx_obj));
+
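+ /* The register state lives in the second page of the context object, hence get_page(ctx_obj, 1) and the 4096 byte offset in the addresses below. */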
+ for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
+ seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
+ reg_state[j], reg_state[j + 1],
+ reg_state[j + 2], reg_state[j + 3]);
+ }
+ kunmap_atomic(reg_state);
+
+ seq_putc(m, '\n');
+ }
+ }
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int i915_execlists(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ u32 status_pointer;
+ u8 read_pointer;
+ u8 write_pointer;
+ u32 status;
+ u32 ctx_id;
+ struct list_head *cursor;
+ int ring_id, i;
+ int ret;
+
+ if (!i915.enable_execlists) {
+ seq_puts(m, "Logical Ring Contexts are disabled\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ for_each_ring(ring, dev_priv, ring_id) {
+ struct intel_ctx_submit_request *head_req = NULL;
+ int count = 0;
+ unsigned long flags;
+
+ seq_printf(m, "%s\n", ring->name);
+
+ status = I915_READ(RING_EXECLIST_STATUS(ring));
+ ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
+ seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
+ status, ctx_id);
+
+ status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+ seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
+
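+ /* The context status buffer has 6 entries; unwrap the write pointer so the read/write pair below is monotonic. */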
+ read_pointer = ring->next_context_status_buffer;
+ write_pointer = status_pointer & 0x07;
+ if (read_pointer > write_pointer)
+ write_pointer += 6;
+ seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
+ read_pointer, write_pointer);
+
+ for (i = 0; i < 6; i++) {
+ status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
+ ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);
+
+ seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
+ i, status, ctx_id);
+ }
+
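+ /* Walk the pending execlist queue under its lock: count the requests and peek at the head. */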
+ spin_lock_irqsave(&ring->execlist_lock, flags);
+ list_for_each(cursor, &ring->execlist_queue)
+ count++;
+ head_req = list_first_entry_or_null(&ring->execlist_queue,
+ struct intel_ctx_submit_request, execlist_link);
+ spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+ seq_printf(m, "\t%d requests in queue\n", count);
+ if (head_req) {
+ struct drm_i915_gem_object *ctx_obj;
+
+ ctx_obj = head_req->ctx->engine[ring_id].state;
+ seq_printf(m, "\tHead request id: %u\n",
+ intel_execlists_ctx_id(ctx_obj));
+ seq_printf(m, "\tHead request tail: %u\n",
+ head_req->tail);
+ }
- describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
seq_putc(m, '\n');
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
+static int intel_wa_registers(struct seq_file *m, void *unused)
+{
+ int i;
+ int ret;
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_BROADWELL(dev)) {
+ DRM_DEBUG_DRIVER("Workaround table not available !!\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ intel_runtime_pm_get(dev_priv);
+
+ seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs);
+ for (i = 0; i < dev_priv->num_wa_regs; ++i) {
+ u32 addr, mask;
+
+ addr = dev_priv->intel_wa_regs[i].addr;
+ mask = dev_priv->intel_wa_regs[i].mask;
+ dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask;
+ if (dev_priv->intel_wa_regs[i].addr)
+ seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
+ dev_priv->intel_wa_regs[i].addr,
+ dev_priv->intel_wa_regs[i].value,
+ dev_priv->intel_wa_regs[i].mask);
+ }
+
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
{"i915_opregion", i915_opregion, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
+ {"i915_dump_lrc", i915_dump_lrc, 0},
+ {"i915_execlists", i915_execlists, 0},
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_semaphore_status", i915_semaphore_status, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
+ {"intel_wa_registers", intel_wa_registers, 0}
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
- for_each_pipe(pipe) {
+ for_each_pipe(dev_priv, pipe) {
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
pipe_crc->opened = false;