describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct i915_vma *vma;
int pin_count = 0;
int i;
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain);
- for_each_ring(ring, dev_priv, i)
+ for_each_ring(engine, dev_priv, i)
seq_printf(m, "%x ",
i915_gem_request_get_seqno(obj->last_read_req[i]));
seq_printf(m, "] %x %x%s%s%s",
{
struct drm_i915_gem_object *obj;
struct file_stats stats;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int i, j;
memset(&stats, 0, sizeof(stats));
- for_each_ring(ring, dev_priv, i) {
- for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+ for_each_ring(engine, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
list_for_each_entry(obj,
- &ring->batch_pool.cache_list[j],
+ &engine->batch_pool.cache_list[j],
batch_pool_link)
per_file_stats(0, obj, &stats);
}
pipe, plane);
}
if (work->flip_queued_req) {
- struct intel_engine_cs *ring =
- i915_gem_request_get_ring(work->flip_queued_req);
+ struct intel_engine_cs *engine =
+ i915_gem_request_get_ring(work->flip_queued_req);
seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
- ring->name,
+ engine->name,
i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
- ring->get_seqno(ring, true),
+ engine->get_seqno(engine, true),
i915_gem_request_completed(work->flip_queued_req, true));
} else
seq_printf(m, "Flip not associated with any ring\n");
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int total = 0;
int ret, i, j;
if (ret)
return ret;
- for_each_ring(ring, dev_priv, i) {
- for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+ for_each_ring(engine, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
int count;
count = 0;
list_for_each_entry(obj,
- &ring->batch_pool.cache_list[j],
+ &engine->batch_pool.cache_list[j],
batch_pool_link)
count++;
seq_printf(m, "%s cache[%d]: %d objects\n",
- ring->name, j, count);
+ engine->name, j, count);
list_for_each_entry(obj,
- &ring->batch_pool.cache_list[j],
+ &engine->batch_pool.cache_list[j],
batch_pool_link) {
seq_puts(m, " ");
describe_obj(m, obj);
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct drm_i915_gem_request *req;
int ret, any, i;
return ret;
any = 0;
- for_each_ring(ring, dev_priv, i) {
+ for_each_ring(engine, dev_priv, i) {
int count;
count = 0;
- list_for_each_entry(req, &ring->request_list, list)
+ list_for_each_entry(req, &engine->request_list, list)
count++;
if (count == 0)
continue;
- seq_printf(m, "%s requests: %d\n", ring->name, count);
- list_for_each_entry(req, &ring->request_list, list) {
+ seq_printf(m, "%s requests: %d\n", engine->name, count);
+ list_for_each_entry(req, &engine->request_list, list) {
struct task_struct *task;
rcu_read_lock();
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
return ret;
intel_runtime_pm_get(dev_priv);
- for_each_ring(ring, dev_priv, i)
- i915_ring_seqno_info(m, ring);
+ for_each_ring(engine, dev_priv, i)
+ i915_ring_seqno_info(m, engine);
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- for_each_ring(ring, dev_priv, i) {
+ for_each_ring(engine, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
- ring->name, I915_READ_IMR(ring));
+ engine->name, I915_READ_IMR(engine));
}
- i915_ring_seqno_info(m, ring);
+ i915_ring_seqno_info(m, engine);
}
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
const u32 *hws;
int i;
- ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- hws = ring->status_page.page_addr;
+ engine = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+ hws = engine->status_page.page_addr;
if (hws == NULL)
return 0;
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
u64 acthd[I915_NUM_RINGS];
u32 seqno[I915_NUM_RINGS];
u32 instdone[I915_NUM_INSTDONE_REG];
intel_runtime_pm_get(dev_priv);
- for_each_ring(ring, dev_priv, i) {
- seqno[i] = ring->get_seqno(ring, false);
- acthd[i] = intel_ring_get_active_head(ring);
+ for_each_ring(engine, dev_priv, i) {
+ seqno[i] = engine->get_seqno(engine, false);
+ acthd[i] = intel_ring_get_active_head(engine);
}
i915_get_extra_instdone(dev, instdone);
} else
seq_printf(m, "Hangcheck inactive\n");
- for_each_ring(ring, dev_priv, i) {
- seq_printf(m, "%s:\n", ring->name);
+ for_each_ring(engine, dev_priv, i) {
+ seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x]\n",
- ring->hangcheck.seqno, seqno[i]);
+ engine->hangcheck.seqno, seqno[i]);
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
- (long long)ring->hangcheck.acthd,
+ (long long)engine->hangcheck.acthd,
(long long)acthd[i]);
- seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
- seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+ seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
+ seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
- if (ring->id == RCS) {
+ if (engine->id == RCS) {
seq_puts(m, "\tinstdone read =");
for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
seq_printf(m, " 0x%08x", instdone[j]);
seq_puts(m, "\n\tinstdone accu =");
for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
seq_printf(m, " 0x%08x",
- ring->hangcheck.instdone[j]);
+ engine->hangcheck.instdone[j]);
seq_puts(m, "\n");
}
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct intel_context *ctx;
int ret, i;
if (i915.enable_execlists) {
seq_putc(m, '\n');
- for_each_ring(ring, dev_priv, i) {
+ for_each_ring(engine, dev_priv, i) {
struct drm_i915_gem_object *ctx_obj =
ctx->engine[i].state;
struct intel_ringbuffer *ringbuf =
ctx->engine[i].ringbuf;
- seq_printf(m, "%s: ", ring->name);
+ seq_printf(m, "%s: ", engine->name);
if (ctx_obj)
describe_obj(m, ctx_obj);
if (ringbuf)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct intel_context *ctx;
int ret, i;
list_for_each_entry(ctx, &dev_priv->context_list, link)
if (ctx != dev_priv->kernel_context)
- for_each_ring(ring, dev_priv, i)
- i915_dump_lrc_obj(m, ctx, ring);
+ for_each_ring(engine, dev_priv, i)
+ i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex);
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
u32 status_pointer;
u8 read_pointer;
u8 write_pointer;
intel_runtime_pm_get(dev_priv);
- for_each_ring(ring, dev_priv, ring_id) {
+ for_each_ring(engine, dev_priv, ring_id) {
struct drm_i915_gem_request *head_req = NULL;
int count = 0;
unsigned long flags;
- seq_printf(m, "%s\n", ring->name);
+ seq_printf(m, "%s\n", engine->name);
- status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
- ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
+ status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
+ ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
status, ctx_id);
- status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+ status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
- read_pointer = ring->next_context_status_buffer;
+ read_pointer = engine->next_context_status_buffer;
write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += GEN8_CSB_ENTRIES;
read_pointer, write_pointer);
for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
- status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
- ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
+ status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
+ ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
i, status, ctx_id);
}
- spin_lock_irqsave(&ring->execlist_lock, flags);
- list_for_each(cursor, &ring->execlist_queue)
+ spin_lock_irqsave(&engine->execlist_lock, flags);
+ list_for_each(cursor, &engine->execlist_queue)
count++;
- head_req = list_first_entry_or_null(&ring->execlist_queue,
- struct drm_i915_gem_request, execlist_link);
- spin_unlock_irqrestore(&ring->execlist_lock, flags);
+ head_req = list_first_entry_or_null(&engine->execlist_queue,
+ struct drm_i915_gem_request,
+ execlist_link);
+ spin_unlock_irqrestore(&engine->execlist_lock, flags);
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
seq_printf(m, "\tHead request id: %u\n",
- intel_execlists_ctx_id(head_req->ctx, ring));
+ intel_execlists_ctx_id(head_req->ctx, engine));
seq_printf(m, "\tHead request tail: %u\n",
head_req->tail);
}
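/*
 * The dump above mirrors the hardware's execlists state machine:
 * EXECLIST_STATUS_LO/HI give the currently executing context, the
 * context status buffer (CSB) holds GEN8_CSB_ENTRIES completion events
 * between the driver's read pointer and the hardware write pointer
 * (hence the wrap-around fixup), and execlist_queue is the software
 * submission queue, sampled under execlist_lock.
 */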
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int unused, i;
if (!ppgtt)
return;
- for_each_ring(ring, dev_priv, unused) {
- seq_printf(m, "%s\n", ring->name);
+ for_each_ring(engine, dev_priv, unused) {
+ seq_printf(m, "%s\n", engine->name);
for (i = 0; i < 4; i++) {
- u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
+ u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
pdp <<= 32;
- pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
+ pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
}
}
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int i;
if (INTEL_INFO(dev)->gen == 6)
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
- for_each_ring(ring, dev_priv, i) {
- seq_printf(m, "%s\n", ring->name);
+ for_each_ring(engine, dev_priv, i) {
+ seq_printf(m, "%s\n", engine->name);
if (INTEL_INFO(dev)->gen == 7)
- seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
- seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
- seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
- seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+ seq_printf(m, "GFX_MODE: 0x%08x\n",
+ I915_READ(RING_MODE_GEN7(engine)));
+ seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE(engine)));
+ seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE_READ(engine)));
+ seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
+ I915_READ(RING_PP_DIR_DCLV(engine)));
}
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
static int count_irq_waiters(struct drm_i915_private *i915)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int count = 0;
int i;
- for_each_ring(ring, i915, i)
- count += ring->irq_refcount;
+ for_each_ring(engine, i915, i)
+ count += engine->irq_refcount;
return count;
}
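/*
 * for_each_ring() only visits engines that were actually initialised
 * for this device, which is why none of these loops need extra NULL
 * checks. A sketch of the macro as assumed here (the real definition
 * is in i915_drv.h):
 *
 *	#define for_each_ring(ring__, dev_priv__, i__) \
 *		for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
 *			for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), \
 *				      intel_ring_initialized((ring__))))
 */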
struct drm_i915_private *dev_priv,
struct i915_guc_client *client)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
uint64_t tot = 0;
uint32_t i;
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
- for_each_ring(ring, dev_priv, i) {
+ for_each_ring(engine, dev_priv, i) {
seq_printf(m, "\tSubmissions: %llu %s\n",
- client->submissions[ring->guc_id],
- ring->name);
- tot += client->submissions[ring->guc_id];
+ client->submissions[engine->guc_id],
+ engine->name);
+ tot += client->submissions[engine->guc_id];
}
seq_printf(m, "\tTotal: %llu\n", tot);
}
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc guc;
struct i915_guc_client client = {};
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
enum intel_ring_id i;
u64 total = 0;
seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
seq_printf(m, "\nGuC submissions:\n");
- for_each_ring(ring, dev_priv, i) {
+ for_each_ring(engine, dev_priv, i) {
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
- ring->name, guc.submissions[ring->guc_id],
- guc.last_seqno[ring->guc_id]);
- total += guc.submissions[ring->guc_id];
+ engine->name, guc.submissions[engine->guc_id],
+ guc.last_seqno[engine->guc_id]);
+ total += guc.submissions[engine->guc_id];
}
seq_printf(m, "\t%s: %llu\n", "Total", total);
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
int i, j, ret;
page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
seqno = (uint64_t *)kmap_atomic(page);
- for_each_ring(ring, dev_priv, i) {
+ for_each_ring(engine, dev_priv, i) {
uint64_t offset;
- seq_printf(m, "%s\n", ring->name);
+ seq_printf(m, "%s\n", engine->name);
seq_puts(m, " Last signal:");
for (j = 0; j < num_rings; j++) {
kunmap_atomic(seqno);
} else {
seq_puts(m, " Last signal:");
- for_each_ring(ring, dev_priv, i)
+ for_each_ring(engine, dev_priv, i)
for (j = 0; j < num_rings; j++)
seq_printf(m, "0x%08x\n",
- I915_READ(ring->semaphore.mbox.signal[j]));
+ I915_READ(engine->semaphore.mbox.signal[j]));
seq_putc(m, '\n');
}
seq_puts(m, "\nSync seqno:\n");
- for_each_ring(ring, dev_priv, i) {
+ for_each_ring(engine, dev_priv, i) {
for (j = 0; j < num_rings; j++) {
- seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
+ seq_printf(m, " 0x%08x ",
+ engine->semaphore.sync_seqno[j]);
}
seq_putc(m, '\n');
}
{
int i;
int ret;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for_each_ring(ring, dev_priv, i)
+ for_each_ring(engine, dev_priv, i)
seq_printf(m, "HW whitelist count for %s: %d\n",
- ring->name, workarounds->hw_whitelist_count[i]);
+ engine->name, workarounds->hw_whitelist_count[i]);
for (i = 0; i < workarounds->count; ++i) {
i915_reg_t addr;
u32 mask, value, read;
s64 *timeout,
struct intel_rps_client *rps)
{
- struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = i915_gem_request_get_ring(req);
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
- ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
+ ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(engine);
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
unsigned long timeout_expire;
if (ret == 0)
goto out;
- if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
+ if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
ret = -ENODEV;
goto out;
}
for (;;) {
struct timer_list timer;
- prepare_to_wait(&ring->irq_queue, &wait, state);
+ prepare_to_wait(&engine->irq_queue, &wait, state);
/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
}
timer.function = NULL;
- if (timeout || missed_irq(dev_priv, ring)) {
+ if (timeout || missed_irq(dev_priv, engine)) {
unsigned long expire;
setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
- expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
+ expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
mod_timer(&timer, expire);
}
}
}
if (!irq_test_in_progress)
- ring->irq_put(ring);
+ engine->irq_put(engine);
- finish_wait(&ring->irq_queue, &wait);
+ finish_wait(&engine->irq_queue, &wait);
out:
trace_i915_gem_request_wait_end(req);
struct drm_i915_gem_request *req)
{
struct drm_i915_gem_object *obj = vma->obj;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
- ring = i915_gem_request_get_ring(req);
+ engine = i915_gem_request_get_ring(req);
/* Add a reference if we're newly entering the active list. */
if (obj->active == 0)
drm_gem_object_reference(&obj->base);
- obj->active |= intel_ring_flag(ring);
+ obj->active |= intel_ring_flag(engine);
- list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
- i915_gem_request_assign(&obj->last_read_req[ring->id], req);
+ list_move_tail(&obj->ring_list[engine->id], &engine->active_list);
+ i915_gem_request_assign(&obj->last_read_req[engine->id], req);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int ret, i, j;
/* Carefully retire all requests without writing to the rings */
- for_each_ring(ring, dev_priv, i) {
- ret = intel_ring_idle(ring);
+ for_each_ring(engine, dev_priv, i) {
+ ret = intel_ring_idle(engine);
if (ret)
return ret;
}
i915_gem_retire_requests(dev);
/* Finally reset hw state */
- for_each_ring(ring, dev_priv, i) {
- intel_ring_init_seqno(ring, seqno);
+ for_each_ring(engine, dev_priv, i) {
+ intel_ring_init_seqno(engine, seqno);
- for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
- ring->semaphore.sync_seqno[j] = 0;
+ for (j = 0; j < ARRAY_SIZE(engine->semaphore.sync_seqno); j++)
+ engine->semaphore.sync_seqno[j] = 0;
}
return 0;
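/*
 * Rewinding the global seqno is only safe against idle hardware:
 * intel_ring_idle() drains each engine first, the retire pass
 * completes all bookkeeping against the old numbering, and only then
 * are the per-engine seqnos and semaphore sync_seqno[] values reset,
 * so no in-flight comparison can observe the counter jumping.
 */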
struct drm_i915_gem_object *obj,
bool flush_caches)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
u32 request_start;
if (WARN_ON(request == NULL))
return;
- ring = request->ring;
- dev_priv = ring->dev->dev_private;
+ engine = request->ring;
+ dev_priv = engine->dev->dev_private;
ringbuf = request->ringbuf;
/*
request->postfix = intel_ring_get_tail(ringbuf);
if (i915.enable_execlists)
- ret = ring->emit_request(request);
+ ret = engine->emit_request(request);
else {
- ret = ring->add_request(request);
+ ret = engine->add_request(request);
request->tail = intel_ring_get_tail(ringbuf);
}
request->batch_obj = obj;
request->emitted_jiffies = jiffies;
- request->previous_seqno = ring->last_submitted_seqno;
- ring->last_submitted_seqno = request->seqno;
- list_add_tail(&request->list, &ring->request_list);
+ request->previous_seqno = engine->last_submitted_seqno;
+ engine->last_submitted_seqno = request->seqno;
+ list_add_tail(&request->list, &engine->request_list);
trace_i915_gem_request_add(request);
- i915_queue_hangcheck(ring->dev);
+ i915_queue_hangcheck(engine->dev);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int i;
/*
* them for finding the guilty party. As the requests only borrow
* their reference to the objects, the inspection must be done first.
*/
- for_each_ring(ring, dev_priv, i)
- i915_gem_reset_ring_status(dev_priv, ring);
+ for_each_ring(engine, dev_priv, i)
+ i915_gem_reset_ring_status(dev_priv, engine);
- for_each_ring(ring, dev_priv, i)
- i915_gem_reset_ring_cleanup(dev_priv, ring);
+ for_each_ring(engine, dev_priv, i)
+ i915_gem_reset_ring_cleanup(dev_priv, engine);
i915_gem_context_reset(dev);
i915_gem_retire_requests(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
bool idle = true;
int i;
- for_each_ring(ring, dev_priv, i) {
- i915_gem_retire_requests_ring(ring);
- idle &= list_empty(&ring->request_list);
+ for_each_ring(engine, dev_priv, i) {
+ i915_gem_retire_requests_ring(engine);
+ idle &= list_empty(&engine->request_list);
if (i915.enable_execlists) {
- spin_lock_irq(&ring->execlist_lock);
- idle &= list_empty(&ring->execlist_queue);
- spin_unlock_irq(&ring->execlist_lock);
+ spin_lock_irq(&engine->execlist_lock);
+ idle &= list_empty(&engine->execlist_queue);
+ spin_unlock_irq(&engine->execlist_lock);
- intel_execlists_retire_requests(ring);
+ intel_execlists_retire_requests(engine);
}
}
intel_mark_idle(dev);
if (mutex_trylock(&dev->struct_mutex)) {
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int i;
- for_each_ring(ring, dev_priv, i)
- i915_gem_batch_pool_fini(&ring->batch_pool);
+ for_each_ring(engine, dev_priv, i)
+ i915_gem_batch_pool_fini(&engine->batch_pool);
mutex_unlock(&dev->struct_mutex);
}
int i915_gpu_idle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int ret, i;
/* Flush everything onto the inactive list. */
- for_each_ring(ring, dev_priv, i) {
+ for_each_ring(engine, dev_priv, i) {
if (!i915.enable_execlists) {
struct drm_i915_gem_request *req;
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
i915_add_request_no_flush(req);
}
- ret = intel_ring_idle(ring);
+ ret = intel_ring_idle(engine);
if (ret)
return ret;
}
i915_gem_stop_ringbuffers(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int i;
- for_each_ring(ring, dev_priv, i)
- dev_priv->gt.stop_ring(ring);
+ for_each_ring(engine, dev_priv, i)
+ dev_priv->gt.stop_ring(engine);
}
int
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
{
- struct intel_engine_cs *ring = req->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = req->ring;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
int i, ret;
* at initialization time.
*/
for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
- intel_ring_emit(ring, remap_info[i]);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
+ intel_ring_emit(engine, remap_info[i]);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return ret;
}
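/*
 * The loop above uses the standard legacy-ringbuffer emit pattern:
 * MI_LOAD_REGISTER_IMM(1) is a three-dword packet (header, register
 * offset, value) written via intel_ring_emit()/intel_ring_emit_reg()
 * into space reserved by intel_ring_begin(), with intel_ring_advance()
 * publishing the new tail. Each L3 remap entry is therefore one LRI
 * packet per GEN7_L3LOG register.
 */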
i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int ret, i, j;
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
}
/* Need to do basic initialisation of all rings first: */
- for_each_ring(ring, dev_priv, i) {
- ret = ring->init_hw(ring);
+ for_each_ring(engine, dev_priv, i) {
+ ret = engine->init_hw(engine);
if (ret)
goto out;
}
goto out;
/* Now it is safe to go back round and do everything else: */
- for_each_ring(ring, dev_priv, i) {
+ for_each_ring(engine, dev_priv, i) {
struct drm_i915_gem_request *req;
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
i915_gem_cleanup_ringbuffer(dev);
goto out;
}
- if (ring->id == RCS) {
+ if (engine->id == RCS) {
for (j = 0; j < NUM_L3_SLICES(dev); j++)
i915_gem_l3_remap(req, j);
}
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int i;
- for_each_ring(ring, dev_priv, i)
- dev_priv->gt.cleanup_ring(ring);
+ for_each_ring(engine, dev_priv, i)
+ dev_priv->gt.cleanup_ring(engine);
if (i915.enable_execlists)
/*
}
for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_engine_cs *ring = &dev_priv->ring[i];
+ struct intel_engine_cs *engine = &dev_priv->ring[i];
- if (ring->last_context) {
- i915_gem_context_unpin(ring->last_context, ring);
- ring->last_context = NULL;
+ if (engine->last_context) {
+ i915_gem_context_unpin(engine->last_context, engine);
+ engine->last_context = NULL;
}
}
}
for (i = I915_NUM_RINGS; --i >= 0;) {
- struct intel_engine_cs *ring = &dev_priv->ring[i];
+ struct intel_engine_cs *engine = &dev_priv->ring[i];
- if (ring->last_context) {
- i915_gem_context_unpin(ring->last_context, ring);
- ring->last_context = NULL;
+ if (engine->last_context) {
+ i915_gem_context_unpin(engine->last_context, engine);
+ engine->last_context = NULL;
}
}
int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
if (i915.enable_execlists) {
- if (ring->init_context == NULL)
+ if (engine->init_context == NULL)
return 0;
- ret = ring->init_context(req);
+ ret = engine->init_context(req);
} else
ret = i915_switch_context(req);
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
- i915_semaphore_is_enabled(ring->dev) ?
- hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+ i915_semaphore_is_enabled(engine->dev) ?
+ hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
0;
int len, i, ret;
* explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch.
*/
- if (IS_GEN6(ring->dev)) {
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
+ if (IS_GEN6(engine->dev)) {
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
/* These flags are for resource streamer on HSW+ */
- if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+ if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
- else if (INTEL_INFO(ring->dev)->gen < 8)
+ else if (INTEL_INFO(engine->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
len = 4;
- if (INTEL_INFO(ring->dev)->gen >= 7)
+ if (INTEL_INFO(engine->dev)->gen >= 7)
len += 2 + (num_rings ? 4*num_rings + 2 : 0);
ret = intel_ring_begin(req, len);
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (INTEL_INFO(ring->dev)->gen >= 7) {
- intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+ if (INTEL_INFO(engine->dev)->gen >= 7) {
+ intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (num_rings) {
struct intel_engine_cs *signaller;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
- for_each_ring(signaller, to_i915(ring->dev), i) {
- if (signaller == ring)
+ intel_ring_emit(engine,
+ MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_ring(signaller, to_i915(engine->dev), i) {
+ if (signaller == engine)
continue;
- intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
- intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ intel_ring_emit_reg(engine,
+ RING_PSMI_CTL(signaller->mmio_base));
+ intel_ring_emit(engine,
+ _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
}
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_SET_CONTEXT);
+ intel_ring_emit(engine,
+ i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
*/
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
- if (INTEL_INFO(ring->dev)->gen >= 7) {
+ if (INTEL_INFO(engine->dev)->gen >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
- for_each_ring(signaller, to_i915(ring->dev), i) {
- if (signaller == ring)
+ intel_ring_emit(engine,
+ MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_ring(signaller, to_i915(engine->dev), i) {
+ if (signaller == engine)
continue;
- intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
- intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ intel_ring_emit_reg(engine,
+ RING_PSMI_CTL(signaller->mmio_base));
+ intel_ring_emit(engine,
+ _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
- intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+ intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return ret;
}
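/*
 * Shape of mi_set_context(), as visible above: MI_SET_CONTEXT is
 * bracketed by MI_ARB_ON_OFF so arbitration is disabled across the
 * switch; on gen7+, when cross-engine semaphore signalling is enabled,
 * GEN6_PSMI_SLEEP_MSG_DISABLE is masked on every *other* engine around
 * the switch (the extended w/a noted above); and the MI_NOOP following
 * MI_SET_CONTEXT is the WaMiSetContext_Hang workaround.
 */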
static int do_switch(struct drm_i915_gem_request *req)
{
struct intel_context *to = req->ctx;
- struct intel_engine_cs *ring = req->ring;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct intel_context *from = ring->last_context;
+ struct intel_engine_cs *engine = req->ring;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct intel_context *from = engine->last_context;
u32 hw_flags = 0;
bool uninitialized = false;
int ret, i;
- if (from != NULL && ring == &dev_priv->ring[RCS]) {
+ if (from != NULL && engine == &dev_priv->ring[RCS]) {
BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
- if (should_skip_switch(ring, from, to))
+ if (should_skip_switch(engine, from, to))
return 0;
/* Trying to pin first makes error handling easier. */
- if (ring == &dev_priv->ring[RCS]) {
+ if (engine == &dev_priv->ring[RCS]) {
ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
- get_context_alignment(ring->dev), 0);
+ get_context_alignment(engine->dev),
+ 0);
if (ret)
return ret;
}
* evict_everything - as a last ditch gtt defrag effort that also
* switches to the default context. Hence we need to reload from here.
*/
- from = ring->last_context;
+ from = engine->last_context;
- if (needs_pd_load_pre(ring, to)) {
+ if (needs_pd_load_pre(engine, to)) {
/* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load
* Register Immediate commands in Ring Buffer before submitting
* a context."*/
- trace_switch_mm(ring, to);
+ trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
if (ret)
goto unpin_out;
/* Doing a PD load always reloads the page dirs */
- to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+ to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
}
- if (ring != &dev_priv->ring[RCS]) {
+ if (engine != &dev_priv->ring[RCS]) {
if (from)
i915_gem_context_unreference(from);
goto done;
* space. This means we must enforce that a page table load
* occur when this occurs. */
} else if (to->ppgtt &&
- (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
+ (intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings)) {
hw_flags |= MI_FORCE_RESTORE;
- to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+ to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
}
/* We should never emit switch_mm more than once */
- WARN_ON(needs_pd_load_pre(ring, to) &&
- needs_pd_load_post(ring, to, hw_flags));
+ WARN_ON(needs_pd_load_pre(engine, to) &&
+ needs_pd_load_post(engine, to, hw_flags));
ret = mi_set_context(req, hw_flags);
if (ret)
/* GEN8 does *not* require an explicit reload if the PDPs have been
* setup, and we do not wish to move them.
*/
- if (needs_pd_load_post(ring, to, hw_flags)) {
- trace_switch_mm(ring, to);
+ if (needs_pd_load_post(engine, to, hw_flags)) {
+ trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
/* The hardware context switch is emitted, but we haven't
* actually changed the state - so it's probably safe to bail
done:
i915_gem_context_reference(to);
- ring->last_context = to;
+ engine->last_context = to;
if (uninitialized) {
- if (ring->init_context) {
- ret = ring->init_context(req);
+ if (engine->init_context) {
+ ret = engine->init_context(req);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
return 0;
unpin_out:
- if (ring->id == RCS)
+ if (engine->id == RCS)
i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
return ret;
}
*/
int i915_switch_context(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_engine_cs *engine = req->ring;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
WARN_ON(i915.enable_execlists);
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
- if (req->ctx != ring->last_context) {
+ if (req->ctx != engine->last_context) {
i915_gem_context_reference(req->ctx);
- if (ring->last_context)
- i915_gem_context_unreference(ring->last_context);
- ring->last_context = req->ctx;
+ if (engine->last_context)
+ i915_gem_context_unreference(engine->last_context);
+ engine->last_context = req->ctx;
}
return 0;
}
static int warned;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int err = 0;
int i;
if (warned)
return 0;
- for_each_ring(ring, dev_priv, i) {
- list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) {
+ for_each_ring(engine, dev_priv, i) {
+ list_for_each_entry(obj, &engine->active_list,
+ ring_list[engine->id]) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("%s: freed active obj %p\n",
- ring->name, obj);
+ engine->name, obj);
err++;
break;
} else if (!obj->active ||
- obj->last_read_req[ring->id] == NULL) {
+ obj->last_read_req[engine->id] == NULL) {
DRM_ERROR("%s: invalid active obj %p\n",
- ring->name, obj);
+ engine->name, obj);
err++;
} else if (obj->base.write_domain) {
DRM_ERROR("%s: invalid write obj %p (w %x)\n",
- ring->name,
+ engine->name,
obj, obj->base.write_domain);
err++;
}
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
+ struct intel_engine_cs *engine = i915_gem_request_get_ring(req);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
&dev_priv->mm.fence_list);
}
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
- if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+ if (!IS_GEN7(dev) || engine != &dev_priv->ring[RCS]) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
return ret;
for (i = 0; i < 4; i++) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
- intel_ring_emit(ring, 0);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
+ intel_ring_emit(engine, 0);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return 0;
}
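/*
 * I915_EXEC_GEN7_SOL_RESET zeroes the four streamout write-offset
 * registers (GEN7_SO_WRITE_OFFSET(0..3)) with one LRI packet each; as
 * the guard above enforces, the flag is only meaningful on gen7 and
 * only on the render engine.
 */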
struct list_head *vmas)
{
struct drm_device *dev = params->dev;
- struct intel_engine_cs *ring = params->ring;
+ struct intel_engine_cs *engine = params->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
u64 exec_start, exec_len;
int instp_mode;
if (ret)
return ret;
- WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
- "%s didn't clear reload\n", ring->name);
+ WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
+ "%s didn't clear reload\n", engine->name);
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+ if (instp_mode != 0 && engine != &dev_priv->ring[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
return -EINVAL;
}
- if (ring == &dev_priv->ring[RCS] &&
+ if (engine == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(params->request, 4);
if (ret)
return ret;
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, INSTPM);
- intel_ring_emit(ring, instp_mask << 16 | instp_mode);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, INSTPM);
+ intel_ring_emit(engine, instp_mask << 16 | instp_mode);
+ intel_ring_advance(engine);
dev_priv->relative_constants_mode = instp_mode;
}
if (exec_len == 0)
exec_len = params->batch_obj->base.size;
- ret = ring->dispatch_execbuffer(params->request,
+ ret = engine->dispatch_execbuffer(params->request,
exec_start, exec_len,
params->dispatch_flags);
if (ret)
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct intel_context *ctx;
struct i915_address_space *vm;
struct i915_execbuffer_params params_master; /* XXX: will be removed later */
if (args->flags & I915_EXEC_IS_PINNED)
dispatch_flags |= I915_DISPATCH_PINNED;
- ret = eb_select_ring(dev_priv, file, args, &ring);
+ ret = eb_select_ring(dev_priv, file, args, &engine);
if (ret)
return ret;
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
return -EINVAL;
}
- if (ring->id != RCS) {
+ if (engine->id != RCS) {
DRM_DEBUG("RS is not available on %s\n",
- ring->name);
+ engine->name);
return -EINVAL;
}
if (ret)
goto pre_mutex_err;
- ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
+ ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
ret = PTR_ERR(ctx);
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
+ &need_relocs);
if (ret)
goto err;
ret = i915_gem_execbuffer_relocate(eb);
if (ret) {
if (ret == -EFAULT) {
- ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+ ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
+ engine,
eb, exec, ctx);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
}
params->args_batch_start_offset = args->batch_start_offset;
- if (i915_needs_cmd_parser(ring) && args->batch_len) {
+ if (i915_needs_cmd_parser(engine) && args->batch_len) {
struct drm_i915_gem_object *parsed_batch_obj;
- parsed_batch_obj = i915_gem_execbuffer_parse(ring,
- &shadow_exec_entry,
- eb,
- batch_obj,
- args->batch_start_offset,
- args->batch_len,
- file->is_master);
+ parsed_batch_obj = i915_gem_execbuffer_parse(engine,
+ &shadow_exec_entry,
+ eb,
+ batch_obj,
+ args->batch_start_offset,
+ args->batch_len,
+ file->is_master);
if (IS_ERR(parsed_batch_obj)) {
ret = PTR_ERR(parsed_batch_obj);
goto err;
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
/* Allocate a request for this batch buffer nice and early. */
- req = i915_gem_request_alloc(ring, ctx);
+ req = i915_gem_request_alloc(engine, ctx);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto err_batch_unpin;
*/
params->dev = dev;
params->file = file;
- params->ring = ring;
+ params->ring = engine;
params->dispatch_flags = dispatch_flags;
params->batch_obj = batch_obj;
params->ctx = ctx;
unsigned entry,
dma_addr_t addr)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
BUG_ON(entry >= 4);
if (ret)
return ret;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
- intel_ring_emit(ring, upper_32_bits(addr));
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
- intel_ring_emit(ring, lower_32_bits(addr));
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
+ intel_ring_emit(engine, upper_32_bits(addr));
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
+ intel_ring_emit(engine, lower_32_bits(addr));
+ intel_ring_advance(engine);
return 0;
}
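/*
 * Each of the four page-directory pointers is a 64-bit address
 * programmed as an upper/lower pair of 32-bit register writes
 * (PDP_UDW then PDP_LDW), matching how gen8_ppgtt_info() earlier
 * reassembles them: pdp = (u64)UDW << 32 | LDW.
 */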
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
if (ret)
return ret;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
- intel_ring_emit(ring, PP_DIR_DCLV_2G);
- intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
- intel_ring_emit(ring, get_pd_offset(ppgtt));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
+ intel_ring_emit(engine, PP_DIR_DCLV_2G);
+ intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
+ intel_ring_emit(engine, get_pd_offset(ppgtt));
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
- I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
return 0;
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
if (ret)
return ret;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
- intel_ring_emit(ring, PP_DIR_DCLV_2G);
- intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
- intel_ring_emit(ring, get_pd_offset(ppgtt));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
+ intel_ring_emit(engine, PP_DIR_DCLV_2G);
+ intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
+ intel_ring_emit(engine, get_pd_offset(ppgtt));
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
/* XXX: RCS is the only one to auto invalidate the TLBs? */
- if (ring->id != RCS) {
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (engine->id != RCS) {
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
- POSTING_READ(RING_PP_DIR_DCLV(ring));
+ POSTING_READ(RING_PP_DIR_DCLV(engine));
return 0;
}
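/*
 * Note the split: the gen6 and vgpu variants program PP_DIR_DCLV and
 * PP_DIR_BASE directly via MMIO (with a posting read on gen6), while
 * the hsw/gen7 paths above emit the same register writes through the
 * ring as LRI packets, bracketed by GPU-domain flushes so the TLBs are
 * invalidated before the new page directory takes effect.
 */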
static void gen8_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int j;
- for_each_ring(ring, dev_priv, j) {
+ for_each_ring(engine, dev_priv, j) {
u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
- I915_WRITE(RING_MODE_GEN7(ring),
+ I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
}
}
static void gen7_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
int i;
}
I915_WRITE(GAM_ECOCHK, ecochk);
- for_each_ring(ring, dev_priv, i) {
+ for_each_ring(engine, dev_priv, i) {
/* GFX_MODE is per-ring on gen7+ */
- I915_WRITE(RING_MODE_GEN7(ring),
+ I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
void i915_check_and_clear_faults(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int i;
if (INTEL_INFO(dev)->gen < 6)
return;
- for_each_ring(ring, dev_priv, i) {
+ for_each_ring(engine, dev_priv, i) {
u32 fault_reg;
- fault_reg = I915_READ(RING_FAULT_REG(ring));
+ fault_reg = I915_READ(RING_FAULT_REG(engine));
if (fault_reg & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
"\tAddr: 0x%08lx\n"
fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
RING_FAULT_SRCID(fault_reg),
RING_FAULT_FAULT_TYPE(fault_reg));
- I915_WRITE(RING_FAULT_REG(ring),
+ I915_WRITE(RING_FAULT_REG(engine),
fault_reg & ~RING_FAULT_VALID);
}
}
if (obj) {
u64 wa_ctx_offset = obj->gtt_offset;
u32 *wa_ctx_page = &obj->pages[0][0];
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
- u32 wa_ctx_size = (ring->wa_ctx.indirect_ctx.size +
- ring->wa_ctx.per_ctx.size);
+ struct intel_engine_cs *engine = &dev_priv->ring[RCS];
+ u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
+ engine->wa_ctx.per_ctx.size);
err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
dev_priv->ring[i].name, wa_ctx_offset);
int i, count;
for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_engine_cs *ring = &dev_priv->ring[i];
+ struct intel_engine_cs *engine = &dev_priv->ring[i];
struct intel_ringbuffer *rbuf;
error->ring[i].pid = -1;
- if (ring->dev == NULL)
+ if (engine->dev == NULL)
continue;
error->ring[i].valid = true;
- i915_record_ring_state(dev, error, ring, &error->ring[i]);
+ i915_record_ring_state(dev, error, engine, &error->ring[i]);
- request = i915_gem_find_active_request(ring);
+ request = i915_gem_find_active_request(engine);
if (request) {
struct i915_address_space *vm;
if (HAS_BROKEN_CS_TLB(dev_priv->dev))
error->ring[i].wa_batchbuffer =
i915_error_ggtt_object_create(dev_priv,
- ring->scratch.obj);
+ engine->scratch.obj);
if (request->pid) {
struct task_struct *task;
* executed).
*/
if (request)
- rbuf = request->ctx->engine[ring->id].ringbuf;
+ rbuf = request->ctx->engine[engine->id].ringbuf;
else
- rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
+ rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
} else
- rbuf = ring->buffer;
+ rbuf = engine->buffer;
error->ring[i].cpu_ring_head = rbuf->head;
error->ring[i].cpu_ring_tail = rbuf->tail;
i915_error_ggtt_object_create(dev_priv, rbuf->obj);
error->ring[i].hws_page =
- i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
+ i915_error_ggtt_object_create(dev_priv,
+ engine->status_page.obj);
- if (ring->wa_ctx.obj) {
+ if (engine->wa_ctx.obj) {
error->ring[i].wa_ctx =
i915_error_ggtt_object_create(dev_priv,
- ring->wa_ctx.obj);
+ engine->wa_ctx.obj);
}
- i915_gem_record_active_context(ring, error, &error->ring[i]);
+ i915_gem_record_active_context(engine, error, &error->ring[i]);
count = 0;
- list_for_each_entry(request, &ring->request_list, list)
+ list_for_each_entry(request, &engine->request_list, list)
count++;
error->ring[i].num_requests = count;
}
count = 0;
- list_for_each_entry(request, &ring->request_list, list) {
+ list_for_each_entry(request, &engine->request_list, list) {
struct drm_i915_error_request *erq;
if (count >= error->ring[i].num_requests) {
struct i915_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct intel_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
- for_each_ring(ring, dev_priv, i) {
- struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
+ for_each_ring(engine, dev_priv, i) {
+ struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
struct drm_i915_gem_object *obj;
uint64_t ctx_desc;
if (!obj)
break; /* XXX: continue? */
- ctx_desc = intel_lr_context_descriptor(ctx, ring);
+ ctx_desc = intel_lr_context_descriptor(ctx, engine);
lrc->context_desc = (u32)ctx_desc;
/* The state page is after PPHWSP */
lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
- (ring->guc_id << GUC_ELC_ENGINE_OFFSET);
+ (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
obj = ctx->engine[i].ringbuf->obj;
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
- desc.engines_used |= (1 << ring->guc_id);
+ desc.engines_used |= (1 << engine->guc_id);
}
WARN_ON(desc.engines_used == 0);
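/*
 * One guc_execlist_context slot is filled per engine the client may
 * submit on, indexed by engine->guc_id (the GuC firmware's own engine
 * numbering, distinct from the driver's engine->id), and engines_used
 * collects the matching bitmask for the descriptor.
 */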
struct guc_ads *ads;
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct page *page;
u32 size, i;
* so its address won't change after we've told the GuC where
* to find it.
*/
- ring = &dev_priv->ring[RCS];
- ads->golden_context_lrca = ring->status_page.gfx_addr;
+ engine = &dev_priv->ring[RCS];
+ ads->golden_context_lrca = engine->status_page.gfx_addr;
- for_each_ring(ring, dev_priv, i)
- ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
+ for_each_ring(engine, dev_priv, i)
+ ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
/* GuC scheduling policies */
policies = (void *)ads + sizeof(struct guc_ads);
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
- for_each_ring(ring, dev_priv, i) {
- reg_state->mmio_white_list[ring->guc_id].mmio_start =
- ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
+ for_each_ring(engine, dev_priv, i) {
+ reg_state->mmio_white_list[engine->guc_id].mmio_start =
+ engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
- reg_state->mmio_white_list[ring->guc_id].count = 0;
+ reg_state->mmio_white_list[engine->guc_id].count = 0;
}
ads->reg_state_addr = ads->scheduler_policies +
static bool any_waiters(struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int i;
- for_each_ring(ring, dev_priv, i)
- if (ring->irq_refcount)
+ for_each_ring(engine, dev_priv, i)
+ if (engine->irq_refcount)
return true;
return false;
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
bool reset_completed)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int i;
/*
*/
/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
- for_each_ring(ring, dev_priv, i)
- wake_up_all(&ring->irq_queue);
+ for_each_ring(engine, dev_priv, i)
+ wake_up_all(&engine->irq_queue);
/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
wake_up_all(&dev_priv->pending_flip_queue);
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int i;
- for_each_ring(ring, dev_priv, i)
- ring->hangcheck.deadlock = 0;
+ for_each_ring(engine, dev_priv, i)
+ engine->hangcheck.deadlock = 0;
}
static bool subunits_stuck(struct intel_engine_cs *ring)
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
struct drm_device *dev = dev_priv->dev;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int i;
int busy_count = 0, rings_hung = 0;
bool stuck[I915_NUM_RINGS] = { 0 };
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
- for_each_ring(ring, dev_priv, i) {
+ for_each_ring(engine, dev_priv, i) {
u64 acthd;
u32 seqno;
bool busy = true;
semaphore_clear_deadlocks(dev_priv);
- seqno = ring->get_seqno(ring, false);
- acthd = intel_ring_get_active_head(ring);
+ seqno = engine->get_seqno(engine, false);
+ acthd = intel_ring_get_active_head(engine);
- if (ring->hangcheck.seqno == seqno) {
- if (ring_idle(ring, seqno)) {
- ring->hangcheck.action = HANGCHECK_IDLE;
+ if (engine->hangcheck.seqno == seqno) {
+ if (ring_idle(engine, seqno)) {
+ engine->hangcheck.action = HANGCHECK_IDLE;
- if (waitqueue_active(&ring->irq_queue)) {
+ if (waitqueue_active(&engine->irq_queue)) {
/* Issue a wake-up to catch stuck h/w. */
- if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
- if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
+ if (!test_and_set_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings)) {
+ if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(engine)))
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
- ring->name);
+ engine->name);
else
DRM_INFO("Fake missed irq on %s\n",
- ring->name);
- wake_up_all(&ring->irq_queue);
+ engine->name);
+ wake_up_all(&engine->irq_queue);
}
/* Safeguard against driver failure */
- ring->hangcheck.score += BUSY;
+ engine->hangcheck.score += BUSY;
} else
busy = false;
} else {
* being repeatedly kicked and so responsible
* for stalling the machine.
*/
- ring->hangcheck.action = ring_stuck(ring,
- acthd);
+ engine->hangcheck.action = ring_stuck(engine,
+ acthd);
- switch (ring->hangcheck.action) {
+ switch (engine->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
break;
case HANGCHECK_ACTIVE:
- ring->hangcheck.score += BUSY;
+ engine->hangcheck.score += BUSY;
break;
case HANGCHECK_KICK:
- ring->hangcheck.score += KICK;
+ engine->hangcheck.score += KICK;
break;
case HANGCHECK_HUNG:
- ring->hangcheck.score += HUNG;
+ engine->hangcheck.score += HUNG;
stuck[i] = true;
break;
}
}
} else {
- ring->hangcheck.action = HANGCHECK_ACTIVE;
+ engine->hangcheck.action = HANGCHECK_ACTIVE;
/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/
- if (ring->hangcheck.score > 0)
- ring->hangcheck.score -= ACTIVE_DECAY;
- if (ring->hangcheck.score < 0)
- ring->hangcheck.score = 0;
+ if (engine->hangcheck.score > 0)
+ engine->hangcheck.score -= ACTIVE_DECAY;
+ if (engine->hangcheck.score < 0)
+ engine->hangcheck.score = 0;
/* Clear head and subunit states on seqno movement */
- ring->hangcheck.acthd = 0;
+ engine->hangcheck.acthd = 0;
- memset(ring->hangcheck.instdone, 0,
- sizeof(ring->hangcheck.instdone));
+ memset(engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
}
- ring->hangcheck.seqno = seqno;
- ring->hangcheck.acthd = acthd;
+ engine->hangcheck.seqno = seqno;
+ engine->hangcheck.acthd = acthd;
busy_count += busy;
}
- for_each_ring(ring, dev_priv, i) {
- if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+ for_each_ring(engine, dev_priv, i) {
+ if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
DRM_INFO("%s on %s\n",
stuck[i] ? "stuck" : "no progress",
- ring->name);
+ engine->name);
rings_hung++;
}
}
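/*
 * The hangcheck score is a leaky bucket: BUSY, KICK and HUNG raise an
 * engine's score while its seqno makes no progress, ACTIVE_DECAY
 * drains it once progress resumes, and an engine whose score reaches
 * HANGCHECK_SCORE_RING_HUNG is reported above and counted in
 * rings_hung.
 */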
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
- intel_ring_emit(ring, 0); /* aux display base address, unused */
+ intel_ring_emit(engine, fb->pitches[0]);
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+ intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, fb->pitches[0]);
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, MI_NOOP);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
+ intel_ring_emit(engine, fb->pitches[0]);
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- intel_ring_emit(ring, pf | pipesrc);
+ intel_ring_emit(engine, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
if (ret)
return ret;
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- intel_ring_emit(ring, pf | pipesrc);
+ intel_ring_emit(engine, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
int len, ret;
}
len = 4;
- if (ring->id == RCS) {
+ if (engine->id == RCS) {
len += 6;
/*
* On Gen 8, SRM is now taking an extra dword to accommodate
* for the RCS also doesn't appear to drop events. Setting the DERRMR
* to zero does lead to lockups within MI_DISPLAY_FLIP.
*/
- if (ring->id == RCS) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, DERRMR);
- intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
- DERRMR_PIPEB_PRI_FLIP_DONE |
- DERRMR_PIPEC_PRI_FLIP_DONE));
+ if (engine->id == RCS) {
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, DERRMR);
+ intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+ DERRMR_PIPEB_PRI_FLIP_DONE |
+ DERRMR_PIPEC_PRI_FLIP_DONE));
if (IS_GEN8(dev))
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
+ intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT);
else
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
+ intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit_reg(ring, DERRMR);
- intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+ intel_ring_emit_reg(engine, DERRMR);
+ intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
if (IS_GEN8(dev)) {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, MI_NOOP);
}
}
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
- intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
- intel_ring_emit(ring, (MI_NOOP));
+ intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
+ intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
struct drm_plane *primary = crtc->primary;
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
bool mmio_flip;
struct drm_i915_gem_request *request = NULL;
int ret;
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- ring = &dev_priv->ring[BCS];
+ engine = &dev_priv->ring[BCS];
if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
- ring = NULL;
+ engine = NULL;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- ring = &dev_priv->ring[BCS];
+ engine = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
- ring = i915_gem_request_get_ring(obj->last_write_req);
- if (ring == NULL || ring->id != RCS)
- ring = &dev_priv->ring[BCS];
+ engine = i915_gem_request_get_ring(obj->last_write_req);
+ if (engine == NULL || engine->id != RCS)
+ engine = &dev_priv->ring[BCS];
} else {
- ring = &dev_priv->ring[RCS];
+ engine = &dev_priv->ring[RCS];
}
- mmio_flip = use_mmio_flip(ring, obj);
+ mmio_flip = use_mmio_flip(engine, obj);
/* When using CS flips, we want to emit semaphores between rings.
* However, when using mmio flips we will create a task to do the
* into the display plane and skip any waits.
*/
if (!mmio_flip) {
- ret = i915_gem_object_sync(obj, ring, &request);
+ ret = i915_gem_object_sync(obj, engine, &request);
if (ret)
goto cleanup_pending;
}
obj->last_write_req);
} else {
if (!request) {
- request = i915_gem_request_alloc(ring, NULL);
+ request = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(request)) {
ret = PTR_ERR(request);
goto cleanup_unpin;
static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int i, irqs;
/* tell all command streamers NOT to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MODE_GEN7(ring), irqs);
+ for_each_ring(engine, dev_priv, i)
+ I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route all GT interrupts to the host */
I915_WRITE(GUC_BCS_RCS_IER, 0);
static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int i, irqs;
/* tell all command streamers to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MODE_GEN7(ring), irqs);
+ for_each_ring(engine, dev_priv, i)
+ I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
struct drm_i915_gem_request *rq1)
{
- struct intel_engine_cs *ring = rq0->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = rq0->ring;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t desc[2];
rq0->elsp_submitted++;
/* You must always write both descriptors in the order below. */
- I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
- I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
+ I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
+ I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
- I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
+ I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
/* The context is automatically loaded after the following */
- I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
+ I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
/* ELSP is a write-only register, use another nearby reg for posting */
- POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
+ POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
}
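/*
 * [Editor's sketch] The four writes above push two 64-bit context
 * descriptors through a single 32-bit submission port: element 1 goes
 * first, each element is written upper dword then lower dword, and the
 * final lower-dword write of element 0 triggers the context load.
 * Minimal standalone model; elsp_write() is a stand-in for the MMIO
 * accessor, not an i915 symbol.
 */
#include <stdint.h>
#include <stdio.h>

static void elsp_write(uint32_t val)
{
	printf("ELSP <- 0x%08x\n", val);
}

static void submit_execlist(uint64_t desc0, uint64_t desc1)
{
	elsp_write((uint32_t)(desc1 >> 32));	/* element 1, upper */
	elsp_write((uint32_t)desc1);		/* element 1, lower */
	elsp_write((uint32_t)(desc0 >> 32));	/* element 0, upper */
	elsp_write((uint32_t)desc0);		/* element 0, lower: go */
}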
static void
static void execlists_update_context(struct drm_i915_gem_request *rq)
{
- struct intel_engine_cs *ring = rq->ring;
+ struct intel_engine_cs *engine = rq->ring;
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
- uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
+ uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = rq->tail;
static void execlists_context_queue(struct drm_i915_gem_request *request)
{
- struct intel_engine_cs *ring = request->ring;
+ struct intel_engine_cs *engine = request->ring;
struct drm_i915_gem_request *cursor;
int num_elements = 0;
if (request->ctx != request->i915->kernel_context)
- intel_lr_context_pin(request->ctx, ring);
+ intel_lr_context_pin(request->ctx, engine);
i915_gem_request_reference(request);
- spin_lock_irq(&ring->execlist_lock);
+ spin_lock_irq(&engine->execlist_lock);
- list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
+ list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
if (++num_elements > 2)
break;
if (num_elements > 2) {
struct drm_i915_gem_request *tail_req;
- tail_req = list_last_entry(&ring->execlist_queue,
+ tail_req = list_last_entry(&engine->execlist_queue,
struct drm_i915_gem_request,
execlist_link);
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
list_move_tail(&tail_req->execlist_link,
- &ring->execlist_retired_req_list);
+ &engine->execlist_retired_req_list);
}
}
- list_add_tail(&request->execlist_link, &ring->execlist_queue);
+ list_add_tail(&request->execlist_link, &engine->execlist_queue);
if (num_elements == 0)
- execlists_context_unqueue(ring);
+ execlists_context_unqueue(engine);
- spin_unlock_irq(&ring->execlist_lock);
+ spin_unlock_irq(&engine->execlist_lock);
}
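/*
 * [Editor's sketch] The queueing logic above tolerates at most two
 * elements in flight towards the ELSP ports; once a third would pile
 * up, the previous not-yet-submitted tail is moved to the retired list
 * so back-to-back requests coalesce. Toy model only: no locking, no
 * reference counting, no per-context bookkeeping.
 */
#include <stdio.h>

static int queued;	/* requests currently queued for the ports */

static void context_queue(int req_id)
{
	if (queued >= 3) {
		/* Coalesce: the old tail is retired, replaced below. */
		printf("retiring previous tail\n");
		queued--;
	}
	queued++;
	printf("queued %d (depth now %d)\n", req_id, queued);
	if (queued == 1)
		printf("ports idle: submitting to ELSP now\n");
}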
static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
uint32_t flush_domains;
int ret;
flush_domains = 0;
- if (ring->gpu_caches_dirty)
+ if (engine->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
- ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+ ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
- ring->gpu_caches_dirty = false;
+ engine->gpu_caches_dirty = false;
return 0;
}
int bytes)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
struct drm_i915_gem_request *target;
unsigned space;
int ret;
/* The whole point of reserving space is to not wait! */
WARN_ON(ringbuf->reserved_in_use);
- list_for_each_entry(target, &ring->request_list, list) {
+ list_for_each_entry(target, &engine->request_list, list) {
/*
* The request queue is per-engine, so can contain requests
* from multiple ringbuffers. Here, we must ignore any that
break;
}
- if (WARN_ON(&target->list == &ring->request_list))
+ if (WARN_ON(&target->list == &engine->request_list))
return -ENOSPC;
ret = i915_wait_request(target);
struct list_head *vmas)
{
struct drm_device *dev = params->dev;
- struct intel_engine_cs *ring = params->ring;
+ struct intel_engine_cs *engine = params->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
+ struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
u64 exec_start;
int instp_mode;
u32 instp_mask;
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+ if (instp_mode != 0 && engine != &dev_priv->ring[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
if (ret)
return ret;
- if (ring == &dev_priv->ring[RCS] &&
+ if (engine == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_logical_ring_begin(params->request, 4);
if (ret)
exec_start = params->batch_obj_vm_offset +
args->batch_start_offset;
- ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
+ ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
if (ret)
return ret;
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
- if (!ring->gpu_caches_dirty)
+ if (!engine->gpu_caches_dirty)
return 0;
- ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
+ ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
- ring->gpu_caches_dirty = false;
+ engine->gpu_caches_dirty = false;
return 0;
}
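/*
 * [Editor's sketch] Both flush helpers in this patch follow the same
 * dirty-flag protocol: skip the flush entirely unless something marked
 * the GPU caches dirty, emit it, then clear the flag. Self-contained
 * equivalent; emit_flush() here is a placeholder.
 */
#include <stdbool.h>

struct engine_model {
	bool gpu_caches_dirty;
};

static int emit_flush(struct engine_model *e)
{
	(void)e;		/* would emit the flush command here */
	return 0;
}

static int flush_all_caches(struct engine_model *e)
{
	int ret;

	if (!e->gpu_caches_dirty)
		return 0;	/* nothing dirtied since the last flush */

	ret = emit_flush(e);
	if (ret)
		return ret;

	e->gpu_caches_dirty = false;
	return 0;
}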
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
struct intel_ringbuffer *ringbuf = req->ringbuf;
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
if (w->count == 0)
return 0;
- ring->gpu_caches_dirty = true;
+ engine->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(req);
if (ret)
return ret;
intel_logical_ring_advance(ringbuf);
- ring->gpu_caches_dirty = true;
+ engine->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(req);
if (ret)
return ret;
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
{
struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
struct intel_ringbuffer *ringbuf = req->ringbuf;
const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
int i, ret;
for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+ intel_logical_ring_emit_reg(ringbuf,
+ GEN8_RING_PDP_UDW(engine, i));
intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
- intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+ intel_logical_ring_emit_reg(ringbuf,
+ GEN8_RING_PDP_LDW(engine, i));
intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
}
u32 unused)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
- struct intel_engine_cs *ring = ringbuf->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = ringbuf->ring;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
cmd |= MI_INVALIDATE_TLB;
- if (ring == &dev_priv->ring[VCS])
+ if (engine == &dev_priv->ring[VCS])
cmd |= MI_INVALIDATE_BSD;
}
u32 flush_domains)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
- struct intel_engine_cs *ring = ringbuf->ring;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ struct intel_engine_cs *engine = ringbuf->ring;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false;
u32 flags = 0;
int ret;
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
- if (IS_GEN9(ring->dev))
+ if (IS_GEN9(engine->dev))
vf_flush_wa = true;
}
static int logical_render_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->ring[RCS];
int ret;
- ring->name = "render ring";
- ring->id = RCS;
- ring->exec_id = I915_EXEC_RENDER;
- ring->guc_id = GUC_RENDER_ENGINE;
- ring->mmio_base = RENDER_RING_BASE;
+ engine->name = "render ring";
+ engine->id = RCS;
+ engine->exec_id = I915_EXEC_RENDER;
+ engine->guc_id = GUC_RENDER_ENGINE;
+ engine->mmio_base = RENDER_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
+ logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
if (HAS_L3_DPF(dev))
- ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_vfuncs(dev, engine);
/* Override some for render ring. */
if (INTEL_INFO(dev)->gen >= 9)
- ring->init_hw = gen9_init_render_ring;
+ engine->init_hw = gen9_init_render_ring;
else
- ring->init_hw = gen8_init_render_ring;
- ring->init_context = gen8_init_rcs_context;
- ring->cleanup = intel_fini_pipe_control;
- ring->emit_flush = gen8_emit_flush_render;
- ring->emit_request = gen8_emit_request_render;
+ engine->init_hw = gen8_init_render_ring;
+ engine->init_context = gen8_init_rcs_context;
+ engine->cleanup = intel_fini_pipe_control;
+ engine->emit_flush = gen8_emit_flush_render;
+ engine->emit_request = gen8_emit_request_render;
- ring->dev = dev;
+ engine->dev = dev;
- ret = intel_init_pipe_control(ring);
+ ret = intel_init_pipe_control(engine);
if (ret)
return ret;
- ret = intel_init_workaround_bb(ring);
+ ret = intel_init_workaround_bb(engine);
if (ret) {
/*
* We continue even if we fail to initialize WA batch
ret);
}
- ret = logical_ring_init(dev, ring);
+ ret = logical_ring_init(dev, engine);
if (ret) {
- lrc_destroy_wa_ctx_obj(ring);
+ lrc_destroy_wa_ctx_obj(engine);
}
return ret;
static int logical_bsd_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+ struct intel_engine_cs *engine = &dev_priv->ring[VCS];
- ring->name = "bsd ring";
- ring->id = VCS;
- ring->exec_id = I915_EXEC_BSD;
- ring->guc_id = GUC_VIDEO_ENGINE;
- ring->mmio_base = GEN6_BSD_RING_BASE;
+ engine->name = "bsd ring";
+ engine->id = VCS;
+ engine->exec_id = I915_EXEC_BSD;
+ engine->guc_id = GUC_VIDEO_ENGINE;
+ engine->mmio_base = GEN6_BSD_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, engine);
- return logical_ring_init(dev, ring);
+ return logical_ring_init(dev, engine);
}
static int logical_bsd2_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+ struct intel_engine_cs *engine = &dev_priv->ring[VCS2];
- ring->name = "bsd2 ring";
- ring->id = VCS2;
- ring->exec_id = I915_EXEC_BSD;
- ring->guc_id = GUC_VIDEO_ENGINE2;
- ring->mmio_base = GEN8_BSD2_RING_BASE;
+ engine->name = "bsd2 ring";
+ engine->id = VCS2;
+ engine->exec_id = I915_EXEC_BSD;
+ engine->guc_id = GUC_VIDEO_ENGINE2;
+ engine->mmio_base = GEN8_BSD2_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, engine);
- return logical_ring_init(dev, ring);
+ return logical_ring_init(dev, engine);
}
static int logical_blt_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[BCS];
+ struct intel_engine_cs *engine = &dev_priv->ring[BCS];
- ring->name = "blitter ring";
- ring->id = BCS;
- ring->exec_id = I915_EXEC_BLT;
- ring->guc_id = GUC_BLITTER_ENGINE;
- ring->mmio_base = BLT_RING_BASE;
+ engine->name = "blitter ring";
+ engine->id = BCS;
+ engine->exec_id = I915_EXEC_BLT;
+ engine->guc_id = GUC_BLITTER_ENGINE;
+ engine->mmio_base = BLT_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, engine);
- return logical_ring_init(dev, ring);
+ return logical_ring_init(dev, engine);
}
static int logical_vebox_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+ struct intel_engine_cs *engine = &dev_priv->ring[VECS];
- ring->name = "video enhancement ring";
- ring->id = VECS;
- ring->exec_id = I915_EXEC_VEBOX;
- ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
- ring->mmio_base = VEBOX_RING_BASE;
+ engine->name = "video enhancement ring";
+ engine->id = VECS;
+ engine->exec_id = I915_EXEC_VEBOX;
+ engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
+ engine->mmio_base = VEBOX_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, engine);
- return logical_ring_init(dev, ring);
+ return logical_ring_init(dev, engine);
}
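/*
 * [Editor's note] The five logical_*_ring_init() functions above differ
 * only in a handful of per-engine constants. A hypothetical
 * table-driven shape, purely illustrative (this is not how the driver
 * is organised, and the struct and values below are invented for the
 * example):
 */
struct engine_info {
	const char *name;
	int id;
	unsigned int exec_id;
	unsigned int guc_id;
	unsigned int mmio_base;
	unsigned int irq_shift;
};

static const struct engine_info engines[] = {
	/* Placeholder values; a real table would carry the I915_EXEC_*,
	 * GUC_*_ENGINE, *_RING_BASE and GEN8_*_IRQ_SHIFT constants used
	 * by the functions above. */
	{ "render ring",            0, 0, 0, 0, 0 },
	{ "bsd ring",               1, 0, 0, 0, 0 },
	{ "bsd2 ring",              2, 0, 0, 0, 0 },
	{ "blitter ring",           3, 0, 0, 0, 0 },
	{ "video enhancement ring", 4, 0, 0, 0, 0 },
};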
/**
struct intel_context *ctx)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int i;
- for_each_ring(ring, dev_priv, i) {
+ for_each_ring(engine, dev_priv, i) {
struct drm_i915_gem_object *ctx_obj =
- ctx->engine[ring->id].state;
+ ctx->engine[engine->id].state;
struct intel_ringbuffer *ringbuf =
- ctx->engine[ring->id].ringbuf;
+ ctx->engine[engine->id].ringbuf;
uint32_t *reg_state;
struct page *page;
if (get_mocs_settings(req->ring->dev, &t)) {
struct drm_i915_private *dev_priv = req->i915;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
enum intel_ring_id ring_id;
/* Program the control registers */
- for_each_ring(ring, dev_priv, ring_id) {
+ for_each_ring(engine, dev_priv, ring_id) {
ret = emit_mocs_control_table(req, &t, ring_id);
if (ret)
return ret;
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->ring[RCS];
struct drm_i915_gem_request *req;
int ret;
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
overlay->active = true;
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
- intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
+ intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return intel_overlay_do_wait_request(overlay, req, NULL);
}
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->ring[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
return ret;
}
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- intel_ring_emit(ring, flip_addr);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(engine, flip_addr);
+ intel_ring_advance(engine);
WARN_ON(overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req, req);
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->ring[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
int ret;
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
}
/* wait for overlay to go idle */
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(engine, flip_addr);
+ intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
if (IS_I830(dev)) {
/* Workaround: Don't disable the overlay fully, since otherwise
* it dies on the next OVERLAY_ON cmd. */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
} else {
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(engine, flip_addr);
+ intel_ring_emit(engine,
+ MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
}
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->ring[RCS];
int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
/* synchronous slowpath */
struct drm_i915_gem_request *req;
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
return ret;
}
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine,
+ MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
ret = intel_overlay_do_wait_request(overlay, req,
intel_overlay_release_old_vid_tail);
static void gen9_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
int unused;
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_ring(ring, dev_priv, unused)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_ring(engine, dev_priv, unused)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
if (HAS_GUC_UCODE(dev))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
int unused;
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_ring(ring, dev_priv, unused)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_ring(engine, dev_priv, unused)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
if (IS_BROADWELL(dev))
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_ring(engine, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
static void cherryview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
int i;
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_ring(engine, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0;
int i;
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_ring(engine, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
bool ret = false;
int i;
goto out_unlock;
dev_priv = i915_mch_dev;
- for_each_ring(ring, dev_priv, i)
- ret |= !list_empty(&ring->request_list);
+ for_each_ring(engine, dev_priv, i)
+ ret |= !list_empty(&engine->request_list);
out_unlock:
spin_unlock_irq(&mchdev_lock);
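/*
 * [Editor's sketch] i915_gpu_busy() above reports the GPU busy if any
 * engine still has requests on its list; the OR-accumulation across
 * engines is equivalent to this standalone form.
 */
#include <stdbool.h>

#define NUM_ENGINES 5

struct engine_model {
	int pending_requests;
};

static bool gpu_busy(const struct engine_model engines[NUM_ENGINES])
{
	bool busy = false;
	int i;

	for (i = 0; i < NUM_ENGINES; i++)
		busy |= engines[i].pending_requests != 0;

	return busy;
}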
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
u32 cmd;
int ret;
if (ret)
return ret;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, cmd);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = req->ring;
+ struct drm_device *dev = engine->dev;
u32 cmd;
int ret;
if (ret)
return ret;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, cmd);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ struct intel_engine_cs *engine = req->ring;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
- intel_ring_emit(ring, 0); /* low dword */
- intel_ring_emit(ring, 0); /* high dword */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(engine, 0); /* low dword */
+ intel_ring_emit(engine, 0); /* high dword */
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
+ intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
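/*
 * [Editor's sketch] The workaround above emits two 6-dword PIPE_CONTROL
 * packets into the reserved ring space: first a CS stall held at the
 * scoreboard, then a posting qword write to scratch. A standalone
 * builder for that packet shape; the opcode and flag encodings below
 * are placeholders for illustration, not taken from i915_reg.h.
 */
#include <stdint.h>
#include <stddef.h>

#define PC_CS_STALL		(1u << 20)	/* placeholder bit */
#define PC_STALL_AT_SCOREBOARD	(1u << 1)	/* placeholder bit */
#define PC_QW_WRITE		(1u << 14)	/* placeholder bit */
#define PC_GLOBAL_GTT		(1u << 2)	/* placeholder bit */
#define OP_PIPE_CONTROL_HDR	0x7a000004u	/* placeholder header */
#define OP_MI_NOOP		0u

static size_t build_pipe_control(uint32_t *cs, uint32_t flags,
				 uint32_t scratch_addr)
{
	size_t n = 0;

	cs[n++] = OP_PIPE_CONTROL_HDR;
	cs[n++] = flags;
	cs[n++] = scratch_addr | PC_GLOBAL_GTT;
	cs[n++] = 0;			/* qword low */
	cs[n++] = 0;			/* qword high */
	cs[n++] = OP_MI_NOOP;		/* pad to the reserved 6 dwords */
	return n;
}
/* Usage mirroring the function above: call once with
 * PC_CS_STALL | PC_STALL_AT_SCOREBOARD, then once with PC_QW_WRITE. */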
gen6_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
u32 flags = 0;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(engine, flags);
+ intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(engine, 0);
+ intel_ring_advance(engine);
return 0;
}
static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, 0);
+ intel_ring_advance(engine);
return 0;
}
gen7_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
u32 flags = 0;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/*
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(engine, flags);
+ intel_ring_emit(engine, scratch_addr);
+ intel_ring_emit(engine, 0);
+ intel_ring_advance(engine);
return 0;
}
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
u32 flags, u32 scratch_addr)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(engine, flags);
+ intel_ring_emit(engine, scratch_addr);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, 0);
+ intel_ring_advance(engine);
return 0;
}
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
- struct intel_engine_cs *ring = req->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = req->ring;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
if (w->count == 0)
return 0;
- ring->gpu_caches_dirty = true;
+ engine->gpu_caches_dirty = true;
ret = intel_ring_flush_all_caches(req);
if (ret)
return ret;
if (ret)
return ret;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
- intel_ring_emit_reg(ring, w->reg[i].addr);
- intel_ring_emit(ring, w->reg[i].value);
+ intel_ring_emit_reg(engine, w->reg[i].addr);
+ intel_ring_emit(engine, w->reg[i].value);
}
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
- ring->gpu_caches_dirty = true;
+ engine->gpu_caches_dirty = true;
ret = intel_ring_flush_all_caches(req);
if (ret)
return ret;
static int
gen6_add_request(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
- if (ring->semaphore.signal)
- ret = ring->semaphore.signal(req, 4);
+ if (engine->semaphore.signal)
+ ret = engine->semaphore.signal(req, 4);
else
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
- intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- __intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(engine,
+ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(engine, MI_USER_INTERRUPT);
+ __intel_ring_advance(engine);
return 0;
}
static int
pc_render_add_request(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ struct intel_engine_cs *engine = req->ring;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ intel_ring_emit(engine,
+ GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
- intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
- intel_ring_emit(ring, 0);
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ intel_ring_emit(engine,
+ engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(engine, 0);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ intel_ring_emit(engine,
+ GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
- intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
- intel_ring_emit(ring, 0);
- __intel_ring_advance(ring);
+ intel_ring_emit(engine,
+ engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(engine, 0);
+ __intel_ring_advance(engine);
return 0;
}
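/*
 * [Editor's sketch] pc_render_add_request() above flushes to six
 * scratch addresses spaced two cachelines apart so the writes land on
 * separate cachelines. Loop-form equivalent of that address
 * progression (the base value is arbitrary).
 */
#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES 64

int main(void)
{
	uint32_t scratch_addr = 0x1000 + 2 * CACHELINE_BYTES;
	int i;

	for (i = 0; i < 6; i++) {
		printf("PIPE_CONTROL_FLUSH -> 0x%x\n", scratch_addr);
		scratch_addr += 2 * CACHELINE_BYTES; /* next cacheline pair */
	}
	return 0;
}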
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_FLUSH);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
static int
i9xx_add_request(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
- intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- __intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(engine,
+ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(engine, MI_USER_INTERRUPT);
+ __intel_ring_advance(engine);
return 0;
}
u64 offset, u32 length,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring,
+ intel_ring_emit(engine,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965));
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, offset);
+ intel_ring_advance(engine);
return 0;
}
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
- u32 cs_offset = ring->scratch.gtt_offset;
+ struct intel_engine_cs *engine = req->ring;
+ u32 cs_offset = engine->scratch.gtt_offset;
int ret;
ret = intel_ring_begin(req, 6);
return ret;
/* Evict the invalid PTE TLBs */
- intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
- intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
- intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
- intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 0xdeadbeef);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+ intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+ intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+ intel_ring_emit(engine, cs_offset);
+ intel_ring_emit(engine, 0xdeadbeef);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
* stable batch scratch bo area (so that the CS never
* stumbles over its tlb invalidation bug) ...
*/
- intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
- intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
- intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
- intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 4096);
- intel_ring_emit(ring, offset);
-
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+ intel_ring_emit(engine,
+ BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+ intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
+ intel_ring_emit(engine, cs_offset);
+ intel_ring_emit(engine, 4096);
+ intel_ring_emit(engine, offset);
+
+ intel_ring_emit(engine, MI_FLUSH);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
/* ... and execute it. */
offset = cs_offset;
if (ret)
return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE));
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE));
+ intel_ring_advance(engine);
return 0;
}
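/*
 * [Editor's note] The i830 path above blits the user batch into the
 * pinned scratch area at cs_offset (so the CS never walks pages hit by
 * its TLB invalidation bug) and then executes from there. The blit is
 * sized in 4096-byte rows via DIV_ROUND_UP; standalone equivalent:
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int len = 9000;	/* example batch length in bytes */

	/* 9000 bytes -> 3 rows of 4096 bytes */
	printf("rows: %u\n", DIV_ROUND_UP(len, 4096));
	return 0;
}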
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE));
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE));
+ intel_ring_advance(engine);
return 0;
}
int intel_ring_begin(struct drm_i915_gem_request *req,
int num_dwords)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct drm_i915_private *dev_priv;
int ret;
WARN_ON(req == NULL);
- ring = req->ring;
- dev_priv = ring->dev->dev_private;
+ engine = req->ring;
+ dev_priv = engine->dev->dev_private;
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
return ret;
- ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+ ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
- ring->buffer->space -= num_dwords * sizeof(uint32_t);
+ engine->buffer->space -= num_dwords * sizeof(uint32_t);
return 0;
}
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
- int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ struct intel_engine_cs *engine = req->ring;
+ int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
int ret;
if (num_dwords == 0)
return ret;
while (num_dwords--)
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return 0;
}
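/*
 * [Editor's sketch] intel_ring_cacheline_align() above pads the ring
 * tail to the next 64-byte boundary with MI_NOOPs; the dword count it
 * needs works out as below (a model of the intent, not the driver
 * code itself).
 */
#include <stdint.h>

#define CACHELINE_BYTES 64

static unsigned int cacheline_pad_dwords(uint32_t tail)
{
	unsigned int partial = tail & (CACHELINE_BYTES - 1);

	if (partial == 0)
		return 0;	/* already aligned: nothing to emit */

	return (CACHELINE_BYTES - partial) / sizeof(uint32_t);
}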
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
uint32_t cmd;
int ret;
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(ring->dev)->gen >= 8)
+ if (INTEL_INFO(engine->dev)->gen >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
if (invalidate & I915_GEM_GPU_DOMAINS)
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(ring->dev)->gen >= 8) {
- intel_ring_emit(ring, 0); /* upper addr */
- intel_ring_emit(ring, 0); /* value */
+ intel_ring_emit(engine, cmd);
+ intel_ring_emit(engine,
+ I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ if (INTEL_INFO(engine->dev)->gen >= 8) {
+ intel_ring_emit(engine, 0); /* upper addr */
+ intel_ring_emit(engine, 0); /* value */
} else {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, MI_NOOP);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return 0;
}
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
- bool ppgtt = USES_PPGTT(ring->dev) &&
+ struct intel_engine_cs *engine = req->ring;
+ bool ppgtt = USES_PPGTT(engine->dev) &&
!(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
return ret;
/* FIXME(BDW): Address space and security selectors. */
- intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+ intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
(dispatch_flags & I915_DISPATCH_RS ?
MI_BATCH_RESOURCE_STREAMER : 0));
- intel_ring_emit(ring, lower_32_bits(offset));
- intel_ring_emit(ring, upper_32_bits(offset));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, lower_32_bits(offset));
+ intel_ring_emit(engine, upper_32_bits(offset));
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring,
+ intel_ring_emit(engine,
MI_BATCH_BUFFER_START |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
(dispatch_flags & I915_DISPATCH_RS ?
MI_BATCH_RESOURCE_STREAMER : 0));
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, offset);
+ intel_ring_advance(engine);
return 0;
}
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring,
+ intel_ring_emit(engine,
MI_BATCH_BUFFER_START |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, offset);
+ intel_ring_advance(engine);
return 0;
}
static int gen6_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
- struct intel_engine_cs *ring = req->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = req->ring;
+ struct drm_device *dev = engine->dev;
uint32_t cmd;
int ret;
*/
if (invalidate & I915_GEM_DOMAIN_RENDER)
cmd |= MI_INVALIDATE_TLB;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(engine, cmd);
+ intel_ring_emit(engine,
+ I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_INFO(dev)->gen >= 8) {
- intel_ring_emit(ring, 0); /* upper addr */
- intel_ring_emit(ring, 0); /* value */
+ intel_ring_emit(engine, 0); /* upper addr */
+ intel_ring_emit(engine, 0); /* value */
} else {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, MI_NOOP);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return 0;
}
int intel_init_render_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->ring[RCS];
struct drm_i915_gem_object *obj;
int ret;
- ring->name = "render ring";
- ring->id = RCS;
- ring->exec_id = I915_EXEC_RENDER;
- ring->mmio_base = RENDER_RING_BASE;
+ engine->name = "render ring";
+ engine->id = RCS;
+ engine->exec_id = I915_EXEC_RENDER;
+ engine->mmio_base = RENDER_RING_BASE;
if (INTEL_INFO(dev)->gen >= 8) {
if (i915_semaphore_is_enabled(dev)) {
}
}
- ring->init_context = intel_rcs_ctx_init;
- ring->add_request = gen6_add_request;
- ring->flush = gen8_render_ring_flush;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->init_context = intel_rcs_ctx_init;
+ engine->add_request = gen6_add_request;
+ engine->flush = gen8_render_ring_flush;
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+ engine->get_seqno = gen6_ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (i915_semaphore_is_enabled(dev)) {
WARN_ON(!dev_priv->semaphore_obj);
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_rcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_rcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
} else if (INTEL_INFO(dev)->gen >= 6) {
- ring->init_context = intel_rcs_ctx_init;
- ring->add_request = gen6_add_request;
- ring->flush = gen7_render_ring_flush;
+ engine->init_context = intel_rcs_ctx_init;
+ engine->add_request = gen6_add_request;
+ engine->flush = gen7_render_ring_flush;
if (INTEL_INFO(dev)->gen == 6)
- ring->flush = gen6_render_ring_flush;
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->flush = gen6_render_ring_flush;
+ engine->irq_get = gen6_ring_get_irq;
+ engine->irq_put = gen6_ring_put_irq;
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+ engine->get_seqno = gen6_ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
+ engine->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.signal = gen6_signal;
/*
* The current semaphore is only applied on pre-gen8
* platform. And there is no VCS2 ring on the pre-gen8
* initialized as INVALID. Gen8 will initialize the
* sema between VCS2 and RCS later.
*/
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+ engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+ engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+ engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+ engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+ engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+ engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+ engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
} else if (IS_GEN5(dev)) {
- ring->add_request = pc_render_add_request;
- ring->flush = gen4_render_ring_flush;
- ring->get_seqno = pc_render_get_seqno;
- ring->set_seqno = pc_render_set_seqno;
- ring->irq_get = gen5_ring_get_irq;
- ring->irq_put = gen5_ring_put_irq;
- ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
+ engine->add_request = pc_render_add_request;
+ engine->flush = gen4_render_ring_flush;
+ engine->get_seqno = pc_render_get_seqno;
+ engine->set_seqno = pc_render_set_seqno;
+ engine->irq_get = gen5_ring_get_irq;
+ engine->irq_put = gen5_ring_put_irq;
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
} else {
- ring->add_request = i9xx_add_request;
+ engine->add_request = i9xx_add_request;
if (INTEL_INFO(dev)->gen < 4)
- ring->flush = gen2_render_ring_flush;
+ engine->flush = gen2_render_ring_flush;
else
- ring->flush = gen4_render_ring_flush;
- ring->get_seqno = ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->flush = gen4_render_ring_flush;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
- ring->irq_get = i8xx_ring_get_irq;
- ring->irq_put = i8xx_ring_put_irq;
+ engine->irq_get = i8xx_ring_get_irq;
+ engine->irq_put = i8xx_ring_put_irq;
} else {
- ring->irq_get = i9xx_ring_get_irq;
- ring->irq_put = i9xx_ring_put_irq;
+ engine->irq_get = i9xx_ring_get_irq;
+ engine->irq_put = i9xx_ring_put_irq;
}
- ring->irq_enable_mask = I915_USER_INTERRUPT;
+ engine->irq_enable_mask = I915_USER_INTERRUPT;
}
- ring->write_tail = ring_write_tail;
+ engine->write_tail = ring_write_tail;
if (IS_HASWELL(dev))
- ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+ engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
else if (IS_GEN8(dev))
- ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 6)
- ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4)
- ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ engine->dispatch_execbuffer = i965_dispatch_execbuffer;
else if (IS_I830(dev) || IS_845G(dev))
- ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ engine->dispatch_execbuffer = i830_dispatch_execbuffer;
else
- ring->dispatch_execbuffer = i915_dispatch_execbuffer;
- ring->init_hw = init_render_ring;
- ring->cleanup = render_ring_cleanup;
+ engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+ engine->init_hw = init_render_ring;
+ engine->cleanup = render_ring_cleanup;
/* Workaround batchbuffer to combat CS tlb bug. */
if (HAS_BROKEN_CS_TLB(dev)) {
return ret;
}
- ring->scratch.obj = obj;
- ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
+ engine->scratch.obj = obj;
+ engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
- ret = intel_init_ring_buffer(dev, ring);
+ ret = intel_init_ring_buffer(dev, engine);
if (ret)
return ret;
if (INTEL_INFO(dev)->gen >= 5) {
- ret = intel_init_pipe_control(ring);
+ ret = intel_init_pipe_control(engine);
if (ret)
return ret;
}
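Each init function in this file fills in a per-engine vtable exactly once, so the request paths can call through engine->flush, engine->add_request, and so on without re-checking the hardware generation on every submission. A minimal standalone C sketch of that dispatch pattern follows; the names are hypothetical illustrations, not the i915 API:

/* Minimal, self-contained illustration of the vtable pattern: callbacks are
 * chosen once at init time, then hot paths dispatch through the pointers. */
#include <stdio.h>

struct engine {
	const char *name;
	int (*flush)(struct engine *e);
};

static int gen6_flush(struct engine *e)
{
	printf("%s: gen6-style flush\n", e->name);
	return 0;
}

static int gen8_flush(struct engine *e)
{
	printf("%s: gen8-style flush\n", e->name);
	return 0;
}

static void engine_init(struct engine *e, const char *name, int gen)
{
	e->name = name;
	e->flush = (gen >= 8) ? gen8_flush : gen6_flush;	/* decided once */
}

int main(void)
{
	struct engine rcs;

	engine_init(&rcs, "render ring", 8);
	return rcs.flush(&rcs);	/* dispatches via the vtable */
}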
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+ struct intel_engine_cs *engine = &dev_priv->ring[VCS];
- ring->name = "bsd ring";
- ring->id = VCS;
- ring->exec_id = I915_EXEC_BSD;
+ engine->name = "bsd ring";
+ engine->id = VCS;
+ engine->exec_id = I915_EXEC_BSD;
- ring->write_tail = ring_write_tail;
+ engine->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6) {
- ring->mmio_base = GEN6_BSD_RING_BASE;
+ engine->mmio_base = GEN6_BSD_RING_BASE;
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN6(dev))
- ring->write_tail = gen6_bsd_ring_write_tail;
- ring->flush = gen6_bsd_ring_flush;
- ring->add_request = gen6_add_request;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->write_tail = gen6_bsd_ring_write_tail;
+ engine->flush = gen6_bsd_ring_flush;
+ engine->add_request = gen6_add_request;
+ engine->get_seqno = gen6_ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (INTEL_INFO(dev)->gen >= 8) {
- ring->irq_enable_mask =
+ engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->dispatch_execbuffer =
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
} else {
- ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- ring->dispatch_execbuffer =
+ engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ engine->irq_get = gen6_ring_get_irq;
+ engine->irq_put = gen6_ring_put_irq;
+ engine->dispatch_execbuffer =
gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ engine->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.signal = gen6_signal;
+ engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+ engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+ engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+ engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+ engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+ engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+ engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+ engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
} else {
- ring->mmio_base = BSD_RING_BASE;
- ring->flush = bsd_ring_flush;
- ring->add_request = i9xx_add_request;
- ring->get_seqno = ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->mmio_base = BSD_RING_BASE;
+ engine->flush = bsd_ring_flush;
+ engine->add_request = i9xx_add_request;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (IS_GEN5(dev)) {
- ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
- ring->irq_get = gen5_ring_get_irq;
- ring->irq_put = gen5_ring_put_irq;
+ engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
+ engine->irq_get = gen5_ring_get_irq;
+ engine->irq_put = gen5_ring_put_irq;
} else {
- ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
- ring->irq_get = i9xx_ring_get_irq;
- ring->irq_put = i9xx_ring_put_irq;
+ engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+ engine->irq_get = i9xx_ring_get_irq;
+ engine->irq_put = i9xx_ring_put_irq;
}
- ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ engine->dispatch_execbuffer = i965_dispatch_execbuffer;
}
- ring->init_hw = init_ring_common;
+ engine->init_hw = init_ring_common;
- return intel_init_ring_buffer(dev, ring);
+ return intel_init_ring_buffer(dev, engine);
}
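The mbox.wait[] and mbox.signal[] tables above are keyed by the other engine's id: as I read the upstream gen6_ring_sync convention, the signaller engine's wait[] entry for the waiter's engine id supplies the MI_SEMAPHORE_SYNC_* encoding, with the self entry and the absent pre-gen8 VCS2 entry left INVALID. A toy lookup to make the indexing concrete; the encodings are placeholder values, not the real register fields:

/* Toy model of the VCS (bsd) wait table set up above. */
#include <stdio.h>

enum ring_id { RCS, VCS, BCS, VECS, VCS2, NUM_RINGS };

#define SYNC_INVALID 0xffffu	/* hypothetical stand-in for MI_SEMAPHORE_SYNC_INVALID */
#define SYNC_VR      0x1u	/* hypothetical: "RCS waits on VCS" */
#define SYNC_VB      0x2u	/* hypothetical: "BCS waits on VCS" */
#define SYNC_VVE     0x3u	/* hypothetical: "VECS waits on VCS" */

int main(void)
{
	unsigned int vcs_wait[NUM_RINGS] = {
		[RCS]  = SYNC_VR,
		[VCS]  = SYNC_INVALID,	/* an engine never waits on itself */
		[BCS]  = SYNC_VB,
		[VECS] = SYNC_VVE,
		[VCS2] = SYNC_INVALID,	/* no VCS2 on pre-gen8 */
	};
	enum ring_id waiter = RCS;

	/* RCS waiting on VCS consults the signaller's (VCS) table: */
	printf("wait_mbox = %#x\n", vcs_wait[waiter]);
	return 0;
}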
/**
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
-
- ring->name = "bsd2 ring";
- ring->id = VCS2;
- ring->exec_id = I915_EXEC_BSD;
-
- ring->write_tail = ring_write_tail;
- ring->mmio_base = GEN8_BSD2_RING_BASE;
- ring->flush = gen6_bsd_ring_flush;
- ring->add_request = gen6_add_request;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
- ring->irq_enable_mask =
+ struct intel_engine_cs *engine = &dev_priv->ring[VCS2];
+
+ engine->name = "bsd2 ring";
+ engine->id = VCS2;
+ engine->exec_id = I915_EXEC_BSD;
+
+ engine->write_tail = ring_write_tail;
+ engine->mmio_base = GEN8_BSD2_RING_BASE;
+ engine->flush = gen6_bsd_ring_flush;
+ engine->add_request = gen6_add_request;
+ engine->get_seqno = gen6_ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
+ engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->dispatch_execbuffer =
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
- ring->init_hw = init_ring_common;
+ engine->init_hw = init_ring_common;
- return intel_init_ring_buffer(dev, ring);
+ return intel_init_ring_buffer(dev, engine);
}
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[BCS];
-
- ring->name = "blitter ring";
- ring->id = BCS;
- ring->exec_id = I915_EXEC_BLT;
-
- ring->mmio_base = BLT_RING_BASE;
- ring->write_tail = ring_write_tail;
- ring->flush = gen6_ring_flush;
- ring->add_request = gen6_add_request;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ struct intel_engine_cs *engine = &dev_priv->ring[BCS];
+
+ engine->name = "blitter ring";
+ engine->id = BCS;
+ engine->exec_id = I915_EXEC_BLT;
+
+ engine->mmio_base = BLT_RING_BASE;
+ engine->write_tail = ring_write_tail;
+ engine->flush = gen6_ring_flush;
+ engine->add_request = gen6_add_request;
+ engine->get_seqno = gen6_ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (INTEL_INFO(dev)->gen >= 8) {
- ring->irq_enable_mask =
+ engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
} else {
- ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+ engine->irq_get = gen6_ring_get_irq;
+ engine->irq_put = gen6_ring_put_irq;
+ engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.signal = gen6_signal;
- ring->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.signal = gen6_signal;
+ engine->semaphore.sync_to = gen6_ring_sync;
/*
* The current semaphore is only applied on pre-gen8
* platform. And there is no VCS2 ring on the pre-gen8
* platform. So the semaphore between BCS and VCS2 is
* initialized as INVALID. Gen8 will initialize the
* sema between BCS and VCS2 later.
*/
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+ engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+ engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+ engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+ engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+ engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+ engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+ engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
- ring->init_hw = init_ring_common;
+ engine->init_hw = init_ring_common;
- return intel_init_ring_buffer(dev, ring);
+ return intel_init_ring_buffer(dev, engine);
}
int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+ struct intel_engine_cs *engine = &dev_priv->ring[VECS];
- ring->name = "video enhancement ring";
- ring->id = VECS;
- ring->exec_id = I915_EXEC_VEBOX;
+ engine->name = "video enhancement ring";
+ engine->id = VECS;
+ engine->exec_id = I915_EXEC_VEBOX;
- ring->mmio_base = VEBOX_RING_BASE;
- ring->write_tail = ring_write_tail;
- ring->flush = gen6_ring_flush;
- ring->add_request = gen6_add_request;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->mmio_base = VEBOX_RING_BASE;
+ engine->write_tail = ring_write_tail;
+ engine->flush = gen6_ring_flush;
+ engine->add_request = gen6_add_request;
+ engine->get_seqno = gen6_ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (INTEL_INFO(dev)->gen >= 8) {
- ring->irq_enable_mask =
+ engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
} else {
- ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
- ring->irq_get = hsw_vebox_get_irq;
- ring->irq_put = hsw_vebox_put_irq;
- ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+ engine->irq_get = hsw_vebox_get_irq;
+ engine->irq_put = hsw_vebox_put_irq;
+ engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ engine->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.signal = gen6_signal;
+ engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+ engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+ engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+ engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+ engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+ engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+ engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+ engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
- ring->init_hw = init_ring_common;
+ engine->init_hw = init_ring_common;
- return intel_init_ring_buffer(dev, ring);
+ return intel_init_ring_buffer(dev, engine);
}
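On the gen8 paths in these functions, every engine shares a single user-interrupt bit; the per-engine mask is formed by shifting that bit into the engine's field of the combined interrupt register (GEN8_VCS1_IRQ_SHIFT, GEN8_VECS_IRQ_SHIFT, and so on). A small sketch of the scheme, with made-up shift values rather than the real GEN8_*_IRQ_SHIFT definitions:

#include <stdio.h>

#define USER_INTERRUPT   (1u << 0)
#define VCS1_IRQ_SHIFT   0	/* hypothetical */
#define VCS2_IRQ_SHIFT   16	/* hypothetical */

int main(void)
{
	/* Same bit, different field of the shared register per engine. */
	unsigned int vcs1_mask = USER_INTERRUPT << VCS1_IRQ_SHIFT;
	unsigned int vcs2_mask = USER_INTERRUPT << VCS2_IRQ_SHIFT;

	printf("vcs1 %#x, vcs2 %#x\n", vcs1_mask, vcs2_mask);
	return 0;
}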
int
intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
int ret;
- if (!ring->gpu_caches_dirty)
+ if (!engine->gpu_caches_dirty)
return 0;
- ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
+ ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
- ring->gpu_caches_dirty = false;
+ engine->gpu_caches_dirty = false;
return 0;
}
int
intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->ring;
uint32_t flush_domains;
int ret;
flush_domains = 0;
- if (ring->gpu_caches_dirty)
+ if (engine->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
- ring->gpu_caches_dirty = false;
+ engine->gpu_caches_dirty = false;
return 0;
}
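The two helpers above pair around engine->gpu_caches_dirty: the flush is skipped entirely when nothing has written through the GPU domains, and the invalidate folds a flush of dirty write caches into the same emit so stale data cannot survive the transition. A toy model of that bookkeeping, for illustration only:

#include <stdbool.h>
#include <stdio.h>

struct toy_engine {
	bool gpu_caches_dirty;
};

static void toy_flush(struct toy_engine *e)
{
	if (!e->gpu_caches_dirty)
		return;		/* matches the early-out above */
	printf("flushing write caches\n");
	e->gpu_caches_dirty = false;
}

static void toy_invalidate(struct toy_engine *e)
{
	/* Invalidate read caches; flush writes too if they are dirty. */
	if (e->gpu_caches_dirty)
		printf("flushing write caches before invalidate\n");
	printf("invalidating read caches\n");
	e->gpu_caches_dirty = false;
}

int main(void)
{
	struct toy_engine e = { .gpu_caches_dirty = true };

	toy_invalidate(&e);	/* flush + invalidate */
	toy_flush(&e);		/* no-op: nothing dirty */
	return 0;
}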
((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
(i915_semaphore_seqno_size * (__ring)->id))
-#define GEN8_RING_SEMAPHORE_INIT do { \
+#define GEN8_RING_SEMAPHORE_INIT(e) do { \
if (!dev_priv->semaphore_obj) { \
break; \
} \
- ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
- ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
- ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
- ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
- ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
- ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
+ (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
+ (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
+ (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
+ (e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
+ (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
+ (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
} while(0)
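The macro change is the interesting part of this hunk: the old GEN8_RING_SEMAPHORE_INIT silently captured whatever variable named ring happened to be in scope, which is exactly what a ring-to-engine rename breaks. Taking an explicit (e) parameter makes the dependency visible at every call site, which is why the callers above now read GEN8_RING_SEMAPHORE_INIT(engine). A minimal illustration of the hazard, not driver code:

#include <stdio.h>

struct eng { int id; };

/* Old style: only compiles where a variable literally named "ring" exists. */
#define OLD_INIT()   do { printf("init id=%d\n", ring.id); } while (0)

/* New style: works with any variable the caller passes in. */
#define NEW_INIT(e)  do { printf("init id=%d\n", (e)->id); } while (0)

int main(void)
{
	struct eng engine = { .id = 2 };

	/* OLD_INIT(); would fail here: no identifier "ring" in scope. */
	NEW_INIT(&engine);
	return 0;
}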
enum intel_ring_hangcheck_action {