return vma;
}
+ GT_TRACE(timeline->gt, "new HWSP allocated\n");
+
vma->private = hwsp;
hwsp->gt = timeline->gt;
hwsp->vma = vma;
tl->hwsp_offset =
i915_ggtt_offset(tl->hwsp_ggtt) +
offset_in_page(tl->hwsp_offset);
+ GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
+ tl->fence_context, tl->hwsp_offset);
cacheline_acquire(tl->hwsp_cacheline);
if (atomic_fetch_inc(&tl->pin_count)) {
int err;
might_lock(&tl->gt->ggtt->vm.mutex);
+ GT_TRACE(tl->gt, "timeline:%llx wrapped\n", tl->fence_context);
/*
* If there is an outstanding GPU reference to this cacheline,
memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);
tl->hwsp_offset += i915_ggtt_offset(vma);
+ GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
+ tl->fence_context, tl->hwsp_offset);
cacheline_acquire(cl);
tl->hwsp_cacheline = cl;
struct intel_timeline *tl = timelines[n];
if (!err && *tl->hwsp_seqno != n) {
- pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
- n, *tl->hwsp_seqno);
+ pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
+ n, tl->hwsp_offset, *tl->hwsp_seqno);
+ GEM_TRACE_DUMP();
err = -EINVAL;
}
intel_timeline_put(tl);
struct intel_timeline *tl = timelines[n];
if (!err && *tl->hwsp_seqno != n) {
- pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
- n, *tl->hwsp_seqno);
+ pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
+ n, tl->hwsp_offset, *tl->hwsp_seqno);
+ GEM_TRACE_DUMP();
err = -EINVAL;
}
intel_timeline_put(tl);
}
if (*tl->hwsp_seqno != count) {
- pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
-        count, *tl->hwsp_seqno);
+ /* Include the HWSP offset (as a %x argument, matching the other
+  * seqno-mismatch reports) so the stale slot can be located in the
+  * GEM trace dump below.
+  */
+ pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
+        count, tl->hwsp_offset, *tl->hwsp_seqno);
+ GEM_TRACE_DUMP();
err = -EINVAL;
}