timelines_init(&i915->gt);
}
-static void timeline_add_to_active(struct intel_timeline *tl)
-{
- struct intel_gt_timelines *gt = &tl->gt->timelines;
-
- mutex_lock(&gt->mutex);
- list_add(&tl->link, &gt->active_list);
- mutex_unlock(&gt->mutex);
-}
-
-static void timeline_remove_from_active(struct intel_timeline *tl)
-{
- struct intel_gt_timelines *gt = &tl->gt->timelines;
-
- mutex_lock(&gt->mutex);
- list_del(&tl->link);
- mutex_unlock(&gt->mutex);
-}
-
-static void timelines_park(struct intel_gt *gt)
-{
- struct intel_gt_timelines *timelines = &gt->timelines;
- struct intel_timeline *timeline;
-
- mutex_lock(&timelines->mutex);
- list_for_each_entry(timeline, &timelines->active_list, link) {
- /*
- * All known fences are completed so we can scrap
- * the current sync point tracking and start afresh,
- * any attempt to wait upon a previous sync point
- * will be skipped as the fence was signaled.
- */
- i915_syncmap_free(&timeline->sync);
- }
- mutex_unlock(&timelines->mutex);
-}
-
-/**
- * intel_timelines_park - called when the driver idles
- * @i915: the drm_i915_private device
- *
- * When the driver is completely idle, we know that all of our sync points
- * have been signaled and our tracking is then entirely redundant. Any request
- * to wait upon an older sync point will be completed instantly as we know
- * the fence is signaled and therefore we will not even look them up in the
- * sync point map.
- */
-void intel_timelines_park(struct drm_i915_private *i915)
-{
- timelines_park(&i915->gt);
-}
-
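For reference, the sync map being scrapped above caches, per fence context, the last sync point this timeline waited upon, so a repeat wait can be skipped cheaply. A minimal sketch of that pattern, assuming the i915_syncmap_is_later() and i915_syncmap_set() helpers from i915_syncmap.h; the await_fence() wrapper itself is hypothetical:

/* Hypothetical wrapper; wait emission and error paths elided. */
static int await_fence(struct intel_timeline *tl, struct dma_fence *fence)
{
	/* Already waited upon an equal-or-later point on this context? */
	if (i915_syncmap_is_later(&tl->sync, fence->context,
				  lower_32_bits(fence->seqno)))
		return 0; /* fence is redundant, skip the wait */

	/* ... emit the actual wait upon @fence here ... */

	/* Record the new high-water mark for this fence context. */
	return i915_syncmap_set(&tl->sync, fence->context,
				lower_32_bits(fence->seqno));
}

Once the driver is idle, every tracked fence has signaled and the whole cache is dead weight, which is why park() could free it wholesale.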
void intel_timeline_fini(struct intel_timeline *timeline)
{
GEM_BUG_ON(timeline->pin_count);
GEM_BUG_ON(!list_empty(&timeline->requests));
- i915_syncmap_free(&timeline->sync);
-
if (timeline->hwsp_cacheline)
cacheline_free(timeline->hwsp_cacheline);
else
@@ ... @@ int intel_timeline_pin(struct intel_timeline *tl)
if (tl->pin_count++)
return 0;
GEM_BUG_ON(!tl->pin_count);
+ GEM_BUG_ON(tl->active_count);
err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
@@ ... @@ int intel_timeline_pin(struct intel_timeline *tl)
offset_in_page(tl->hwsp_offset);
cacheline_acquire(tl->hwsp_cacheline);
- timeline_add_to_active(tl);
return 0;
@@ ... @@ int intel_timeline_pin(struct intel_timeline *tl)
tl->pin_count = 0;
return err;
}
+void intel_timeline_enter(struct intel_timeline *tl)
+{
+ struct intel_gt_timelines *timelines = &tl->gt->timelines;
+
+ GEM_BUG_ON(!tl->pin_count);
+ if (tl->active_count++)
+ return;
+ GEM_BUG_ON(!tl->active_count); /* overflow? */
+
+ mutex_lock(&timelines->mutex);
+ list_add(&tl->link, &timelines->active_list);
+ mutex_unlock(&timelines->mutex);
+}
+
+void intel_timeline_exit(struct intel_timeline *tl)
+{
+ struct intel_gt_timelines *timelines = &tl->gt->timelines;
+
+ GEM_BUG_ON(!tl->active_count);
+ if (--tl->active_count)
+ return;
+
+ mutex_lock(&timelines->mutex);
+ list_del(&tl->link);
+ mutex_unlock(&timelines->mutex);
+
+ /*
+ * Since this timeline is idle, all barriers upon which we were waiting
+ * must also be complete and so we can discard the last used barriers
+ * without loss of information.
+ */
+ i915_syncmap_free(&tl->sync);
+}
+
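Taken together with the new assertions in pin/unpin, the implied calling convention is: pin first, call enter/exit with tl->mutex held (it is what protects active_count), and let the last exit precede the final unpin. A rough sketch of one busy period with a hypothetical caller; in practice the exit happens when the last request on the timeline is retired, not inline like this:

static int hypothetical_busy_period(struct intel_timeline *tl)
{
	int err;

	err = intel_timeline_pin(tl); /* may stay pinned across many periods */
	if (err)
		return err;

	mutex_lock(&tl->mutex);
	intel_timeline_enter(tl); /* 0 -> 1: joins gt->timelines.active_list */

	/* ... construct and submit requests against tl ... */

	intel_timeline_exit(tl); /* 1 -> 0: leaves the list, frees the syncmap */
	mutex_unlock(&tl->mutex);

	intel_timeline_unpin(tl); /* GEM_BUG_ON(tl->active_count) now holds */
	return 0;
}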
static u32 timeline_advance(struct intel_timeline *tl)
{
GEM_BUG_ON(!tl->pin_count);
@@ ... @@ void intel_timeline_unpin(struct intel_timeline *tl)
GEM_BUG_ON(!tl->pin_count);
if (--tl->pin_count)
return;
- timeline_remove_from_active(tl);
+ GEM_BUG_ON(tl->active_count);
cacheline_release(tl->hwsp_cacheline);
- /*
- * Since this timeline is idle, all bariers upon which we were waiting
- * must also be complete and so we can discard the last used barriers
- * without loss of information.
- */
- i915_syncmap_free(&tl->sync);
-
__i915_vma_unpin(tl->hwsp_ggtt);
}
@@ ... @@ struct intel_timeline {
struct mutex mutex; /* protects the flow of requests */
+ /*
+ * pin_count and active_count track essentially the same thing:
+ * How many requests are in flight or may be under construction.
+ *
+ * We need two distinct counters so that we can assign different
+ * lifetimes to the events for different use-cases. For example,
+ * we want to permanently keep the timeline pinned for the kernel
+ * context so that we can issue requests at any time without having
+ * to acquire space in the GGTT. However, we want to keep tracking
+ * the activity (to be able to detect when we become idle) along that
+ * permanently pinned timeline and so end up requiring two counters.
+ *
+ * Note that the active_count is protected by the intel_timeline.mutex,
+ * but the pin_count is protected by a combination of serialisation
+ * from the intel_context caller plus internal atomicity.
+ */
unsigned int pin_count;
+ unsigned int active_count;
+
const u32 *hwsp_seqno;
struct i915_vma *hwsp_ggtt;
u32 hwsp_offset;
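To make the kernel-context example from the comment concrete: the two counters deliberately diverge there. pin_count never returns to zero for the lifetime of the context, while active_count keeps rising and falling so idleness remains detectable. An illustrative sequence (error handling elided; the counter values in the comments are the point):

	/* Context creation: pin the timeline once, for the context's life. */
	err = intel_timeline_pin(tl);	/* pin_count: 0 -> 1, stays >= 1 */

	/* Each busy period, under tl->mutex: */
	intel_timeline_enter(tl);	/* active_count: 0 -> 1, busy */
	/* ... requests execute and retire ... */
	intel_timeline_exit(tl);	/* active_count: 1 -> 0, idle again */

	/* No unpin between periods: the GGTT slot is kept, so the kernel
	 * context can emit a request at any time without re-acquiring
	 * space in the GGTT.
	 */

With a single shared counter, detecting idleness would have required dropping the permanent pin, which is exactly what the kernel context wants to avoid.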