We were using the first page of the kernel context render state for
sharing data with the GuC. While this is justified by the fact that
those pages are otherwise unused (note that the GuC still enforces this
layout and refuses to work if we remove the extra page in front), it is
also confusing (why are we using this particular page?). Let's allocate
a separate object instead.
v2: Drop kernel_context from GuC suspend/resume action handlers (Michel)
Suggested-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171025200020.16636-4-michal.winiarski@intel.com
memset(desc, 0, sizeof(*desc));
}
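+/*
+ * Allocate a page in the GGTT and map it for CPU access, to be used
+ * for sharing data between the host and the GuC.
+ */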
+static int guc_shared_data_create(struct intel_guc *guc)
+{
+ struct i915_vma *vma;
+ void *vaddr;
+
+ vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
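+ /* keep a permanent CPU mapping so the host can access the shared data */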
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma);
+ return PTR_ERR(vaddr);
+ }
+
+ guc->shared_data = vma;
+ guc->shared_data_vaddr = vaddr;
+
+ return 0;
+}
+
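+/* Release the CPU mapping and the GGTT allocation of the shared page */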
+static void guc_shared_data_destroy(struct intel_guc *guc)
+{
+ i915_gem_object_unpin_map(guc->shared_data->obj);
+ i915_vma_unpin_and_release(&guc->shared_data);
+}
+
/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct i915_guc_client *client,
struct drm_i915_gem_request *rq)
if (ret)
return ret;
+ ret = guc_shared_data_create(guc);
+ if (ret)
+ goto err_stage_desc_pool;
+
ret = intel_guc_log_create(guc);
if (ret < 0)
- goto err_stage_desc_pool;
+ goto err_shared_data;
ret = guc_ads_create(guc);
if (ret < 0)
err_log:
intel_guc_log_destroy(guc);
+err_shared_data:
+ guc_shared_data_destroy(guc);
err_stage_desc_pool:
guc_stage_desc_pool_destroy(guc);
return ret;
guc_ads_destroy(guc);
intel_guc_log_destroy(guc);
+ guc_shared_data_destroy(guc);
guc_stage_desc_pool_destroy(guc);
}
int intel_guc_suspend(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
- struct i915_gem_context *ctx;
u32 data[3];
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
gen9_disable_guc_interrupts(dev_priv);
- ctx = dev_priv->kernel_context;
-
data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
/* any value greater than GUC_POWER_D0 */
data[1] = GUC_POWER_D1;
- /* first page is shared data with GuC */
- data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
- LRC_GUCSHR_PN * PAGE_SIZE;
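+ /* GGTT offset of the dedicated page shared with the GuC */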
+ data[2] = guc_ggtt_offset(guc->shared_data);
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
int intel_guc_resume(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
- struct i915_gem_context *ctx;
u32 data[3];
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
if (i915_modparams.guc_log_level >= 0)
gen9_enable_guc_interrupts(dev_priv);
- ctx = dev_priv->kernel_context;
-
data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
- /* first page is shared data with GuC */
- data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
- LRC_GUCSHR_PN * PAGE_SIZE;
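+ /* GGTT offset of the dedicated page shared with the GuC */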
+ data[2] = guc_ggtt_offset(guc->shared_data);
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
struct ida stage_ids;
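+ /* page shared between the host and the GuC, e.g. for suspend/resume data */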
+ struct i915_vma *shared_data;
+ void *shared_data_vaddr;
struct i915_guc_client *execbuf_client;