return ERR_PTR(err);
}
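+/*
+ * drmm release action: sanitize every GT so a subsequent probe starts
+ * from a clean GuC submission state.
+ */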
+static void xe_device_sanitize(struct drm_device *drm, void *arg)
+{
+ struct xe_device *xe = arg;
+ struct xe_gt *gt;
+ u8 id;
+
+ for_each_gt(gt, xe, id)
+ xe_gt_sanitize(gt);
+}
+
int xe_device_probe(struct xe_device *xe)
{
struct xe_gt *gt;
xe_debugfs_register(xe);
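+	/* register a drmm release action so every GT is sanitized on unbind */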
+ err = drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe);
+ if (err)
+ return err;
+
return 0;
err_irq_shutdown:
#include "xe_device.h"
#include "xe_bo.h"
#include "xe_gt.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_wopcm.h"
* therefore flushing WC buffers. Is that really true here?
*/
xe_mmio_write32(gt, GFX_FLSH_CNTL_GEN6.reg, GFX_FLSH_CNTL_EN);
- if (xe_device_guc_submission_enabled(gt_to_xe(gt))) {
+
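+	/*
+	 * Invalidate via the GuC when submission is up; otherwise fall back
+	 * to the direct MMIO invalidation below.
+	 */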
+ if (gt->uc.guc.submission_state.enabled) {
+ int seqno;
+
+ seqno = xe_gt_tlb_invalidation_guc(gt);
+ XE_WARN_ON(seqno <= 0);
+ if (seqno > 0)
+ xe_gt_tlb_invalidation_wait(gt, seqno);
+ } else if (xe_device_guc_submission_enabled(gt_to_xe(gt))) {
struct xe_device *xe = gt_to_xe(gt);
- /* TODO: also use vfunc here */
if (xe->info.platform == XE_PVC) {
xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1.reg,
PVC_GUC_TLB_INV_DESC1_INVALIDATE);
return 0;
}
+void xe_gt_sanitize(struct xe_gt *gt)
+{
+ /*
+	 * FIXME: if xe_uc_sanitize is called here, the driver will not
+	 * reload on TGL
+ */
+ gt->uc.guc.submission_state.enabled = false;
+}
+
static void gt_fini(struct drm_device *drm, void *arg)
{
struct xe_gt *gt = arg;
drm_info(&xe->drm, "GT reset started\n");
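+	/* disable GuC submission state before tearing the GT down for reset */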
+ xe_gt_sanitize(gt);
+
xe_device_mem_access_get(gt_to_xe(gt));
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (err)
if (!xe_device_guc_submission_enabled(gt_to_xe(gt)))
return -ENODEV;
+ xe_gt_sanitize(gt);
+
xe_device_mem_access_get(gt_to_xe(gt));
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (err)
int xe_gt_resume(struct xe_gt *gt);
void xe_gt_reset_async(struct xe_gt *gt);
void xe_gt_migrate_wait(struct xe_gt *gt);
+void xe_gt_sanitize(struct xe_gt *gt);
struct xe_gt *xe_find_full_gt(struct xe_gt *gt);
goto retry_userptr;
if (!ret) {
- ret = xe_gt_tlb_invalidation(gt, NULL, vma);
+ ret = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
if (ret >= 0)
ret = 0;
}
return ret;
}
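+/* Compose the GuC invalidation-op dword: type, heavy mode, cache flush */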
+#define MAKE_INVAL_OP(type) ((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
+ XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
+ XE_GUC_TLB_INVAL_FLUSH_CACHE)
+
/**
- * xe_gt_tlb_invalidation - Issue a TLB invalidation on this GT
+ * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
+ * @gt: graphics tile
+ *
+ * Issue a TLB invalidation for the GuC. Completion of the invalidation is
+ * asynchronous; the caller can pass the returned seqno to
+ * xe_gt_tlb_invalidation_wait to wait for completion.
+ *
+ * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
+ * negative error code on error.
+ */
+int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_TLB_INVALIDATION,
+ 0, /* seqno, replaced in send_tlb_invalidation */
+ MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
+ };
+
+	return send_tlb_invalidation(&gt->uc.guc, NULL, action,
+ ARRAY_SIZE(action));
+}
+
+/**
+ * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
* @gt: graphics tile
 * @fence: invalidation fence which will be signaled on TLB invalidation
* completion, can be NULL
* Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
* negative error code on error.
*/
-int xe_gt_tlb_invalidation(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- struct xe_vma *vma)
+int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ struct xe_vma *vma)
{
struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN 7
XE_BUG_ON(!vma);
+ action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
+ action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
if (!xe->info.has_range_tlb_invalidation) {
- action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
- action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
-#define MAKE_INVAL_OP(type) ((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
- XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
- XE_GUC_TLB_INVAL_FLUSH_CACHE)
action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
} else {
u64 start = vma->start;
XE_BUG_ON(length & GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1));
XE_BUG_ON(!IS_ALIGNED(start, length));
- action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
- action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
action[len++] = vma->vm->usm.asid;
action[len++] = lower_32_bits(start);
int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
-int xe_gt_tlb_invalidation(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- struct xe_vma *vma);
+int xe_gt_tlb_invalidation_guc(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ struct xe_vma *vma);
int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_post_load_init(struct xe_guc *guc)
{
xe_guc_ads_populate_post_load(&guc->ads);
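+	/* GuC submission becomes usable only after post-load init completes */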
+ guc->submission_state.enabled = true;
return 0;
}
{
xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
xe_guc_ct_disable(&guc->ct);
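+	/* gate off GuC-based paths (e.g. TLB invalidation) until reload */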
+ guc->submission_state.enabled = false;
}
int xe_guc_reset_prepare(struct xe_guc *guc)
/** @patch: patch version of GuC submission */
u32 patch;
} version;
+ /** @enabled: submission is enabled */
+ bool enabled;
} submission_state;
/** @hwconfig: Hardware config state */
struct {
container_of(w, struct invalidation_fence, work);
trace_xe_gt_tlb_invalidation_fence_work_func(&ifence->base);
- xe_gt_tlb_invalidation(ifence->gt, &ifence->base, ifence->vma);
+ xe_gt_tlb_invalidation_vma(ifence->gt, &ifence->base, ifence->vma);
}
static int invalidation_fence_init(struct xe_gt *gt,
return 0;
}
-static int uc_sanitize(struct xe_uc *uc)
+void xe_uc_sanitize(struct xe_uc *uc)
{
xe_huc_sanitize(&uc->huc);
xe_guc_sanitize(&uc->guc);
+}
+
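+/* sanitize the uCs and then reset them so firmware can be reloaded */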
+static int xe_uc_sanitize_reset(struct xe_uc *uc)
+{
+ xe_uc_sanitize(uc);
return uc_reset(uc);
}
if (!xe_device_guc_submission_enabled(uc_to_xe(uc)))
return 0;
- ret = uc_sanitize(uc);
+ ret = xe_uc_sanitize_reset(uc);
if (ret)
return ret;
int xe_uc_stop(struct xe_uc *uc);
int xe_uc_start(struct xe_uc *uc);
int xe_uc_suspend(struct xe_uc *uc);
+void xe_uc_sanitize(struct xe_uc *uc);
#endif
if (xe_pt_zap_ptes(gt, vma)) {
gt_needs_invalidate |= BIT(id);
xe_device_wmb(xe);
- seqno[id] = xe_gt_tlb_invalidation(gt, NULL, vma);
+ seqno[id] = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
if (seqno[id] < 0)
return seqno[id];
}