#define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
+/* Interval (in microseconds) long enough to ensure no stale retry fault
+ * arrives after the svm range is restored and the page table is updated.
+ */
+#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING 2000
+
static void svm_range_evict_svm_bo_worker(struct work_struct *work);
static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
INIT_LIST_HEAD(&prange->deferred_list);
INIT_LIST_HEAD(&prange->child_list);
atomic_set(&prange->invalid, 0);
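+ /* Initial baseline for the stale retry fault check in the fault handler */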
+ prange->validate_timestamp = ktime_to_us(ktime_get());
mutex_init(&prange->migrate_mutex);
mutex_init(&prange->lock);
svm_range_set_default_attributes(&prange->preferred_loc,
unreserve_out:
svm_range_unreserve_bos(&ctx);
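+ /* On success, record when this range was validated and mapped so the GPU
+  * fault handler can skip stale retry faults that arrive afterwards.
+  */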
+ if (!r)
+ prange->validate_timestamp = ktime_to_us(ktime_get());
+
return r;
}
struct svm_range_list *svms;
struct svm_range *prange;
struct kfd_process *p;
+ uint64_t timestamp;
int32_t best_loc, gpuidx;
int r = 0;
}
mutex_lock(&prange->migrate_mutex);
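+ /* Time in microseconds since this range was last validated and mapped */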
+ timestamp = ktime_to_us(ktime_get()) - prange->validate_timestamp;
+ /* skip duplicate vm fault on different pages of same range */
+ if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
+ pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
+ svms, prange->start, prange->last);
+ goto out_unlock_range;
+ }
best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
if (best_loc == -1) {
* @actual_loc: the actual location, 0 for CPU, or GPU id
* @granularity: migration granularity, log2 num pages
* @invalid: not 0 means cpu page table is invalidated
+ * @validate_timestamp: system timestamp (us) when range was last validated
* @notifier: register mmu interval notifier
* @work_item: deferred work item information
* @deferred_list: list header used to add range to deferred list
uint32_t actual_loc;
uint8_t granularity;
atomic_t invalid;
+ uint64_t validate_timestamp;
struct mmu_interval_notifier notifier;
struct svm_work_list_item work_item;
struct list_head deferred_list;