From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 28 Mar 2019 15:16:12 +0000 (-0500)
Subject: Revert "drm/amdgpu: use HMM callback to replace mmu notifier"
X-Git-Tag: v5.15~6416^2~31^2~1
X-Git-Url: https://git.proxmox.com/?a=commitdiff_plain;h=296bb163e2d549c0fcae307e6aff1e407bb1a998;p=mirror_ubuntu-kernels.git

Revert "drm/amdgpu: use HMM callback to replace mmu notifier"

This reverts commit 57731a07795ae80790c6ae7d8e7001cdbd6d14c0.

This depends on an HMM fix which is not upstream yet.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
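For reference while reviewing: the pattern this revert returns to is the
classic mmu_notifier hookup rather than an HMM mirror. Below is a minimal
sketch against the 5.0-era notifier API, not the driver code itself; the
demo_* names are invented for illustration and error handling is omitted.

    #include <linux/mm.h>
    #include <linux/mmu_notifier.h>

    /* Called before the CPU page tables of [range->start, range->end)
     * change.  May return -EAGAIN when range->blockable is false and we
     * would otherwise have to sleep.
     */
    static int demo_invalidate_range_start(struct mmu_notifier *mn,
                                           const struct mmu_notifier_range *range)
    {
            /* stop GPU access to any userptr BO overlapping the range */
            return 0;
    }

    /* Called once the page tables are consistent again. */
    static void demo_invalidate_range_end(struct mmu_notifier *mn,
                                          const struct mmu_notifier_range *range)
    {
            /* allow new command submissions again */
    }

    static const struct mmu_notifier_ops demo_ops = {
            .invalidate_range_start = demo_invalidate_range_start,
            .invalidate_range_end   = demo_invalidate_range_end,
    };

    static struct mmu_notifier demo_mn = { .ops = &demo_ops };

    /* Typically called from an ioctl, with mm == current->mm. */
    static int demo_attach(struct mm_struct *mm)
    {
            return mmu_notifier_register(&demo_mn, mm);
    }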
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 960a63355705..9221e5489069 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -26,10 +26,10 @@ config DRM_AMDGPU_CIK
 config DRM_AMDGPU_USERPTR
        bool "Always enable userptr write support"
        depends on DRM_AMDGPU
-       select HMM_MIRROR
+       select MMU_NOTIFIER
        help
-         This option selects CONFIG_HMM and CONFIG_HMM_MIRROR if it
-         isn't already selected to enable full userptr support.
+         This option selects CONFIG_MMU_NOTIFIER if it isn't already
+         selected to enable full userptr support.
 
 config DRM_AMDGPU_GART_DEBUGFS
        bool "Allow GART access through debugfs"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 7d539ba6400d..f8c58c425eb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -173,7 +173,7 @@ endif
 amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
 amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
 amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
-amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_mn.o
+amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o
 
 include $(FULL_AMD_PATH)/powerplay/Makefile
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index f000704f984d..3e6823fdd939 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -45,7 +45,7 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
-#include <linux/hmm.h>
+#include <linux/mmu_notifier.h>
 #include <linux/interval_tree.h>
 #include <drm/drmP.h>
 #include <drm/drm.h>
 
@@ -58,12 +58,14 @@
  *
  * @adev: amdgpu device pointer
  * @mm: process address space
+ * @mn: MMU notifier structure
  * @type: type of MMU notifier
  * @work: destruction work item
  * @node: hash table node to find structure by adev and mn
  * @lock: rw semaphore protecting the notifier nodes
  * @objects: interval tree containing amdgpu_mn_nodes
- * @mirror: HMM mirror function support
+ * @read_lock: mutex for recursive locking of @lock
+ * @recursion: depth of recursion
  *
  * Data for each amdgpu device and process address space.
  */
@@ -71,6 +73,7 @@ struct amdgpu_mn {
        /* constant after initialisation */
        struct amdgpu_device    *adev;
        struct mm_struct        *mm;
+       struct mmu_notifier     mn;
        enum amdgpu_mn_type     type;
 
        /* only used on destruction */
@@ -82,9 +85,8 @@ struct amdgpu_mn {
        /* objects protected by lock */
        struct rw_semaphore     lock;
        struct rb_root_cached   objects;
-
-       /* HMM mirror */
-       struct hmm_mirror       mirror;
+       struct mutex            read_lock;
+       atomic_t                recursion;
 };
 
 /**
@@ -101,7 +103,7 @@ struct amdgpu_mn_node {
 };
 
 /**
- * amdgpu_mn_destroy - destroy the HMM mirror
+ * amdgpu_mn_destroy - destroy the MMU notifier
  *
  * @work: previously scheduled work item
  *
@@ -127,26 +129,28 @@ static void amdgpu_mn_destroy(struct work_struct *work)
        }
        up_write(&amn->lock);
        mutex_unlock(&adev->mn_lock);
-
-       hmm_mirror_unregister(&amn->mirror);
+       mmu_notifier_unregister_no_release(&amn->mn, amn->mm);
        kfree(amn);
 }
 
 /**
- * amdgpu_hmm_mirror_release - callback to notify about mm destruction
+ * amdgpu_mn_release - callback to notify about mm destruction
  *
- * @mirror: the HMM mirror (mm) this callback is about
+ * @mn: our notifier
+ * @mm: the mm this callback is about
  *
- * Schedule a work item to lazy destroy HMM mirror.
+ * Schedule a work item to lazy destroy our notifier.
  */
-static void amdgpu_hmm_mirror_release(struct hmm_mirror *mirror)
+static void amdgpu_mn_release(struct mmu_notifier *mn,
+                             struct mm_struct *mm)
 {
-       struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
+       struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 
        INIT_WORK(&amn->work, amdgpu_mn_destroy);
        schedule_work(&amn->work);
 }
 
+
 /**
  * amdgpu_mn_lock - take the write side lock for this notifier
  *
@@ -177,10 +181,14 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
 static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
 {
        if (blockable)
-               down_read(&amn->lock);
-       else if (!down_read_trylock(&amn->lock))
+               mutex_lock(&amn->read_lock);
+       else if (!mutex_trylock(&amn->read_lock))
                return -EAGAIN;
 
+       if (atomic_inc_return(&amn->recursion) == 1)
+               down_read_non_owner(&amn->lock);
+       mutex_unlock(&amn->read_lock);
+
        return 0;
 }
 
@@ -191,7 +199,8 @@ static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
  */
 static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
 {
-       up_read(&amn->lock);
+       if (atomic_dec_return(&amn->recursion) == 0)
+               up_read_non_owner(&amn->lock);
 }
 
 /**
@@ -226,128 +235,143 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 }
 
 /**
- * amdgpu_mn_sync_pagetables_gfx - callback to notify about mm change
+ * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
  *
- * @mirror: the hmm_mirror (mm) is about to update
- * @update: the update start, end address
+ * @mn: our notifier
+ * @range: mmu notifier context
  *
  * Block for operations on BOs to finish and mark pages as accessed and
  * potentially dirty.
  */
-static int amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
-                       const struct hmm_update *update)
+static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+                       const struct mmu_notifier_range *range)
 {
-       struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
-       unsigned long start = update->start;
-       unsigned long end = update->end;
-       bool blockable = update->blockable;
+       struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
        struct interval_tree_node *it;
+       unsigned long end;
 
        /* notification is exclusive, but interval is inclusive */
-       end -= 1;
+       end = range->end - 1;
 
        /* TODO we should be able to split locking for interval tree and
         * amdgpu_mn_invalidate_node
        */
-       if (amdgpu_mn_read_lock(amn, blockable))
+       if (amdgpu_mn_read_lock(amn, range->blockable))
                return -EAGAIN;
 
-       it = interval_tree_iter_first(&amn->objects, start, end);
+       it = interval_tree_iter_first(&amn->objects, range->start, end);
        while (it) {
                struct amdgpu_mn_node *node;
 
-               if (!blockable) {
+               if (!range->blockable) {
                        amdgpu_mn_read_unlock(amn);
                        return -EAGAIN;
                }
 
                node = container_of(it, struct amdgpu_mn_node, it);
-               it = interval_tree_iter_next(it, start, end);
+               it = interval_tree_iter_next(it, range->start, end);
 
-               amdgpu_mn_invalidate_node(node, start, end);
+               amdgpu_mn_invalidate_node(node, range->start, end);
        }
 
-       amdgpu_mn_read_unlock(amn);
-
        return 0;
 }
 
 /**
- * amdgpu_mn_sync_pagetables_hsa - callback to notify about mm change
+ * amdgpu_mn_invalidate_range_start_hsa - callback to notify about mm change
  *
- * @mirror: the hmm_mirror (mm) is about to update
- * @update: the update start, end address
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
  *
  * We temporarily evict all BOs between start and end. This
  * necessitates evicting all user-mode queues of the process. The BOs
  * are restored in amdgpu_mn_invalidate_range_end_hsa.
  */
-static int amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
-                       const struct hmm_update *update)
+static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
+                       const struct mmu_notifier_range *range)
 {
-       struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
-       unsigned long start = update->start;
-       unsigned long end = update->end;
-       bool blockable = update->blockable;
+       struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
        struct interval_tree_node *it;
+       unsigned long end;
 
        /* notification is exclusive, but interval is inclusive */
-       end -= 1;
+       end = range->end - 1;
 
-       if (amdgpu_mn_read_lock(amn, blockable))
+       if (amdgpu_mn_read_lock(amn, range->blockable))
                return -EAGAIN;
 
-       it = interval_tree_iter_first(&amn->objects, start, end);
+       it = interval_tree_iter_first(&amn->objects, range->start, end);
        while (it) {
                struct amdgpu_mn_node *node;
                struct amdgpu_bo *bo;
 
-               if (!blockable) {
+               if (!range->blockable) {
                        amdgpu_mn_read_unlock(amn);
                        return -EAGAIN;
                }
 
                node = container_of(it, struct amdgpu_mn_node, it);
-               it = interval_tree_iter_next(it, start, end);
+               it = interval_tree_iter_next(it, range->start, end);
 
                list_for_each_entry(bo, &node->bos, mn_list) {
                        struct kgd_mem *mem = bo->kfd_bo;
 
                        if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
-                                                        start, end))
-                               amdgpu_amdkfd_evict_userptr(mem, amn->mm);
+                                                        range->start,
+                                                        end))
+                               amdgpu_amdkfd_evict_userptr(mem, range->mm);
                }
        }
 
-       amdgpu_mn_read_unlock(amn);
-
        return 0;
 }
 
-/* Low bits of any reasonable mm pointer will be unused due to struct
- * alignment. Use these bits to make a unique key from the mm pointer
- * and notifier type.
+/**
+ * amdgpu_mn_invalidate_range_end - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+ * Release the lock again to allow new command submissions.
  */
-#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
+static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
+                       const struct mmu_notifier_range *range)
+{
+       struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+
+       amdgpu_mn_read_unlock(amn);
+}
 
-static struct hmm_mirror_ops amdgpu_hmm_mirror_ops[] = {
+static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
        [AMDGPU_MN_TYPE_GFX] = {
-               .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_gfx,
-               .release = amdgpu_hmm_mirror_release
+               .release = amdgpu_mn_release,
+               .invalidate_range_start = amdgpu_mn_invalidate_range_start_gfx,
+               .invalidate_range_end = amdgpu_mn_invalidate_range_end,
        },
        [AMDGPU_MN_TYPE_HSA] = {
-               .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_hsa,
-               .release = amdgpu_hmm_mirror_release
+               .release = amdgpu_mn_release,
+               .invalidate_range_start = amdgpu_mn_invalidate_range_start_hsa,
+               .invalidate_range_end = amdgpu_mn_invalidate_range_end,
        },
 };
 
+/* Low bits of any reasonable mm pointer will be unused due to struct
+ * alignment. Use these bits to make a unique key from the mm pointer
+ * and notifier type.
+ */
+#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
+
 /**
- * amdgpu_mn_get - create HMM mirror context
+ * amdgpu_mn_get - create notifier context
  *
  * @adev: amdgpu device pointer
  * @type: type of MMU notifier context
  *
- * Creates a HMM mirror context for current->mm.
+ * Creates a notifier context for current->mm.
  */
 struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
                                enum amdgpu_mn_type type)
@@ -377,10 +401,12 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
        amn->mm = mm;
        init_rwsem(&amn->lock);
        amn->type = type;
+       amn->mn.ops = &amdgpu_mn_ops[type];
        amn->objects = RB_ROOT_CACHED;
+       mutex_init(&amn->read_lock);
+       atomic_set(&amn->recursion, 0);
 
-       amn->mirror.ops = &amdgpu_hmm_mirror_ops[type];
-       r = hmm_mirror_register(&amn->mirror, mm);
+       r = __mmu_notifier_register(&amn->mn, mm);
        if (r)
                goto free_amn;
 
@@ -406,7 +432,7 @@ free_amn:
  * @bo: amdgpu buffer object
  * @addr: userptr addr we should monitor
  *
- * Registers an HMM mirror for the given BO at the specified address.
+ * Registers an MMU notifier for the given BO at the specified address.
  * Returns 0 on success, -ERRNO if anything goes wrong.
  */
 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
@@ -462,11 +488,11 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 }
 
 /**
- * amdgpu_mn_unregister - unregister a BO for HMM mirror updates
+ * amdgpu_mn_unregister - unregister a BO for notifier updates
  *
  * @bo: amdgpu buffer object
  *
- * Remove any registration of HMM mirror updates from the buffer object.
+ * Remove any registration of MMU notifier updates from the buffer object.
  */
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
index 0a51fd00021c..eb0f432f78fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -34,7 +34,7 @@ enum amdgpu_mn_type {
        AMDGPU_MN_TYPE_HSA,
 };
 
-#if defined(CONFIG_HMM_MIRROR)
+#if defined(CONFIG_MMU_NOTIFIER)
 void amdgpu_mn_lock(struct amdgpu_mn *mn);
 void amdgpu_mn_unlock(struct amdgpu_mn *mn);
 struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
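A note on the read_lock/recursion pair that comes back with this revert:
invalidate_range_start/end callbacks can nest for the same process, so
amdgpu_mn_read_lock() takes amn->lock only on the 0 -> 1 transition of the
recursion counter and amdgpu_mn_read_unlock() drops it only on the final
1 -> 0 transition; the read_lock mutex keeps the count-and-lock step atomic,
and the non_owner rwsem variants are used because the semaphore is held
across the two callbacks rather than within a single function. The same
collapsing idea in plain, compilable userspace C, with a pthread rwlock
standing in for the kernel rwsem (all names below are invented):

    /* Build with: cc -pthread demo.c */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER; /* ~ amn->lock */
    static pthread_mutex_t read_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ amn->read_lock */
    static atomic_int recursion; /* ~ amn->recursion */

    /* Take the read side once, no matter how deeply the starts nest. */
    static void demo_read_lock(void)
    {
            pthread_mutex_lock(&read_lock);
            if (atomic_fetch_add(&recursion, 1) == 0)
                    pthread_rwlock_rdlock(&lock);
            pthread_mutex_unlock(&read_lock);
    }

    /* Drop the read side only when the outermost end runs. */
    static void demo_read_unlock(void)
    {
            if (atomic_fetch_sub(&recursion, 1) == 1)
                    pthread_rwlock_unlock(&lock);
    }

    int main(void)
    {
            demo_read_lock();
            demo_read_lock();   /* nested start: rwlock NOT taken again */
            demo_read_unlock();
            demo_read_unlock(); /* outermost end: rwlock released here */
            puts("nested read locks collapsed into one rwlock acquisition");
            return 0;
    }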