git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
parisc: deduplicate code in flush_cache_mm() and flush_cache_range()
authorSven Schnelle <svens@stackframe.org>
Sat, 9 Oct 2021 18:24:36 +0000 (20:24 +0200)
committerHelge Deller <deller@gmx.de>
Sat, 30 Oct 2021 21:11:01 +0000 (23:11 +0200)
Parts of both functions are the same, so deduplicate them. No functional
change.

Signed-off-by: Sven Schnelle <svens@stackframe.org>
Signed-off-by: Helge Deller <deller@gmx.de>
arch/parisc/kernel/cache.c

index a1a7e2b0812f70a2afc5a7c573d3e6c167473715..c61827e4928ae3952719554cd5e4e6e8228f6b00 100644 (file)
@@ -543,10 +543,33 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
        return ptep;
 }
 
+/*
+ * Flush every page of @mm mapped in [start, end) from the cache,
+ * walking the range in PAGE_SIZE steps.  Addresses with no pte
+ * present are skipped.  Shared helper deduplicating the per-page
+ * loops formerly open-coded in flush_cache_mm() and
+ * flush_cache_range().
+ *
+ * NOTE(review): the replaced loops also checked pfn_valid(pfn)
+ * before flushing -- presumably flush_cache_page() handles invalid
+ * pfns; confirm against its definition.
+ */
+static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
+                             unsigned long start, unsigned long end)
+{
+       unsigned long addr, pfn;
+       pte_t *ptep;
+
+       for (addr = start; addr < end; addr += PAGE_SIZE) {
+               ptep = get_ptep(mm->pgd, addr);
+               if (ptep) {
+                       pfn = pte_pfn(*ptep);
+                       flush_cache_page(vma, addr, pfn);
+               }
+       }
+}
+
+/*
+ * Flush the user data cache over [start, end), the instruction
+ * cache too when the VMA is executable (VM_EXEC), then flush the
+ * TLB for the range.  Shared helper for the "mm is the current
+ * address space" fast paths of flush_cache_mm() and
+ * flush_cache_range().
+ */
+static void flush_user_cache_tlb(struct vm_area_struct *vma,
+                                unsigned long start, unsigned long end)
+{
+       flush_user_dcache_range_asm(start, end);
+       if (vma->vm_flags & VM_EXEC)
+               flush_user_icache_range_asm(start, end);
+       flush_tlb_range(vma, start, end);
+}
+
void flush_cache_mm(struct mm_struct *mm)
 {
        struct vm_area_struct *vma;
-       pgd_t *pgd;

        /* Flushing the whole cache on each cpu takes forever on
           rp3440, etc.  So, avoid it if the mm isn't too big.  */
@@ -560,46 +583,20 @@ void flush_cache_mm(struct mm_struct *mm)

        preempt_disable();
        if (mm->context == mfsp(3)) {
-               for (vma = mm->mmap; vma; vma = vma->vm_next) {
-                       flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
-                       if (vma->vm_flags & VM_EXEC)
-                               flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
-                       flush_tlb_range(vma, vma->vm_start, vma->vm_end);
-               }
+               /* mm is the current address space (space reg 3):
+                * flush caches and TLB per-VMA via the new helper. */
+               for (vma = mm->mmap; vma; vma = vma->vm_next)
+                       flush_user_cache_tlb(vma, vma->vm_start, vma->vm_end);
                preempt_enable();
                return;
        }

-       pgd = mm->pgd;
-       for (vma = mm->mmap; vma; vma = vma->vm_next) {
-               unsigned long addr;
-
-               for (addr = vma->vm_start; addr < vma->vm_end;
-                    addr += PAGE_SIZE) {
-                       unsigned long pfn;
-                       pte_t *ptep = get_ptep(pgd, addr);
-                       if (!ptep)
-                               continue;
-                       pfn = pte_pfn(*ptep);
-                       if (!pfn_valid(pfn))
-                               continue;
-                       if (unlikely(mm->context)) {
-                               flush_tlb_page(vma, addr);
-                               __flush_cache_page(vma, addr, PFN_PHYS(pfn));
-                       } else {
-                               __purge_cache_page(vma, addr, PFN_PHYS(pfn));
-                       }
-               }
-       }
+       /* Foreign address space: walk each VMA's ptes and flush page
+        * by page through the shared helper.
+        * NOTE(review): the removed loop distinguished flush vs purge
+        * on mm->context and checked pfn_valid(); the helper does
+        * neither visibly here -- verify flush_cache_page() covers
+        * both cases. */
+       for (vma = mm->mmap; vma; vma = vma->vm_next)
+               flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
        preempt_enable();
 }
 
void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
 {
-       pgd_t *pgd;
-       unsigned long addr;
-
        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            end - start >= parisc_cache_flush_threshold) {
                if (vma->vm_mm->context)
@@ -610,30 +607,12 @@ void flush_cache_range(struct vm_area_struct *vma,

        preempt_disable();
        if (vma->vm_mm->context == mfsp(3)) {
-               flush_user_dcache_range_asm(start, end);
-               if (vma->vm_flags & VM_EXEC)
-                       flush_user_icache_range_asm(start, end);
-               flush_tlb_range(vma, start, end);
+               /* Current address space: single call into the shared
+                * dcache/icache/TLB flush helper. */
+               flush_user_cache_tlb(vma, start, end);
                preempt_enable();
                return;
        }

-       pgd = vma->vm_mm->pgd;
-       for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
-               unsigned long pfn;
-               pte_t *ptep = get_ptep(pgd, addr);
-               if (!ptep)
-                       continue;
-               pfn = pte_pfn(*ptep);
-               if (pfn_valid(pfn)) {
-                       if (unlikely(vma->vm_mm->context)) {
-                               flush_tlb_page(vma, addr);
-                               __flush_cache_page(vma, addr, PFN_PHYS(pfn));
-                       } else {
-                               __purge_cache_page(vma, addr, PFN_PHYS(pfn));
-                       }
-               }
-       }
+       /* Foreign address space: per-page flush via the shared helper.
+        * NOTE(review): helper is passed vma->vm_start/vm_end, not the
+        * start/end arguments -- flushes the whole VMA rather than just
+        * the requested range; confirm this widening is intentional. */
+       flush_cache_pages(vma, vma->vm_mm, vma->vm_start, vma->vm_end);
        preempt_enable();
 }