powerpc/mm/radix: Avoid flushing the PWC on every flush_tlb_range
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index a83f5f95f9d370ca00a2eee92834774798cb7f1e..74d50da39460e5078168f0bc4d1d9e92abee44d2 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -186,6 +186,10 @@ static void __init radix_init_pgtable(void)
         */
        register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
        pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
+       asm volatile("ptesync" : : : "memory");
+       asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
+                    "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
+       asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 }
 
 static void __init radix_init_partition_table(void)
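
For readers who don't have the tlbie operand encodings memorized, the sequence added to radix_init_pgtable() above reads roughly as follows. This is a commented restatement of the same three statements, not part of the patch; the operand meanings are my reading of the PPC_TLBIE_5(rb, rs, ric, prs, r) macro and the ISA 3.0 encodings.

        /* Make sure the process-table registration above is visible to the MMU. */
        asm volatile("ptesync" : : : "memory");
        /*
         * Broadcast tlbie with RIC = 2 (invalidate all, including the
         * page-walk cache), PRS = 1 (process-scoped translations),
         * R = 1 (radix), RS = 0 (PID 0 / LPID 0), with the invalidation
         * scope selected by TLBIEL_INVAL_SET_LPID: throw away anything
         * cached for the kernel's newly registered process table.
         */
        asm volatile(PPC_TLBIE_5(%0, %1, 2, 1, 1) : :
                     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
        /* Wait for the broadcast invalidation to complete on all CPUs. */
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
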
@@ -414,6 +418,8 @@ void __init radix__early_init_mmu(void)
                mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
                radix_init_partition_table();
                radix_init_amor();
+       } else {
+               radix_init_pseries();
        }
 
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
@@ -482,10 +488,170 @@ void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
+static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
+{
+       pte_t *pte;
+       int i;
+
+       for (i = 0; i < PTRS_PER_PTE; i++) {
+               pte = pte_start + i;
+               if (!pte_none(*pte))
+                       return;
+       }
+
+       pte_free_kernel(&init_mm, pte_start);
+       pmd_clear(pmd);
+}
+
+static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
+{
+       pmd_t *pmd;
+       int i;
+
+       for (i = 0; i < PTRS_PER_PMD; i++) {
+               pmd = pmd_start + i;
+               if (!pmd_none(*pmd))
+                       return;
+       }
+
+       pmd_free(&init_mm, pmd_start);
+       pud_clear(pud);
+}
+
+static void remove_pte_table(pte_t *pte_start, unsigned long addr,
+                            unsigned long end)
+{
+       unsigned long next;
+       pte_t *pte;
+
+       pte = pte_start + pte_index(addr);
+       for (; addr < end; addr = next, pte++) {
+               next = (addr + PAGE_SIZE) & PAGE_MASK;
+               if (next > end)
+                       next = end;
+
+               if (!pte_present(*pte))
+                       continue;
+
+               if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
+                       /*
+                        * The vmemmap_free() and remove_section_mapping()
+                        * codepaths call us with aligned addresses.
+                        */
+                       WARN_ONCE(1, "%s: unaligned range\n", __func__);
+                       continue;
+               }
+
+               pte_clear(&init_mm, addr, pte);
+       }
+}
+
+static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
+                            unsigned long end)
+{
+       unsigned long next;
+       pte_t *pte_base;
+       pmd_t *pmd;
+
+       pmd = pmd_start + pmd_index(addr);
+       for (; addr < end; addr = next, pmd++) {
+               next = pmd_addr_end(addr, end);
+
+               if (!pmd_present(*pmd))
+                       continue;
+
+               if (pmd_huge(*pmd)) {
+                       if (!IS_ALIGNED(addr, PMD_SIZE) ||
+                           !IS_ALIGNED(next, PMD_SIZE)) {
+                               WARN_ONCE(1, "%s: unaligned range\n", __func__);
+                               continue;
+                       }
+
+                       pte_clear(&init_mm, addr, (pte_t *)pmd);
+                       continue;
+               }
+
+               pte_base = (pte_t *)pmd_page_vaddr(*pmd);
+               remove_pte_table(pte_base, addr, next);
+               free_pte_table(pte_base, pmd);
+       }
+}
+
+static void remove_pud_table(pud_t *pud_start, unsigned long addr,
+                            unsigned long end)
+{
+       unsigned long next;
+       pmd_t *pmd_base;
+       pud_t *pud;
+
+       pud = pud_start + pud_index(addr);
+       for (; addr < end; addr = next, pud++) {
+               next = pud_addr_end(addr, end);
+
+               if (!pud_present(*pud))
+                       continue;
+
+               if (pud_huge(*pud)) {
+                       if (!IS_ALIGNED(addr, PUD_SIZE) ||
+                           !IS_ALIGNED(next, PUD_SIZE)) {
+                               WARN_ONCE(1, "%s: unaligned range\n", __func__);
+                               continue;
+                       }
+
+                       pte_clear(&init_mm, addr, (pte_t *)pud);
+                       continue;
+               }
+
+               pmd_base = (pmd_t *)pud_page_vaddr(*pud);
+               remove_pmd_table(pmd_base, addr, next);
+               free_pmd_table(pmd_base, pud);
+       }
+}
+
+static void remove_pagetable(unsigned long start, unsigned long end)
+{
+       unsigned long addr, next;
+       pud_t *pud_base;
+       pgd_t *pgd;
+
+       spin_lock(&init_mm.page_table_lock);
+
+       for (addr = start; addr < end; addr = next) {
+               next = pgd_addr_end(addr, end);
+
+               pgd = pgd_offset_k(addr);
+               if (!pgd_present(*pgd))
+                       continue;
+
+               if (pgd_huge(*pgd)) {
+                       if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
+                           !IS_ALIGNED(next, PGDIR_SIZE)) {
+                               WARN_ONCE(1, "%s: unaligned range\n", __func__);
+                               continue;
+                       }
+
+                       pte_clear(&init_mm, addr, (pte_t *)pgd);
+                       continue;
+               }
+
+               pud_base = (pud_t *)pgd_page_vaddr(*pgd);
+               remove_pud_table(pud_base, addr, next);
+       }
+
+       spin_unlock(&init_mm.page_table_lock);
+       radix__flush_tlb_kernel_range(start, end);
+}
+
 int __ref radix__create_section_mapping(unsigned long start, unsigned long end)
 {
        return create_physical_mapping(start, end);
 }
+
+int radix__remove_section_mapping(unsigned long start, unsigned long end)
+{
+       remove_pagetable(start, end);
+       return 0;
+}
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
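
For context, the new radix__remove_section_mapping() above is not called directly from generic code; memory hot-unplug reaches it through the book3s64 remove_section_mapping() dispatcher. A minimal sketch of that caller, assuming the dispatcher and the hash-side counterpart from the same kernel series (arch/powerpc/mm/pgtable-book3s64.c), looks like this; it is illustration only, not part of this file:

        /* Sketch of the assumed caller in arch/powerpc/mm/pgtable-book3s64.c */
        int remove_section_mapping(unsigned long start, unsigned long end)
        {
                if (radix_enabled())
                        return radix__remove_section_mapping(start, end);

                return hash__remove_section_mapping(start, end);
        }
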
@@ -503,7 +669,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
 #ifdef CONFIG_MEMORY_HOTPLUG
 void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
 {
-       /* FIXME!! intel does more. We should free page tables mapping vmemmap ? */
+       remove_pagetable(start, start + page_size);
 }
 #endif
 #endif
@@ -540,9 +706,12 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
         */
        pmd = *pmdp;
        pmd_clear(pmdp);
+
        /*FIXME!!  Verify whether we need this kick below */
        kick_all_cpus_sync();
-       flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+       radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
+
        return pmd;
 }
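
On the final hunk: when khugepaged collapses a PTE table into a huge PMD, the stale state to get rid of is not just the small-page translations covering the 2M range but also the page-walk-cache entry that may still point at the now-disconnected PTE table, which is why the plain range flush is replaced by radix__flush_tlb_collapsed_pmd(). A pseudo-C sketch of what such a flush has to accomplish; both helper names below are made up for illustration and are not kernel functions:

        /*
         * Pseudo-code sketch only; flush_small_page_translations() and
         * flush_page_walk_cache() are hypothetical names.
         */
        static void collapsed_pmd_flush_sketch(struct mm_struct *mm, unsigned long addr)
        {
                /* 1. Invalidate the PTE-level translations that covered the range. */
                flush_small_page_translations(mm, addr, addr + HPAGE_PMD_SIZE);

                /* 2. Invalidate the page-walk cache, which may still reference the
                 *    PTE table that backed this range before the collapse. */
                flush_page_walk_cache(mm);
        }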