// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#include "pgalloc-track.h"	/* p?d_alloc_track() helpers */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

/* "nohugeiomap" on the kernel command line disables huge ioremap mappings */
static int __init set_nohugeiomap(char *str)
{
        ioremap_huge_disabled = 1;
        return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

void __init ioremap_huge_init(void)
{
        if (!ioremap_huge_disabled) {
                if (arch_ioremap_p4d_supported())
                        ioremap_p4d_capable = 1;
                if (arch_ioremap_pud_supported())
                        ioremap_pud_capable = 1;
                if (arch_ioremap_pmd_supported())
                        ioremap_pmd_capable = 1;
        }
}

static inline int ioremap_p4d_enabled(void)
{
        return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
        return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
        return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

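/*
 * The mapping helpers below walk the kernel page tables top-down
 * (PGD -> P4D -> PUD -> PMD -> PTE) for the range [addr, end). At each
 * level a huge mapping is attempted first where the architecture
 * supports it; otherwise the walk descends to the next level.
 */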
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
                pgtbl_mod_mask *mask)
{
        pte_t *pte;
        u64 pfn;

        pfn = phys_addr >> PAGE_SHIFT;
        pte = pte_alloc_kernel_track(pmd, addr, mask);
        if (!pte)
                return -ENOMEM;
        do {
                BUG_ON(!pte_none(*pte));
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        *mask |= PGTBL_PTE_MODIFIED;
        return 0;
}

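/*
 * Install a single huge PMD covering [addr, end) if every precondition
 * holds: huge PMD ioremap is enabled, the range spans exactly PMD_SIZE,
 * both the virtual and physical addresses are PMD-aligned, and any page
 * table page left behind by a previous mapping can be freed. Returns
 * nonzero once the huge mapping has been installed.
 */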
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_pmd_enabled())
                return 0;

        if ((end - addr) != PMD_SIZE)
                return 0;

        if (!IS_ALIGNED(addr, PMD_SIZE))
                return 0;

        if (!IS_ALIGNED(phys_addr, PMD_SIZE))
                return 0;

        if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
                return 0;

        return pmd_set_huge(pmd, phys_addr, prot);
}

static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
                pgtbl_mod_mask *mask)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
        if (!pmd)
                return -ENOMEM;
        do {
                next = pmd_addr_end(addr, end);

                if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
                        *mask |= PGTBL_PMD_MODIFIED;
                        continue;
                }

                if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
                        return -ENOMEM;
        } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_pud_enabled())
                return 0;

        if ((end - addr) != PUD_SIZE)
                return 0;

        if (!IS_ALIGNED(addr, PUD_SIZE))
                return 0;

        if (!IS_ALIGNED(phys_addr, PUD_SIZE))
                return 0;

        if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
                return 0;

        return pud_set_huge(pud, phys_addr, prot);
}

static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
                pgtbl_mod_mask *mask)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_alloc_track(&init_mm, p4d, addr, mask);
        if (!pud)
                return -ENOMEM;
        do {
                next = pud_addr_end(addr, end);

                if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
                        *mask |= PGTBL_PUD_MODIFIED;
                        continue;
                }

                if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
                        return -ENOMEM;
        } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_p4d_enabled())
                return 0;

        if ((end - addr) != P4D_SIZE)
                return 0;

        if (!IS_ALIGNED(addr, P4D_SIZE))
                return 0;

        if (!IS_ALIGNED(phys_addr, P4D_SIZE))
                return 0;

        if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
                return 0;

        return p4d_set_huge(p4d, phys_addr, prot);
}

static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
                pgtbl_mod_mask *mask)
{
        p4d_t *p4d;
        unsigned long next;

        p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
        if (!p4d)
                return -ENOMEM;
        do {
                next = p4d_addr_end(addr, end);

                if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
                        *mask |= PGTBL_P4D_MODIFIED;
                        continue;
                }

                if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
                        return -ENOMEM;
        } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

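/*
 * ioremap_page_range - map physical memory starting at phys_addr into
 * the kernel virtual address range [addr, end) with protection @prot.
 * Page table updates are propagated to other page tables via
 * arch_sync_kernel_mappings() where the architecture requires it.
 */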
int ioremap_page_range(unsigned long addr,
                       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        unsigned long start;
        pgd_t *pgd;
        unsigned long next;
        int err;
        pgtbl_mod_mask mask = 0;

        might_sleep();
        BUG_ON(addr >= end);

        start = addr;
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
                                        &mask);
                if (err)
                        break;
        } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

        flush_cache_vmap(start, end);

        if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
                arch_sync_kernel_mappings(start, end);

        return err;
}

#ifdef CONFIG_GENERIC_IOREMAP
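/*
 * Generic ioremap implementation, used when the architecture does not
 * provide one of its own. @prot is the raw pgprot value for the
 * mapping; callers normally reach this through wrappers such as
 * ioremap(), which supply an architecture-appropriate protection value.
 */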
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
        unsigned long offset, vaddr;
        phys_addr_t last_addr;
        struct vm_struct *area;

        /* Disallow wrap-around or zero size */
        last_addr = addr + size - 1;
        if (!size || last_addr < addr)
                return NULL;

        /* Page-align mappings */
        offset = addr & (~PAGE_MASK);
        addr -= offset;
        size = PAGE_ALIGN(size + offset);

        area = get_vm_area_caller(size, VM_IOREMAP,
                        __builtin_return_address(0));
        if (!area)
                return NULL;
        vaddr = (unsigned long)area->addr;

        if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
                free_vm_area(area);
                return NULL;
        }

        return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
        /* Mask off the sub-page offset added by ioremap_prot() */
        vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);

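/*
 * Usage sketch (illustrative only, not from this file): mapping a
 * device's MMIO region and tearing it down again. MY_DEV_BASE,
 * MY_DEV_SIZE and the register offset are hypothetical placeholders.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(MY_DEV_BASE, MY_DEV_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);	// hypothetical control register
 *	iounmap(regs);
 */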
#endif /* CONFIG_GENERIC_IOREMAP */