// SPDX-License-Identifier: GPL-2.0
/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995 1996, 2014 Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>
#include <asm/early_ioremap.h>

#ifdef CONFIG_MMU
static int early_ioremap_debug __initdata;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
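/*
 * With the early_param() hook above, booting with "early_ioremap_debug"
 * on the kernel command line makes each early (un)mapping emit a WARN
 * with caller, physical address, size and slot, so stale mappings can
 * be traced from the dmesg output.
 */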
static int after_paging_init __initdata;
pgprot_t __init __weak
early_memremap_pgprot_adjust(resource_size_t phys_addr,
			     unsigned long size,
			     pgprot_t prot)
{
	return prot;
}
void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}
/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
 */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif
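/*
 * A hypothetical arch override, sketched for illustration only (an arch
 * would provide something equivalent from its own <asm/fixmap.h>, built
 * on its __set_fixmap() implementation):
 *
 *	#define __late_set_fixmap(idx, phys, prot) \
 *		__set_fixmap(idx, phys, prot)
 *	#define __late_clear_fixmap(idx) \
 *		__set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR)
 */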
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
void __init early_ioremap_setup(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (WARN_ON(prev_map[i]))
			break;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
}
static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (WARN(count, KERN_WARNING
		 "Debug warning: early ioremap leak of %d areas detected.\n"
		 "please boot with early_ioremap_debug and report the dmesg.\n",
		 count))
		return 1;
	return 0;
}
late_initcall(check_early_ioremap_leak);
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state >= SYSTEM_RUNNING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%pa, %08lx) not found slot\n",
		 __func__, &phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = offset_in_page(phys_addr);
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;
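	/*
	 * With the generic __fix_to_virt(), a higher fixmap index means a
	 * lower virtual address, so decrementing idx while advancing
	 * phys_addr below maps consecutive physical pages at consecutive,
	 * ascending virtual addresses within the slot.
	 */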
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	WARN(early_ioremap_debug, "%s(%pa, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, &phys_addr, size, slot, offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%p, %08lx) not found slot\n",
		 __func__, addr, size))
		return;

	if (WARN(prev_size[slot] != size,
		 "%s(%p, %08lx) [%d] size not consistent %08lx\n",
		 __func__, addr, size, slot, prev_size[slot]))
		return;

	WARN(early_ioremap_debug, "%s(%p, %08lx) [%d]\n",
	     __func__, addr, size, slot);

	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;

	offset = offset_in_page(virt_addr);
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}
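/*
 * Hypothetical usage, for illustration only (UART_PHYS_BASE is a made-up
 * platform constant): map a device before ioremap() is usable, poke it,
 * and release the slot promptly, since only FIX_BTMAPS_SLOTS temporary
 * mappings can be live at once.
 *
 *	void __iomem *regs = early_ioremap(UART_PHYS_BASE, 0x100);
 *
 *	if (regs) {
 *		writel(0, regs + 0x8);
 *		early_iounmap(regs, 0x100);
 *	}
 */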

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_NORMAL);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
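/*
 * Hypothetical usage, for illustration only (fw_table_phys and
 * parse_fw_table() are made up): read a firmware-provided structure out
 * of physical memory before the normal memremap() machinery exists.
 *
 *	struct fw_table *tbl = early_memremap(fw_table_phys, sizeof(*tbl));
 *
 *	if (tbl) {
 *		parse_fw_table(tbl);
 *		early_memunmap(tbl, sizeof(*tbl));
 *	}
 */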

#ifdef FIXMAP_PAGE_RO
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_RO);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#endif

#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
void __init *
early_memremap_prot(resource_size_t phys_addr, unsigned long size,
		    unsigned long prot_val)
{
	return (__force void *)__early_ioremap(phys_addr, size,
					       __pgprot(prot_val));
}
#endif

#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)

void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
{
	unsigned long slop, clen;
	char *p;

	while (size) {
		slop = offset_in_page(src);
		clen = size;
		if (clen > MAX_MAP_CHUNK - slop)
			clen = MAX_MAP_CHUNK - slop;
		p = early_memremap(src & PAGE_MASK, clen + slop);
		memcpy(dest, p + slop, clen);
		early_memunmap(p, clen + slop);
		dest += clen;
		src += clen;
		size -= clen;
	}
}

#else /* CONFIG_MMU */
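
/*
 * Without an MMU there is no fixmap and physical addresses are directly
 * usable, so the early mapping helpers reduce to casts.
 */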
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */

void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}