/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

enum ioremap_mode {
	IOR_MODE_UNCACHED,
	IOR_MODE_CACHED,
};

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
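
/*
 * Worked example of the two translations above (phys_base value is
 * hypothetical, chosen only for illustration): with phys_base = 0x200000,
 * a kernel-text address __START_KERNEL_map + 0x1000 resolves to physical
 * 0x201000 via the first branch, while a direct-mapping address
 * PAGE_OFFSET + 0x1000 resolves to physical 0x1000 via the second.
 */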

#endif

int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		/*
		 * Sanity check: Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area, which is the
		 * PCI BIOS area.
		 */
		if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    end < (BIOS_END >> PAGE_SHIFT))
			continue;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
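
/*
 * Example of the rounding above (hypothetical e820 entry): a RAM range
 * starting at 0x9f800 with size 0x800 gives addr = 0xa0 (start rounded
 * up to a full page) and end = 0xa0, so no pfn satisfies
 * addr <= pfn < end and the partial page is not reported as RAM.
 */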

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		err = set_memory_uc(vaddr, nrpages);
		break;
	case IOR_MODE_CACHED:
		err = set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long pfn, offset, last_addr, vaddr;
	struct vm_struct *area;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
	     (pfn << PAGE_SHIFT) < last_addr; pfn++) {
		if (page_is_ram(pfn) && pfn_valid(pfn) &&
		    !PageReserved(pfn_to_page(pfn)))
			return NULL;
	}

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case IOR_MODE_CACHED:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		remove_vm_area((void *)(vaddr & PAGE_MASK));
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, mode) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}
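
/*
 * Worked example of the alignment handling above (hypothetical values):
 * for phys_addr = 0xfee00004 and size = 0x8, last_addr = 0xfee0000b and
 * offset = 0x4; phys_addr is rounded down to 0xfee00000 and size grows to
 * one full page, so the caller gets the mapped page's address plus 0x4.
 */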

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);
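
/*
 * Minimal usage sketch (illustrative only; the device address, size and
 * register offsets below are hypothetical):
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(0xfebf0000, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);
 *	status = readl(regs + 0x08);
 *	iounmap(regs);
 *
 * The returned cookie must only be dereferenced through the mmio helpers
 * and must be released with iounmap().
 */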

void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	paravirt_release_pt(__pa(pgd) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;	/* signed, so the underflow check below is meaningful */

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
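
/*
 * Minimal boot-time usage sketch (illustrative only; the physical address
 * and length are hypothetical): map a firmware table before the normal
 * ioremap machinery is available, copy it out, and drop the temporary
 * fixmap-based mapping again.
 *
 *	void *tbl;
 *	u8 buf[64];
 *
 *	tbl = early_ioremap(0x000f5a40, sizeof(buf));
 *	if (tbl) {
 *		memcpy(buf, tbl, sizeof(buf));
 *		early_iounmap(tbl, sizeof(buf));
 *	}
 *
 * Mappings are limited to NR_FIX_BTMAPS pages each, and at most
 * FIX_BTMAPS_NESTING of them may be live at once.
 */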

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */