arch/x86/mm/ioremap_32.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS 0x100000

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
                        unsigned long flags)
{
        void __iomem *addr;
        struct vm_struct *area;
        unsigned long offset, last_addr;
        pgprot_t prot;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (void __iomem *) phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (phys_addr <= virt_to_phys(high_memory - 1)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for (page = virt_to_page(t_addr);
                     page <= virt_to_page(t_end); page++)
                        if (!PageReserved(page))
                                return NULL;
        }

        prot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP | (flags << 20));
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = (void __iomem *) area->addr;
        if (ioremap_page_range((unsigned long) addr,
                               (unsigned long) addr + size, phys_addr, prot)) {
                vunmap((void __force *) addr);
                return NULL;
        }
        return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
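
/*
 * Worked example (illustrative only, not from the original source): with
 * PAGE_SIZE == 4096, a hypothetical call
 *
 *        void __iomem *p = __ioremap(0xfebc1004, 0x10, 0);
 *
 * ends up with offset = 0x004, phys_addr rounded down to 0xfebc1000 and
 * size rounded up to 0x1000, so a single page is mapped and the caller
 * gets back area->addr + 0x004, which is the non-page-aligned case
 * described in the NOTE above.
 */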

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
        unsigned long last_addr;
        void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);

        if (!p)
                return p;

        /* Guaranteed to be > phys_addr, as per __ioremap() */
        last_addr = phys_addr + size - 1;

        if (last_addr < virt_to_phys(high_memory) - 1) {
                struct page *ppage = virt_to_page(__va(phys_addr));
                unsigned long npages;

                phys_addr &= PAGE_MASK;

                /* This might overflow and become zero.. */
                last_addr = PAGE_ALIGN(last_addr);

                /* .. but that's ok, because modulo-2**n arithmetic will make
                 * the page-aligned "last - first" come out right.
                 */
                npages = (last_addr - phys_addr) >> PAGE_SHIFT;

                if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
                        iounmap(p);
                        p = NULL;
                }
                global_flush_tlb();
        }

        return p;
}
EXPORT_SYMBOL(ioremap_nocache);
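
/*
 * Usage sketch (illustrative only, not part of the original file): a PCI
 * driver would typically map a BAR uncached with ioremap_nocache(), touch
 * it only through the mmio accessors, and balance the mapping with
 * iounmap().  "pdev" and the register offset are hypothetical here.
 *
 *        void __iomem *regs;
 *
 *        regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                               pci_resource_len(pdev, 0));
 *        if (!regs)
 *                return -ENOMEM;
 *        writel(1, regs + 0x04);
 *        iounmap(regs);
 */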

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        /* Reset the direct mapping. Can block */
        if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
                change_page_attr(virt_to_page(__va(p->phys_addr)),
                                 get_vm_area_size(p) >> PAGE_SHIFT,
                                 PAGE_KERNEL);
                global_flush_tlb();
        }

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;

/*
 * A single statically allocated page table that backs the boot-time
 * FIX_BTMAP fixmap slots until early_ioremap_clear() tears it down.
 */
static __initdata unsigned long bm_pte[1024]
                __attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
        return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
        return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

void __init early_ioremap_init(void)
{
        unsigned long *pgd;

        if (early_ioremap_debug)
                printk(KERN_DEBUG "early_ioremap_init()\n");

        pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
        *pgd = __pa(bm_pte) | _PAGE_TABLE;
        memset(bm_pte, 0, sizeof(bm_pte));
        /*
         * The boot-ioremap range spans multiple pgds, for which
         * we are not prepared:
         */
        if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pgd %p != %p\n",
                       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        unsigned long *pgd;

        if (early_ioremap_debug)
                printk(KERN_DEBUG "early_ioremap_clear()\n");

        pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
        *pgd = 0;
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long *pte, phys, addr;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                /* Re-establish fixmaps that were live before paging_init() */
                if (*pte & _PAGE_PRESENT) {
                        phys = *pte & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long *pte, addr = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                *pte = (phys & PAGE_MASK) | pgprot_val(flags);
        else
                *pte = 0;
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        unsigned int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(nesting < 0);

        if (early_ioremap_debug) {
                printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}
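
/*
 * Usage sketch (illustrative only, not part of the original file):
 * early_ioremap() is for boot-time code that must read firmware data
 * before the normal ioremap() machinery is available; mappings may nest
 * up to FIX_BTMAPS_NESTING levels and each one must be undone with
 * early_iounmap() of the same size.  The physical address below is
 * hypothetical.
 *
 *        char buf[64];
 *        void *p = early_ioremap(0x000f5a60, sizeof(buf));
 *
 *        if (p) {
 *                memcpy(buf, p, sizeof(buf));
 *                early_iounmap(p, sizeof(buf));
 *        }
 */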

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}