/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
			       pgprot_t prot)
{
	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
	int err, level;

	/* No change for pages after the last mapping */
	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
		return 0;

	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	vaddr = (unsigned long) __va(phys_addr);

	/*
	 * If there is no identity map for this address,
	 * change_page_attr_addr is unnecessary
	 */
	if (!lookup_address(vaddr, &level))
		return 0;

	/*
	 * Must use an address here and not struct page because the
	 * phys addr can be in a hole between nodes and not have a
	 * memmap entry.
	 */
	err = change_page_attr_addr(vaddr, npages, prot);
	if (!err)
		global_flush_tlb();

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
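/*
 * For example (illustrative numbers only): a request for phys_addr
 * 0xfebc1004 with size 0x10 remaps the whole page at 0xfebc1000 and the
 * caller gets back the new virtual address plus the original offset of
 * 0x004 into that page.
 */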
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		vunmap((void __force *) addr);
		return NULL;
	}

	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
}
EXPORT_SYMBOL(ioremap_nocache);
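
/*
 * Illustrative sketch only (the device, BAR number and register offsets
 * below are made up): a PCI driver would typically map a BAR with
 * ioremap_nocache(), access it through the mmio helpers and release the
 * mapping with iounmap():
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	status = readl(regs + 0x14);
 *	iounmap(regs);
 */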

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

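/*
 * bm_pte is a single page table used for the boot-time fixmap slots before
 * the normal kernel page tables are up. The two helpers below assume the
 * classic two-level i386 layout: bits 31..22 of an address select the pgd
 * (page directory) entry and bits 21..12 select the pte within bm_pte.
 */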
static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

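/*
 * early_ioremap() supports a small, fixed number of simultaneous boot-time
 * mappings (FIX_BTMAPS_NESTING). early_ioremap_nested counts how many are
 * currently outstanding; each nesting level gets its own NR_FIX_BTMAPS-sized
 * slot within the FIX_BTMAP fixmap range.
 */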
int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
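
/*
 * Illustrative sketch only (the physical address is made up): boot code that
 * needs to peek at a firmware table before the normal ioremap() machinery is
 * available can use a temporary boot-time mapping:
 *
 *	void *p;
 *	u32 sig;
 *
 *	p = early_ioremap(0x000fd000, 64);
 *	if (p) {
 *		sig = *(u32 *)p;
 *		early_iounmap(p, 64);
 *	}
 *
 * Mappings must be released with early_iounmap() in reverse order of
 * creation, since the nesting levels are allocated and freed like a stack.
 */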

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}