]> git.proxmox.com Git - mirror_ubuntu-kernels.git/blame - arch/x86/mm/ioremap_64.c
x86: fix ioremap pgprot inconsistency
[mirror_ubuntu-kernels.git] / arch / x86 / mm / ioremap_64.c
CommitLineData
1da177e4
LT
1/*
2 * arch/x86_64/mm/ioremap.c
3 *
4 * Re-map IO memory to kernel address space so that we can access it.
5 * This is needed for high PCI addresses that aren't mapped in the
6 * 640k-1MB IO memory area on PC's
7 *
8 * (C) Copyright 1995 1996 Linus Torvalds
9 */
10
11#include <linux/vmalloc.h>
12#include <linux/init.h>
13#include <linux/slab.h>
2ee60e17 14#include <linux/module.h>
16c564bb 15#include <linux/io.h>
e3ebadd9 16
1da177e4
LT
17#include <asm/pgalloc.h>
18#include <asm/fixmap.h>
1da177e4 19#include <asm/tlbflush.h>
16c564bb 20#include <asm/cacheflush.h>
1da177e4 21#include <asm/proto.h>
306c142f 22#include <asm/e820.h>
1da177e4 23
e3ebadd9
LT
24unsigned long __phys_addr(unsigned long x)
25{
26 if (x >= __START_KERNEL_map)
27 return x - __START_KERNEL_map + phys_base;
28 return x - PAGE_OFFSET;
29}
30EXPORT_SYMBOL(__phys_addr);
31
1da177e4
LT
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 *
 * @phys_addr: start of the physical range that was ioremapped
 * @size:      length of the range in bytes (need not be page aligned)
 * @flags:     page attribute bits (e.g. _PAGE_PCD|_PAGE_PWT) to apply,
 *             0 to restore the default kernel attributes
 *
 * Returns 0 on success or the error from change_page_attr_addr().
 * Can block (page attribute changes and the global TLB flush sleep).
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
					unsigned long flags)
{
	int err = 0;
	/*
	 * Only RAM covered by the kernel direct mapping (below
	 * end_pfn_map) can have a conflicting attribute; MMIO holes
	 * above it have no identity mapping to fix up.
	 */
	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vaddr = (unsigned long) __va(phys_addr);
		int level;

		/*
		 * If there is no identity map for this address,
		 * change_page_attr_addr is unnecessary
		 */
		if (!lookup_address(vaddr, &level))
			return err;
		/*
		 * Must use a address here and not struct page because the phys addr
		 * can be a in hole between nodes and not have an memmap entry.
		 */
		err = change_page_attr_addr(vaddr,npages,MAKE_GLOBAL(__PAGE_KERNEL|flags));
		if (!err)
			/* Flush stale cached translations on all CPUs. */
			global_flush_tlb();
	}
	return err;
}
62
/*
 * Generic mapping function
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * @phys_addr: bus/physical address to map
 * @size:      length of the region in bytes (must be non-zero)
 * @flags:     extra page attribute bits (e.g. _PAGE_PCD|_PAGE_PWT)
 *
 * Returns the kernel virtual address of the mapping (including the
 * sub-page offset of @phys_addr), or NULL on failure.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Compute the protection once, before phys_addr is page-aligned,
	 * so the mapping below and ioremap_change_attr() use the same
	 * attributes.
	 */
	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 * The attribute bits are stashed in the high bits of the vm_struct
	 * flags (flags << 20) so iounmap() can tell whether it must reset
	 * the direct-mapping attributes.
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}
	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
		/*
		 * Attribute fixup failed: drop the stashed attribute bits so
		 * iounmap (via vunmap) won't try to reset attributes.
		 * NOTE(review): the 0xffffff mask only clears bits >= 24, so
		 * flag bits stashed at 20..23 survive — verify intent.
		 */
		area->flags &= 0xffffff;
		vunmap(addr);
		return NULL;
	}
	/* Re-apply the sub-page offset dropped during alignment. */
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);
1da177e4
LT
123
124/**
125 * ioremap_nocache - map bus memory into CPU space
126 * @offset: bus address of the memory
127 * @size: size of the resource to map
128 *
129 * ioremap_nocache performs a platform specific sequence of operations to
130 * make bus memory CPU accessible via the readb/readw/readl/writeb/
131 * writew/writel functions and the other mmio helpers. The returned
132 * address is not guaranteed to be usable directly as a virtual
133 * address.
134 *
135 * This version of ioremap ensures that the memory is marked uncachable
136 * on the CPU as well as honouring existing caching rules from things like
137 * the PCI bus. Note that there are other caches and buffers on many
138 * busses. In particular driver authors should read up on PCI writes
139 *
140 * It's useful if some control registers are in such an area and
141 * write combining or read caching is not desirable:
142 *
143 * Must be freed with iounmap.
144 */
145
146void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
147{
4138cc34 148 return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
1da177e4 149}
2ee60e17 150EXPORT_SYMBOL(ioremap_nocache);
1da177e4 151
bf5421c3
AK
152/**
153 * iounmap - Free a IO remapping
154 * @addr: virtual address from ioremap_*
155 *
156 * Caller must ensure there is only one unmapping for the same pointer.
157 */
1da177e4
LT
158void iounmap(volatile void __iomem *addr)
159{
bf5421c3 160 struct vm_struct *p, *o;
1da177e4
LT
161
162 if (addr <= high_memory)
163 return;
164 if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
165 addr < phys_to_virt(ISA_END_ADDRESS))
166 return;
167
b16b88e5 168 addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
bf5421c3
AK
169 /* Use the vm area unlocked, assuming the caller
170 ensures there isn't another iounmap for the same address
171 in parallel. Reuse of the virtual address is prevented by
172 leaving it in the global lists until we're done with it.
173 cpa takes care of the direct mappings. */
174 read_lock(&vmlist_lock);
175 for (p = vmlist; p; p = p->next) {
176 if (p->addr == addr)
177 break;
178 }
179 read_unlock(&vmlist_lock);
180
181 if (!p) {
7856dfeb 182 printk("iounmap: bad address %p\n", addr);
bf5421c3
AK
183 dump_stack();
184 return;
185 }
186
187 /* Reset the direct mapping. Can block */
188 if (p->flags >> 20)
7856dfeb 189 ioremap_change_attr(p->phys_addr, p->size, 0);
bf5421c3
AK
190
191 /* Finally remove it */
192 o = remove_vm_area((void *)addr);
193 BUG_ON(p != o || o == NULL);
1da177e4
LT
194 kfree(p);
195}
2ee60e17
AK
196EXPORT_SYMBOL(iounmap);
197