/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
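
/*
 * Illustrative note (not part of the original file): kernel_map_sync_memtype()
 * uses ioremap_change_attr() to keep the kernel's direct mapping consistent
 * with a new ioremap alias, roughly:
 *
 *	if (page_is_ram(base >> PAGE_SHIFT))
 *		ioremap_change_attr((unsigned long)__va(base), size, pcm);
 *
 * Otherwise the same physical page could be mapped cacheable through the
 * direct map and uncacheable through the alias, which x86 does not allow.
 */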

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	/* First check if the whole region can be identified as RAM or not */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
			  (unsigned long int)phys_addr,
			  (unsigned long int)last_addr);
		return NULL;
	}

	/* If the region could not be identified (-1), check page by page */
	if (ram_region < 0) {
		pfn      = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
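
/*
 * Worked example of the alignment handling above (illustrative; the address
 * is made up): for phys_addr = 0xfed00042 and size = 4,
 *
 *	offset    = 0x42
 *	phys_addr = 0xfed00000			(aligned down)
 *	size      = PAGE_ALIGN(0xfed00046) - 0xfed00000 = 0x1000
 *
 * so one whole page is mapped and the caller gets vaddr + 0x42 back, as the
 * NOTE above promises.
 */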

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
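
/*
 * Typical driver usage (a hedged sketch, not part of this file; the BAR index
 * and the register offsets below are hypothetical):
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + MY_CTRL_REG);
 *	status = readl(regs + MY_STATUS_REG);
 *	iounmap(regs);
 */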

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
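
/*
 * Where write combining pays off (an illustrative sketch; fb_phys and fb_size
 * are hypothetical): a framebuffer driver maps video memory write-combined so
 * that streaming pixel writes can be burst to the device instead of being
 * issued uncached one at a time:
 *
 *	info->screen_base = ioremap_wc(fb_phys, fb_size);
 *
 * On kernels without PAT this transparently falls back to ioremap_nocache().
 */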

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
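
/*
 * Illustrative pairing (a sketch, not from this file): the /dev/mem read path
 * uses these helpers roughly as
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 *
 * so pages that are not RAM are only transiently mapped via ioremap_cache().
 */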

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
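
/*
 * Usage sketch for the early fixmap machinery above (illustrative, not from
 * this file): before vmalloc is available, early_ioremap() hands out slots
 * between FIX_BTMAP_BEGIN and FIX_BTMAP_END by pointing a bm_pte entry at the
 * requested physical page, roughly
 *
 *	__early_set_fixmap(idx, phys & PAGE_MASK, FIXMAP_PAGE_IO);
 *	return (void __iomem *)(fix_to_virt(idx) + (phys & ~PAGE_MASK));
 *
 * and early_iounmap() later clears the slot by passing a zero pgprot.
 */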