/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 *
 * Reworked for PowerPC by various contributors. Moved from
 * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
 */

#include <linux/export.h>
#include <linux/highmem.h>

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
#include <asm/tlbflush.h>

/*
 * Map a (possibly highmem) page into kernel virtual space with the given
 * protection bits, using a per-CPU fixmap slot.  Must be paired with
 * __kunmap_atomic(); preemption and pagefaults remain disabled for the
 * lifetime of the mapping, so it is usable from IRQ context.
 *
 * Returns the kernel virtual address of the mapped page.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/*
	 * Preemption must be off before picking a per-CPU fixmap slot;
	 * pagefaults are disabled to make the section truly atomic.
	 */
	preempt_disable();
	pagefault_disable();
	/*
	 * Lowmem pages are permanently mapped - hand back the direct
	 * mapping.  Note: preempt/pagefault stay disabled; the matching
	 * __kunmap_atomic() re-enables them on its lowmem path.
	 */
	if (!PageHighMem(page))
		return page_address(page);

	/* Claim the next free slot in this CPU's kmap_atomic stack. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* The slot must be empty - catch a missing kunmap by a prior user. */
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
	/*
	 * The slot is per-CPU and preemption is off, so only this CPU's
	 * TLB needs flushing.
	 */
	local_flush_tlb_page(NULL, vaddr);

	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);
58 | ||
/*
 * Tear down a mapping established by kmap_atomic_prot() and re-enable
 * pagefaults and preemption (mirroring the disable order on the map side).
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	/*
	 * Addresses below the fixmap area came from page_address() on a
	 * lowmem page - nothing was mapped, just undo the disables.
	 */
	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		pagefault_enable();
		preempt_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned int idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		/* The address handed in must match the slot being popped. */
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remap it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_page(NULL, vaddr);
	}
#endif
	/*
	 * NOTE: without DEBUG_HIGHMEM the stale pte is left in place;
	 * presumably the next kmap of this slot simply overwrites it -
	 * the BUG_ON on the map side only checks under the same config.
	 */
	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);