/* arch/mips/mm/highmem.c */
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

/* pte of the first fixmap kmap slot, cached by kmap_init() */
static pte_t *kmap_pte;

/* pfn bounds of the highmem region */
unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        addr = kmap_high(page);
        /* drop any stale TLB entry for this address */
        flush_tlb_one((unsigned long)addr);

        return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *kmap_atomic(struct page *page)
{
        unsigned long vaddr;
        int idx, type;

        preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
        set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
        local_flush_tlb_one((unsigned long)vaddr);

        return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int type __maybe_unused;

        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                preempt_enable();
                return;
        }

        type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
        {
                int idx = type + KM_TYPE_NR * smp_processor_id();

                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

                /*
                 * force other mappings to Oops if they try to access
                 * this pte without first remapping it
                 */
                pte_clear(&init_mm, vaddr, kmap_pte-idx);
                local_flush_tlb_one(vaddr);
        }
#endif
        kmap_atomic_idx_pop();
        pagefault_enable();
        preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
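
/*
 * Illustrative sketch, not part of the original file: the typical
 * kmap_atomic()/kunmap_atomic() usage pattern described in the comment
 * above kmap_atomic().  The helper name below is hypothetical.
 */
static void __maybe_unused example_zero_first_byte(struct page *page)
{
        /* pagefaults and preemption are disabled from here on */
        char *vaddr = kmap_atomic(page);

        /* only short, non-sleeping work while the mapping is held */
        vaddr[0] = 0;

        /* releases the per-CPU fixmap slot and re-enables preemption */
        kunmap_atomic(vaddr);
}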

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
        unsigned long vaddr;
        int idx, type;

        preempt_disable();
        pagefault_disable();

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
        flush_tlb_one(vaddr);

        return (void*) vaddr;
}
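
/*
 * Illustrative sketch, not part of the original file: using
 * kmap_atomic_pfn() to read one word from a physical frame that may
 * have no struct page.  The helper name and offset handling are
 * hypothetical; offset is assumed to be word-aligned.
 */
static u32 __maybe_unused example_read_pfn_word(unsigned long pfn,
                                                unsigned long offset)
{
        void *vaddr = kmap_atomic_pfn(pfn);
        u32 val = *(u32 *)(vaddr + (offset & ~PAGE_MASK));

        kunmap_atomic(vaddr);
        return val;
}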

void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}