/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>

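/*
 * Per-CPU nesting index for the non-sleeping kmap_atomic() slots: each
 * nesting level on a CPU (e.g. an interrupt on top of process context)
 * claims its own fixmap entry, and this counter tracks the depth.
 */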
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DEFINE_PER_CPU(int, __kmap_atomic_idx);
#endif

/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
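/*
 * For example: a fresh kmap_high() takes an entry from 0 to 2 (mapped,
 * one user), kunmap_high() drops it back to 1 (unused but still mapped),
 * and flush_all_zero_pkmaps() finally returns it to 0 (reusable).
 */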
#ifdef CONFIG_HIGHMEM

unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);


EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);

unsigned int nr_free_highpages(void)
{
	pg_data_t *pgdat;
	unsigned int pages = 0;

	for_each_online_pgdat(pgdat) {
		pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
					 NR_FREE_PAGES);
		if (zone_movable_is_highmem())
			pages += zone_page_state(
					&pgdat->node_zones[ZONE_MOVABLE],
					NR_FREE_PAGES);
	}

	return pages;
}

static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the IRQ disabling out of the locking in that case to avoid potentially
 * useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()		spin_lock_irq(&kmap_lock)
#define unlock_kmap()		spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)	spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)	spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()		spin_lock(&kmap_lock)
#define unlock_kmap()		spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)	\
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)	\
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

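/*
 * Translate a pkmap (kmap'ed) virtual address back to its struct page;
 * anything outside the pkmap window is assumed to be a lowmem
 * linear-map address.
 */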
struct page *kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	/* upper bound is exclusive: index LAST_PKMAP is out of range */
	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT;
		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}
EXPORT_SYMBOL(kmap_to_page);

static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use.  Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped.
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, (unsigned long)page_address(page),
			  &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}
150 | ||
77f6078a RD |
151 | /** |
152 | * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings | |
153 | */ | |
ce6234b5 JF |
154 | void kmap_flush_unused(void) |
155 | { | |
3297e760 | 156 | lock_kmap(); |
ce6234b5 | 157 | flush_all_zero_pkmaps(); |
3297e760 | 158 | unlock_kmap(); |
ce6234b5 JF |
159 | } |
160 | ||
static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;

start:
	count = LAST_PKMAP;
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
		if (!last_pkmap_nr) {
			flush_all_zero_pkmaps();
			count = LAST_PKMAP;
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(&pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *)vaddr;
}

EXPORT_SYMBOL(kmap_high);
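
/*
 * Illustrative sketch, not part of this file: kmap_high()/kunmap_high()
 * are normally reached through the kmap()/kunmap() wrappers, with every
 * map paired with an unmap once the access is done.  The helper below is
 * hypothetical and kept uncompiled.
 */
#if 0	/* example only */
static void copy_from_highpage(struct page *page, void *dst)
{
	void *vaddr = kmap(page);	/* may sleep; calls kmap_high() */

	memcpy(dst, vaddr, PAGE_SIZE);
	kunmap(page);			/* drops the pkmap_count reference */
}
#endif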

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists.  If and only if a non-NULL address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *)vaddr;
}
#endif

/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock.  As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock.  Simply
		 * test if the queue is empty.
		 */
		need_wakeup = waitqueue_active(&pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(&pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);
#endif

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7
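/* hash_ptr(page, PA_HASH_ORDER) spreads pages over 1 << 7 == 128 buckets */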

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

/*
 * page_address_map freelist, allocated from page_address_maps.
 */
static struct list_head page_address_pool;	/* freelist */
static spinlock_t pool_lock;			/* protects page_address_pool */

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}

EXPORT_SYMBOL(page_address);

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		BUG_ON(list_empty(&page_address_pool));

		spin_lock_irqsave(&pool_lock, flags);
		pam = list_entry(page_address_pool.next,
				struct page_address_map, list);
		list_del(&pam->list);
		spin_unlock_irqrestore(&pool_lock, flags);

		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				spin_lock_irqsave(&pool_lock, flags);
				list_add_tail(&pam->list, &page_address_pool);
				spin_unlock_irqrestore(&pool_lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

static struct page_address_map page_address_maps[LAST_PKMAP];

void __init page_address_init(void)
{
	int i;

	INIT_LIST_HEAD(&page_address_pool);
	for (i = 0; i < ARRAY_SIZE(page_address_maps); i++)
		list_add(&page_address_maps[i].list, &page_address_pool);
	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
	spin_lock_init(&pool_lock);
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */