mm/highmem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
#include <linux/vmalloc.h>

/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
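/*
 * Worked example of the count transitions (for illustration): the first
 * kmap() of an unmapped page takes its slot from 0 to 2 in kmap_high()
 * (1 for the mapping itself plus 1 for the new user); a second kmap() of
 * the same page makes it 3. Each kunmap_high() decrements the count, but
 * it stops at 1 - the slot only drops back to 0, and becomes reusable, in
 * flush_all_zero_pkmaps() once the TLB has been flushed.
 */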
#ifdef CONFIG_HIGHMEM

/*
 * Architecture with aliasing data cache may define the following family of
 * helper functions in its asm/highmem.h to control cache color of virtual
 * addresses where physical memory pages are mapped by kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine color of virtual address where the page should be mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color

/*
 * Get next index for mapping inside PKMAP region for page with given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}

/*
 * Determine if page index inside PKMAP region (pkmap_nr) of given color
 * has wrapped around PKMAP region end. When this happens an attempt to
 * flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}

/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}

/*
 * Get head of a wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif
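
/*
 * Illustrative sketch only (not taken from any real architecture): an
 * architecture with a virtually indexed, aliasing data cache could supply
 * overrides like the following from its asm/highmem.h, so that a page is
 * only mapped at PKMAP addresses of a matching cache colour. The
 * two-colour split and the names below are made up for the example; the
 * remaining hooks (no_more_pkmaps(), get_pkmap_entries_count(),
 * get_pkmap_wait_queue_head()) would be overridden along the same lines.
 */
#if 0	/* example only */
#define PKMAP_COLORS	2

static inline unsigned int get_pkmap_color(struct page *page)
{
	/* pages of the same colour share the low bit of their PFN */
	return page_to_pfn(page) & (PKMAP_COLORS - 1);
}
#define get_pkmap_color get_pkmap_color

static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr[PKMAP_COLORS];

	/* step only through PKMAP slots whose low bit matches the colour */
	last_pkmap_nr[color] = (last_pkmap_nr[color] + PKMAP_COLORS) &
			       LAST_PKMAP_MASK;
	return last_pkmap_nr[color] | color;
}
#endif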

atomic_long_t _totalhigh_pages __read_mostly;
EXPORT_SYMBOL(_totalhigh_pages);

unsigned int __nr_free_highpages (void)
{
	struct zone *zone;
	unsigned int pages = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	return pages;
}

static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t * pkmap_page_table;

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the disabling of IRQs out of the locking in that case to avoid
 * potentially useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()		spin_lock_irq(&kmap_lock)
#define unlock_kmap()		spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)	spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)	spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()		spin_lock(&kmap_lock)
#define unlock_kmap()		spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)	\
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)	\
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

struct page *__kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = PKMAP_NR(addr);
		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}
EXPORT_SYMBOL(__kmap_to_page);

static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

void __kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void*) vaddr;
}

EXPORT_SYMBOL(kmap_high);

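/*
 * Illustrative caller sketch: kmap_high() and kunmap_high() are normally
 * reached through the kmap()/kunmap() wrappers in <linux/highmem.h>, which
 * use page_address() directly for lowmem pages. A hypothetical helper that
 * copies out of a possibly-highmem page could look like this (the function
 * name is made up for the example):
 */
#if 0	/* example only */
static void example_copy_from_page(void *dst, struct page *page,
				   size_t offset, size_t len)
{
	char *vaddr = kmap(page);	/* may sleep waiting for a PKMAP slot */

	memcpy(dst, vaddr + offset, len);
	kunmap(page);
}
#endif
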
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists. If and only if a non null address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void*) vaddr;
}
#endif

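/*
 * Illustrative calling pattern (the helper below is made up): because
 * kmap_high_get() may be used from any context, a caller such as an
 * architecture's cache maintenance code can probe for an existing kernel
 * mapping without sleeping, and only pairs the call with kunmap_high()
 * when a non-NULL address came back.
 */
#if 0	/* example only */
static void example_touch_existing_kmap(struct page *page)
{
	void *addr = kmap_high_get(page);

	if (addr) {
		/* operate on the existing kernel alias of the page here */
		kunmap_high(page);
	}
}
#endif
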
/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock. As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock. Simply
		 * test if the queue is empty.
		 */
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}
EXPORT_SYMBOL(kunmap_high);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	for (i = 0; i < compound_nr(page); i++) {
		void *kaddr = NULL;

		if (start1 < PAGE_SIZE || start2 < PAGE_SIZE)
			kaddr = kmap_atomic(page + i);

		if (start1 >= PAGE_SIZE) {
			start1 -= PAGE_SIZE;
			end1 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);

			if (end1 > start1)
				memset(kaddr + start1, 0, this_end - start1);
			end1 -= this_end;
			start1 = 0;
		}

		if (start2 >= PAGE_SIZE) {
			start2 -= PAGE_SIZE;
			end2 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);

			if (end2 > start2)
				memset(kaddr + start2, 0, this_end - start2);
			end2 -= this_end;
			start2 = 0;
		}

		if (kaddr) {
			kunmap_atomic(kaddr);
			flush_dcache_page(page + i);
		}

		if (!end1 && !end2)
			break;
	}

	BUG_ON((start1 | start2 | end1 | end2) != 0);
}
EXPORT_SYMBOL(zero_user_segments);
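
/*
 * Illustrative usage sketch: a typical caller (for example a filesystem
 * write path) zeroes everything in a, possibly compound, page outside the
 * byte range it is about to fill. The function name and the "from"/"to"
 * parameters are made up for the example; offsets are in bytes from the
 * start of the page.
 */
#if 0	/* example only */
static void example_zero_around_range(struct page *page, unsigned from,
				      unsigned to)
{
	/* zero [0, from) and [to, page_size(page)) around the written range */
	zero_user_segments(page, 0, from, to, page_size(page));
}
#endif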
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HIGHMEM */

#ifdef CONFIG_KMAP_LOCAL

#include <asm/kmap_size.h>

/*
 * With DEBUG_KMAP_LOCAL the stack depth is doubled and every second
 * slot is unused which acts as a guard page
 */
#ifdef CONFIG_DEBUG_KMAP_LOCAL
# define KM_INCR	2
#else
# define KM_INCR	1
#endif

static inline int kmap_local_idx_push(void)
{
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	current->kmap_ctrl.idx += KM_INCR;
	BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
	return current->kmap_ctrl.idx - 1;
}

static inline int kmap_local_idx(void)
{
	return current->kmap_ctrl.idx - 1;
}

static inline void kmap_local_idx_pop(void)
{
	current->kmap_ctrl.idx -= KM_INCR;
	BUG_ON(current->kmap_ctrl.idx < 0);
}

#ifndef arch_kmap_local_post_map
# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
#endif

#ifndef arch_kmap_local_pre_unmap
# define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_post_unmap
# define arch_kmap_local_post_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_map_idx
#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_unmap_idx
#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_high_get
static inline void *arch_kmap_local_high_get(struct page *page)
{
	return NULL;
}
#endif

/* Unmap a local mapping which was obtained by kmap_high_get() */
static inline bool kmap_high_unmap_local(unsigned long vaddr)
{
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
		return true;
	}
#endif
	return false;
}

static inline int kmap_local_calc_idx(int idx)
{
	return idx + KM_MAX_IDX * smp_processor_id();
}

static pte_t *__kmap_pte;

static pte_t *kmap_get_pte(void)
{
	if (!__kmap_pte)
		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	return __kmap_pte;
}

void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	pte_t pteval, *kmap_pte = kmap_get_pte();
	unsigned long vaddr;
	int idx;

	/*
	 * Disable migration so the resulting virtual address is stable
	 * across preemption.
	 */
	migrate_disable();
	preempt_disable();
	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte - idx)));
	pteval = pfn_pte(pfn, prot);
	set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
	arch_kmap_local_post_map(vaddr, pteval);
	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
	preempt_enable();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);

void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	void *kmap;

	/*
	 * To broaden the usage of the actual kmap_local() machinery always map
	 * pages when debugging is enabled and the architecture has no problems
	 * with alias mappings.
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) && !PageHighMem(page))
		return page_address(page);

	/* Try kmap_high_get() if architecture has it enabled */
	kmap = arch_kmap_local_high_get(page);
	if (kmap)
		return kmap;

	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
}
EXPORT_SYMBOL(__kmap_local_page_prot);

void kunmap_local_indexed(void *vaddr)
{
	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
	pte_t *kmap_pte = kmap_get_pte();
	int idx;

	if (addr < __fix_to_virt(FIX_KMAP_END) ||
	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP)) {
			/* This _should_ never happen! See above. */
			WARN_ON_ONCE(1);
			return;
		}
		/*
		 * Handle mappings which were obtained by kmap_high_get()
		 * first as the virtual address of such mappings is below
		 * PAGE_OFFSET. Warn for all other addresses which are in
		 * the user space part of the virtual address space.
		 */
		if (!kmap_high_unmap_local(addr))
			WARN_ON_ONCE(addr < PAGE_OFFSET);
		return;
	}

	preempt_disable();
	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	arch_kmap_local_pre_unmap(addr);
	pte_clear(&init_mm, addr, kmap_pte - idx);
	arch_kmap_local_post_unmap(addr);
	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
	kmap_local_idx_pop();
	preempt_enable();
	migrate_enable();
}
EXPORT_SYMBOL(kunmap_local_indexed);
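
/*
 * Illustrative caller sketch: these routines back the kmap_local_page() and
 * kunmap_local() helpers in <linux/highmem.h>. The mappings are per task
 * and strictly nested, so they must be released in the reverse order of
 * acquisition. The function below is a made-up example:
 */
#if 0	/* example only */
static void example_zero_two_pages(struct page *a, struct page *b)
{
	void *va = kmap_local_page(a);
	void *vb = kmap_local_page(b);

	memset(va, 0, PAGE_SIZE);
	memset(vb, 0, PAGE_SIZE);

	kunmap_local(vb);	/* LIFO: last mapped, first unmapped */
	kunmap_local(va);
}
#endif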

/*
 * Invoked before switch_to(). This is safe even when an interrupt which
 * needs a kmap_local happens during or after clearing the maps, because
 * task::kmap_ctrl.idx is not modified by the unmapping code, so a nested
 * kmap_local will use the next unused index and restore the index on
 * unmap. The already cleared kmaps of the outgoing task are irrelevant
 * because the interrupt context does not know about them. The same applies
 * when scheduling back in for an interrupt which happens before the
 * restore is complete.
 */
void __kmap_local_sched_out(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte = kmap_get_pte();
	int i;

	/* Clear kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
			WARN_ON_ONCE(!pte_none(pteval));
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/*
		 * This is a horrible hack for XTENSA to calculate the
		 * coloured PTE index. Uses the PFN encoded into the pteval
		 * and the map index calculation because the actual mapped
		 * virtual address is not stored in task::kmap_ctrl.
		 * For any sane architecture this is optimized out.
		 */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));

		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		arch_kmap_local_pre_unmap(addr);
		pte_clear(&init_mm, addr, kmap_pte - idx);
		arch_kmap_local_post_unmap(addr);
	}
}

void __kmap_local_sched_in(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte = kmap_get_pte();
	int i;

	/* Restore kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
			WARN_ON_ONCE(!pte_none(pteval));
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/* See comment in __kmap_local_sched_out() */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
		arch_kmap_local_post_map(addr, pteval);
	}
}

void kmap_local_fork(struct task_struct *tsk)
{
	if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
		memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
}

#endif

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}

EXPORT_SYMBOL(page_address);

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */