/*
 * fixmap.h: compile-time virtual memory allocation
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998 Ingo Molnar
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
 */
14 #ifndef _ASM_X86_FIXMAP_H
15 #define _ASM_X86_FIXMAP_H
18 #include <linux/kernel.h>
20 #include <asm/apicdef.h>
23 #include <linux/threads.h>
24 #include <asm/kmap_types.h>
26 #include <uapi/asm/vsyscall.h>
/*
 * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall
 * uses fixmaps that relies on FIXADDR_TOP for proper address calculation.
 * Because of this, FIXADDR_TOP x86 integration was left as later work.
 */
#ifdef CONFIG_X86_32
/* used by vmalloc.c, vsyscall.lds.S.
 *
 * Leave one empty page between vmalloc'ed areas and
 * the start of the fixmap.
 */
extern unsigned long __FIXADDR_TOP;
#define FIXADDR_TOP	((unsigned long)__FIXADDR_TOP)
#else
/* On 64-bit the top is fixed: the PMD-aligned slot just under vsyscall. */
#define FIXADDR_TOP	(round_up(VSYSCALL_ADDR + PAGE_SIZE, 1<<PMD_SHIFT) - \
			 PAGE_SIZE)
#endif
48 * cpu_entry_area is a percpu region in the fixmap that contains things
49 * needed by the CPU and early entry/exit code. Real types aren't used
50 * for all fields here to avoid circular header dependencies.
52 * Every field is a virtual alias of some other allocated backing store.
53 * There is no direct allocation of a struct cpu_entry_area.
55 struct cpu_entry_area
{
59 #define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE)
62 * Here we define all the compile-time 'special' virtual
63 * addresses. The point is to have a constant address at
64 * compile time, but to set the physical address only
65 * in the boot process.
66 * for x86_32: We allocate these special addresses
67 * from the end of virtual memory (0xfffff000) backwards.
68 * Also this lets us do fail-safe vmalloc(), we
69 * can guarantee that these special addresses and
70 * vmalloc()-ed addresses never overlap.
72 * These 'compile-time allocated' memory buffers are
73 * fixed-size 4k pages (or larger if used with an increment
74 * higher than 1). Use set_fixmap(idx,phys) to associate
75 * physical memory with fixmap indices.
77 * TLB entries of such buffers will not be flushed across
80 enum fixed_addresses
{
84 #ifdef CONFIG_X86_VSYSCALL_EMULATION
85 VSYSCALL_PAGE
= (FIXADDR_TOP
- VSYSCALL_ADDR
) >> PAGE_SHIFT
,
89 FIX_EARLYCON_MEM_BASE
,
90 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
93 #ifdef CONFIG_X86_LOCAL_APIC
94 FIX_APIC_BASE
, /* local (CPU) APIC) -- required for SMP or not */
96 #ifdef CONFIG_X86_IO_APIC
98 FIX_IO_APIC_BASE_END
= FIX_IO_APIC_BASE_0
+ MAX_IO_APICS
- 1,
100 FIX_RO_IDT
, /* Virtual mapping for read-only IDT */
102 FIX_KMAP_BEGIN
, /* reserved pte's for temporary kernel mappings */
103 FIX_KMAP_END
= FIX_KMAP_BEGIN
+(KM_TYPE_NR
*NR_CPUS
)-1,
104 #ifdef CONFIG_PCI_MMCONFIG
108 #ifdef CONFIG_PARAVIRT
109 FIX_PARAVIRT_BOOTMAP
,
111 FIX_TEXT_POKE1
, /* reserve 2 pages for text_poke() */
112 FIX_TEXT_POKE0
, /* first page is last, because allocation is backward */
113 #ifdef CONFIG_X86_INTEL_MID
116 /* Fixmap entries to remap the GDTs, one per processor. */
117 FIX_CPU_ENTRY_AREA_TOP
,
118 FIX_CPU_ENTRY_AREA_BOTTOM
= FIX_CPU_ENTRY_AREA_TOP
+ (CPU_ENTRY_AREA_PAGES
* NR_CPUS
) - 1,
120 #ifdef CONFIG_ACPI_APEI_GHES
121 /* Used for GHES mapping from assorted contexts */
126 __end_of_permanent_fixed_addresses
,
129 * 512 temporary boot-time mappings, used by early_ioremap(),
130 * before ioremap() is functional.
132 * If necessary we round it up to the next 512 pages boundary so
133 * that we can have a single pgd entry and a single pte table:
135 #define NR_FIX_BTMAPS 64
136 #define FIX_BTMAPS_SLOTS 8
137 #define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
139 (__end_of_permanent_fixed_addresses
^
140 (__end_of_permanent_fixed_addresses
+ TOTAL_FIX_BTMAPS
- 1)) &
142 ? __end_of_permanent_fixed_addresses
+ TOTAL_FIX_BTMAPS
-
143 (__end_of_permanent_fixed_addresses
& (TOTAL_FIX_BTMAPS
- 1))
144 : __end_of_permanent_fixed_addresses
,
145 FIX_BTMAP_BEGIN
= FIX_BTMAP_END
+ TOTAL_FIX_BTMAPS
- 1,
149 #ifdef CONFIG_INTEL_TXT
152 __end_of_fixed_addresses
/* Carve 'reserve' bytes off the top of the 32-bit fixmap address space. */
extern void reserve_top_address(unsigned long reserve);

/* Size/base of the permanent fixmap region (excludes boot-time btmaps). */
#define FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START		(FIXADDR_TOP - FIXADDR_SIZE)
161 extern int fixmaps_set
;
163 extern pte_t
*kmap_pte
;
164 #define kmap_prot PAGE_KERNEL
165 extern pte_t
*pkmap_page_table
;
167 void __native_set_fixmap(enum fixed_addresses idx
, pte_t pte
);
168 void native_set_fixmap(enum fixed_addresses idx
,
169 phys_addr_t phys
, pgprot_t flags
);
171 #ifndef CONFIG_PARAVIRT
172 static inline void __set_fixmap(enum fixed_addresses idx
,
173 phys_addr_t phys
, pgprot_t flags
)
175 native_set_fixmap(idx
, phys
, flags
);
179 #include <asm-generic/fixmap.h>
181 #define __late_set_fixmap(idx, phys, flags) __set_fixmap(idx, phys, flags)
182 #define __late_clear_fixmap(idx) __set_fixmap(idx, 0, __pgprot(0))
184 void __early_set_fixmap(enum fixed_addresses idx
,
185 phys_addr_t phys
, pgprot_t flags
);
187 static inline unsigned int __get_cpu_entry_area_page_index(int cpu
, int page
)
189 BUILD_BUG_ON(sizeof(struct cpu_entry_area
) % PAGE_SIZE
!= 0);
191 return FIX_CPU_ENTRY_AREA_BOTTOM
- cpu
*CPU_ENTRY_AREA_PAGES
- page
;
/* As above, but takes a byte offset (must be page-aligned) into the area. */
#define __get_cpu_entry_area_offset_index(cpu, offset) ({		\
	BUILD_BUG_ON(offset % PAGE_SIZE != 0);				\
	__get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE);	\
	})

/* Fixmap index of a named cpu_entry_area field for a given CPU. */
#define get_cpu_entry_area_index(cpu, field) \
	__get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field))
202 static inline struct cpu_entry_area
*get_cpu_entry_area(int cpu
)
204 return (struct cpu_entry_area
*)__fix_to_virt(__get_cpu_entry_area_page_index(cpu
, 0));
207 #endif /* !__ASSEMBLY__ */
208 #endif /* _ASM_X86_FIXMAP_H */