/* arch/x86/kernel/setup_percpu.c */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

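/*
 * Until the real per-cpu areas are allocated below, percpu accesses
 * on 64-bit resolve into the initial percpu load image
 * (__per_cpu_load); 32-bit simply starts with a zero offset.
 */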
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

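/*
 * per_cpu(var, cpu) adds __per_cpu_offset[cpu] to the address of the
 * percpu symbol; every slot starts out at BOOT_PERCPU_OFFSET and is
 * rewritten in setup_per_cpu_areas() once the real areas exist.
 */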
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: allocation size in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
                                        unsigned long align)
{
        const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int node = early_cpu_to_node(cpu);
        void *ptr;

        if (!node_online(node) || !NODE_DATA(node)) {
                ptr = __alloc_bootmem_nopanic(size, align, goal);
                pr_info("cpu %d has no node %d or node-local memory\n",
                        cpu, node);
                pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
                         cpu, size, __pa(ptr));
        } else {
                ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
                                                   size, align, goal);
                pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
                         cpu, size, node, __pa(ptr));
        }
        return ptr;
#else
        return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * 4k page allocator
 *
 * This is the basic allocator.  The static percpu area is allocated
 * page-by-page and most of the initialization is done by the generic
 * setup function.
 */
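/*
 * pcpu4k_pages holds num_possible_cpus() * pcpu4k_nr_static_pages
 * page pointers in one flat array; the page backing @pageno of @cpu
 * lives at index cpu * pcpu4k_nr_static_pages + pageno.
 */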
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_nr_static_pages __initdata;

static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
        if (pageno < pcpu4k_nr_static_pages)
                return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
        return NULL;
}

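/*
 * Called back by pcpu_setup_first_chunk() for each percpu address it
 * maps; make sure a pte for @addr exists in the kernel page tables.
 */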
static void __init pcpu4k_populate_pte(unsigned long addr)
{
        populate_extra_pte(addr);
}

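/*
 * Allocate the page-pointer array, back every static percpu page of
 * every possible cpu with node-local bootmem, copy in the initial
 * percpu image, and hand the pages to pcpu_setup_first_chunk().
 * Returns the percpu unit size on success and -errno on failure.
 */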
static ssize_t __init setup_pcpu_4k(size_t static_size)
{
        size_t pages_size;
        unsigned int cpu;
        int i, j;
        ssize_t ret;

        pcpu4k_nr_static_pages = PFN_UP(static_size);

        /* unaligned allocations can't be freed, round up to page size */
        pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
                               * sizeof(pcpu4k_pages[0]));
        pcpu4k_pages = alloc_bootmem(pages_size);

        /* allocate and copy */
        j = 0;
        for_each_possible_cpu(cpu)
                for (i = 0; i < pcpu4k_nr_static_pages; i++) {
                        void *ptr;

                        ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
                        if (!ptr)
                                goto enomem;

                        memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
                        pcpu4k_pages[j++] = virt_to_page(ptr);
                }

        /* we're ready, commit */
        pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
                pcpu4k_nr_static_pages, static_size);

        ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL,
                                     pcpu4k_populate_pte);
        goto out_free_ar;

enomem:
        while (--j >= 0)
                free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
        ret = -ENOMEM;
out_free_ar:
        free_bootmem(__pa(pcpu4k_pages), pages_size);
        return ret;
}

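/*
 * On 32-bit, percpu data is reached through %fs, so each cpu gets a
 * GDT_ENTRY_PERCPU data segment whose base is its percpu offset (the
 * 0xFFFFF limit with the granularity bit set spans all 4GB).
 */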
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        struct desc_struct gdt;

        pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
                        0x2 | DESCTYPE_S, 0x8);
        gdt.s = 1;
        write_gdt_entry(get_cpu_gdt_table(cpu),
                        GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
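/*
 * Allocate the first percpu chunk, then compute each possible cpu's
 * offset, switch segment bases over, and migrate the early boot-time
 * per-cpu maps into the freshly allocated areas.
 */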
void __init setup_per_cpu_areas(void)
{
        size_t static_size = __per_cpu_end - __per_cpu_start;
        unsigned int cpu;
        unsigned long delta;
        size_t pcpu_unit_size;
        ssize_t ret;

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        /* allocate percpu area */
        ret = setup_pcpu_4k(static_size);
        if (ret < 0)
                panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
                      static_size, ret);

        pcpu_unit_size = ret;

        /* alrighty, percpu areas up and running */
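        /*
         * A static percpu variable's linked address lies between
         * __per_cpu_start and __per_cpu_end; delta plus the cpu's
         * unit offset relocates it into that cpu's actual area.
         */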
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
                per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
                setup_stack_canary_segment(cpu);
                /*
                 * Copy data used in early init routines from the
                 * initial arrays to the per cpu data areas.  These
                 * arrays then become expendable and the *_early_ptr's
                 * are zeroed indicating that the static arrays are
                 * gone.
                 */
#ifdef CONFIG_X86_LOCAL_APIC
                per_cpu(x86_cpu_to_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
                        IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
                /*
                 * Up to this point, the boot CPU has been using the
                 * .data.init area.  Reload any changed state for the
                 * boot CPU.
                 */
                if (cpu == boot_cpu_id)
                        switch_to_new_gdt(cpu);

                DBG("PERCPU: cpu %4d offset %lx\n", cpu, per_cpu_offset(cpu));
        }

        /* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}