/*
 * c 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/smp.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>

#include <asm/lppaca.h>
#include <asm/paca.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/kexec.h>
#ifdef CONFIG_PPC_PSERIES

/*
 * The structure which the hypervisor knows about - this structure
 * should not cross a page boundary. The vpa_init/register_vpa call
 * is now known to fail if the lppaca structure crosses a page
 * boundary. The lppaca is also used on POWER5 pSeries boxes.
 * The lppaca is 640 bytes long, and cannot readily
 * change since the hypervisor knows its layout, so a 1kB alignment
 * will suffice to ensure that it doesn't cross a page boundary.
 */
struct lppaca lppaca[] = {
	[0 ... (NR_LPPACAS - 1)] = {
		.desc = cpu_to_be32(0xd397d781),	/* "LpPa" */
		.size = cpu_to_be16(sizeof(struct lppaca)),
		.fpregs_in_use = 1,
		.slb_count = cpu_to_be16(64),
	},
};
static struct lppaca *extra_lppacas;
static long __initdata lppaca_size;
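
/*
 * Reserve one contiguous block of lppacas for the CPUs beyond the
 * NR_LPPACAS statically-initialised entries, below the early
 * allocation limit. Nothing to do in HV mode: there is no hypervisor
 * above us to share an lppaca with.
 */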
static void __init allocate_lppacas(int nr_cpus, unsigned long limit)
{
	if (early_cpu_has_feature(CPU_FTR_HVMODE))
		return;

	if (nr_cpus <= NR_LPPACAS)
		return;

	lppaca_size = PAGE_ALIGN(sizeof(struct lppaca) *
				 (nr_cpus - NR_LPPACAS));
	extra_lppacas = __va(memblock_alloc_base(lppaca_size,
						 PAGE_SIZE, limit));
}
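
/*
 * Return the lppaca for a given logical cpu: one of the static
 * entries for the first NR_LPPACAS cpus, otherwise a slot from the
 * extra block allocated above, initialised from the lppaca[0]
 * template.
 */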
static struct lppaca * __init new_lppaca(int cpu)
{
	struct lppaca *lp;

	if (early_cpu_has_feature(CPU_FTR_HVMODE))
		return NULL;

	if (cpu < NR_LPPACAS)
		return &lppaca[cpu];

	lp = extra_lppacas + (cpu - NR_LPPACAS);
	*lp = lppaca[0];

	return lp;
}
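
/*
 * Trim the extra lppaca block back to cover only the possible CPUs,
 * returning the unused tail of the allocation to memblock.
 */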
static void __init free_lppacas(void)
{
	long new_size = 0, nr;

	if (early_cpu_has_feature(CPU_FTR_HVMODE))
		return;

	if (!lppaca_size)
		return;

	nr = num_possible_cpus() - NR_LPPACAS;
	if (nr > 0)
		new_size = PAGE_ALIGN(nr * sizeof(struct lppaca));

	if (new_size >= lppaca_size)
		return;

	memblock_free(__pa(extra_lppacas) + new_size, lppaca_size - new_size);
	lppaca_size = new_size;
}
#else

static inline void allocate_lppacas(int nr_cpus, unsigned long limit) { }
static inline void free_lppacas(void) { }

#endif /* CONFIG_PPC_PSERIES */
#ifdef CONFIG_PPC_BOOK3S_64

/*
 * 3 persistent SLBs are registered here. The buffer will be zero
 * initially, hence will all be invalid until we actually write them.
 *
 * If you make the number of persistent SLB entries dynamic, please also
 * update PR KVM to flush and restore them accordingly.
 */
static struct slb_shadow * __initdata slb_shadow;
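
/*
 * Allocate one page-aligned SLB shadow buffer covering all cpus. The
 * radix MMU does not use the SLB, so nothing is allocated there.
 */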
static void __init allocate_slb_shadows(int nr_cpus, int limit)
{
	int size = PAGE_ALIGN(sizeof(struct slb_shadow) * nr_cpus);

	if (early_radix_enabled())
		return;

	slb_shadow = __va(memblock_alloc_base(size, PAGE_SIZE, limit));
	memset(slb_shadow, 0, size);
}
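
/*
 * Set up the SLB shadow entry for a cpu: record the number of bolted
 * (persistent) SLB entries and the buffer length for the hypervisor.
 */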
static struct slb_shadow * __init init_slb_shadow(int cpu)
{
	struct slb_shadow *s;

	if (early_radix_enabled())
		return NULL;

	s = &slb_shadow[cpu];

	/*
	 * When we come through here to initialise boot_paca, the slb_shadow
	 * buffers are not allocated yet. That's OK, we'll get one later in
	 * boot, but make sure we don't corrupt memory at 0.
	 */
	if (!slb_shadow)
		return NULL;

	s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
	s->buffer_length = cpu_to_be32(sizeof(*s));

	return s;
}
#else /* !CONFIG_PPC_BOOK3S_64 */

static void __init allocate_slb_shadows(int nr_cpus, int limit) { }

#endif /* CONFIG_PPC_BOOK3S_64 */
/* The Paca is an array with one entry per processor. Each contains an
 * lppaca, which contains the information shared between the
 * hypervisor and Linux.
 * On systems with hardware multi-threading, there are two threads
 * per processor. The Paca array must contain an entry for each thread.
 * The VPD Areas will give a max logical processors = 2 * max physical
 * processors. The processor VPD array needs one entry per physical
 * processor (not thread).
 */
struct paca_struct **paca_ptrs __read_mostly;
EXPORT_SYMBOL(paca_ptrs);
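
/*
 * Fill in the boot-time fields of a new paca for logical cpu @cpu.
 * hw_cpu_id is set to an invalid value (0xffff) and data_offset to a
 * poison pattern; both are fixed up later when the cpu is brought up.
 */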
void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
#ifdef CONFIG_PPC_PSERIES
	new_paca->lppaca_ptr = new_lppaca(cpu);
#endif
#ifdef CONFIG_PPC_BOOK3E
	new_paca->kernel_pgd = swapper_pg_dir;
#endif
	new_paca->lock_token = 0x8000;
	new_paca->paca_index = cpu;
	new_paca->kernel_toc = kernel_toc_addr();
	new_paca->kernelbase = (unsigned long) _stext;
	/* Only set MSR:IR/DR when MMU is initialized */
	new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
	new_paca->hw_cpu_id = 0xffff;
	new_paca->kexec_state = KEXEC_STATE_NONE;
	new_paca->__current = &init_task;
	new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_BOOK3S_64
	new_paca->slb_shadow_ptr = init_slb_shadow(cpu);
#endif

#ifdef CONFIG_PPC_BOOK3E
	/* For now -- if we have threads this will be adjusted later */
	new_paca->tcd_ptr = &new_paca->tcd;
#endif
}
/* Put the paca pointer into r13 and SPRG_PACA */
void setup_paca(struct paca_struct *new_paca)
{
	/* Setup r13 */
	local_paca = new_paca;

#ifdef CONFIG_PPC_BOOK3E
	/* On Book3E, initialize the TLB miss exception frames */
	mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
	/*
	 * In HV mode, we setup both HPACA and PACA to avoid problems
	 * if we do a GET_PACA() before the feature fixups have been
	 * done.
	 */
	if (early_cpu_has_feature(CPU_FTR_HVMODE))
		mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
	mtspr(SPRN_SPRG_PACA, local_paca);
}
static int __initdata paca_nr_cpu_ids;
static int __initdata paca_ptrs_size;
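
/*
 * Allocate the paca pointer array and one paca per cpu id, all below
 * the limit addressable in real mode, then initialise each paca. This
 * runs early in boot, before the cpu possible/present masks are
 * usable.
 */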
void __init allocate_pacas(void)
{
	u64 limit;
	unsigned long size = 0;
	int cpu;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We access pacas in real mode, and cannot take SLB faults
	 * on them when in virtual mode, so allocate them accordingly.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);
#else
	limit = ppc64_rma_size;
#endif

	paca_nr_cpu_ids = nr_cpu_ids;

	paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
	paca_ptrs = __va(memblock_alloc_base(paca_ptrs_size, 0, limit));
	memset(paca_ptrs, 0, paca_ptrs_size);

	size += paca_ptrs_size;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		unsigned long pa;

		pa = memblock_alloc_base(sizeof(struct paca_struct),
					 L1_CACHE_BYTES, limit);
		paca_ptrs[cpu] = __va(pa);
		memset(paca_ptrs[cpu], 0, sizeof(struct paca_struct));

		size += sizeof(struct paca_struct);
	}

	printk(KERN_DEBUG "Allocated %lu bytes for %u pacas\n",
	       size, nr_cpu_ids);

	allocate_lppacas(nr_cpu_ids, limit);

	allocate_slb_shadows(nr_cpu_ids, limit);

	/* Can't use for_each_*_cpu, as they aren't functional yet */
	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		initialise_paca(paca_ptrs[cpu], cpu);
}
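
/*
 * Once the set of possible CPUs is known, give back the pacas (and
 * the tail of the pointer array) allocated for cpu ids that can never
 * come online.
 */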
void __init free_unused_pacas(void)
{
	unsigned long size = 0;
	int new_ptrs_size;
	int cpu;

	for (cpu = 0; cpu < paca_nr_cpu_ids; cpu++) {
		if (!cpu_possible(cpu)) {
			unsigned long pa = __pa(paca_ptrs[cpu]);
			memblock_free(pa, sizeof(struct paca_struct));
			paca_ptrs[cpu] = NULL;
			size += sizeof(struct paca_struct);
		}
	}

	new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
	if (new_ptrs_size < paca_ptrs_size) {
		memblock_free(__pa(paca_ptrs) + new_ptrs_size,
			      paca_ptrs_size - new_ptrs_size);
		size += paca_ptrs_size - new_ptrs_size;
	}

	if (size)
		printk(KERN_DEBUG "Freed %lu bytes for unused pacas\n", size);

	free_lppacas();

	paca_nr_cpu_ids = nr_cpu_ids;
	paca_ptrs_size = new_ptrs_size;
}
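
/*
 * Copy the mm context fields needed by the low-level SLB miss code
 * from the mm_struct into the current cpu's paca, so they can be read
 * without dereferencing the mm_struct.
 */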
void copy_mm_to_paca(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_BOOK3S
	mm_context_t *context = &mm->context;

	get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES
	VM_BUG_ON(!mm->context.slb_addr_limit);
	get_paca()->mm_ctx_slb_addr_limit = mm->context.slb_addr_limit;
	get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize;
	memcpy(&get_paca()->mm_ctx_high_slices_psize,
	       &context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm));
#else /* CONFIG_PPC_MM_SLICES */
	get_paca()->mm_ctx_user_psize = context->user_psize;
	get_paca()->mm_ctx_sllp = context->sllp;
#endif
#else /* !CONFIG_PPC_BOOK3S */
	return;
#endif
}