/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/unicore32/kernel/head.S
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <generated/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/hwdef-copro.h>
#include <asm/pgtable-hwdef.h>

#if (PHYS_OFFSET & 0x003fffff)
#error "PHYS_OFFSET must be at an even 4MiB boundary!"
#endif
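
/*
 * The 4MiB alignment requirement falls out of the section mappings set
 * up below: each level-1 table entry maps one 4MiB section (VA bits
 * [31:22]), so PHYS_OFFSET must itself sit on a 4MiB boundary.
 */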

#define KERNEL_RAM_VADDR	(PAGE_OFFSET + KERNEL_IMAGE_START)
#define KERNEL_RAM_PADDR	(PHYS_OFFSET + KERNEL_IMAGE_START)

#define KERNEL_PGD_PADDR	(KERNEL_RAM_PADDR - 0x1000)
#define KERNEL_PGD_VADDR	(KERNEL_RAM_VADDR - 0x1000)

#define KERNEL_START		KERNEL_RAM_VADDR
#define KERNEL_END		_end

/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 4K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x1000.
 */
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
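
/*
 * Worked example (illustrative values only): with PAGE_OFFSET = 0xc0000000
 * and KERNEL_IMAGE_START = 0x8000, KERNEL_RAM_VADDR = 0xc0008000, which
 * passes the check above, and swapper_pg_dir lands at 0xc0007000: the 4K
 * page immediately below the kernel image.
 */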

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - 0x1000

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 */
	__HEAD
ENTRY(stext)
	@ set asr
	mov	r0, #PRIV_MODE			@ ensure priv mode
	or	r0, #PSR_R_BIT | PSR_I_BIT	@ disable irqs
	mov.a	asr, r0

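	/*
	 * Sanity-check the CPU id.  The mask 0xff00ffff ignores bits
	 * [23:16] of the cpuid word, which presumably carry a variant /
	 * revision field; everything else must match the expected
	 * PKUnity / UniCore value exactly.
	 */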
	@ identify processor
	movc	r0, p0.c0, #0			@ read cpuid
	movl	r1, 0xff00ffff			@ mask
	movl	r2, 0x4d000863			@ expected value
	and	r0, r1, r0
	cxor.a	r0, r2
	bne	__error_p			@ invalid processor id

	/*
	 * Clear the 4K level 1 swapper page table
	 */
	movl	r0, #KERNEL_PGD_PADDR		@ page table address
	mov	r1, #0
	add	r2, r0, #0x1000
101:	stw.w	r1, [r0]+, #4
	stw.w	r1, [r0]+, #4
	stw.w	r1, [r0]+, #4
	stw.w	r1, [r0]+, #4
	cxor.a	r0, r2
	bne	101b

	movl	r4, #KERNEL_PGD_PADDR		@ page table address
	mov	r7, #PMD_TYPE_SECT | PMD_PRESENT	@ page size: section
	or	r7, r7, #PMD_SECT_CACHEABLE		@ cacheable
	or	r7, r7, #PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC

	/*
	 * Create identity mapping for first 4MB of kernel to
	 * cater for the MMU enable.  This identity mapping
	 * will be removed by paging_init().  We use our current program
	 * counter to determine corresponding section base address.
	 */
	mov	r6, pc
	mov	r6, r6 >> #22			@ start of kernel section
	or	r1, r7, r6 << #22		@ flags + kernel base
	stw	r1, [r4+], r6 << #2		@ identity mapping
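
	/*
	 * Example (hypothetical load address): if stext is running at
	 * physical 0x40008000, then r6 = 0x40008000 >> 22 = 0x100, the
	 * entry written is swapper_pg_dir[0x100] (byte offset r6 << 2 =
	 * 0x400), and it maps VA 0x40000000..0x403fffff 1:1 onto the
	 * same physical section.
	 */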

	/*
	 * Now set up the page tables for our kernel direct
	 * mapped region.
	 */
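	/*
	 * The two-step (KERNEL_START & 0xff000000) / (KERNEL_START &
	 * 0x00c00000) arithmetic below computes r4 + ((KERNEL_START >>
	 * 22) << 2), presumably split because the full byte offset does
	 * not fit in a single immediate; the stw.w leaves r0 pointing at
	 * the entry it just wrote, so the add #4 steps to the next one.
	 */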
	add	r0, r4, #(KERNEL_START & 0xff000000) >> 20
	stw.w	r1, [r0+], #(KERNEL_START & 0x00c00000) >> 20
	movl	r6, #(KERNEL_END - 1)
	add	r0, r0, #4
	add	r6, r4, r6 >> #20
102:	csub.a	r0, r6
	add	r1, r1, #1 << 22
	bua	103f
	stw.w	r1, [r0]+, #4
	b	102b
103:
	/*
	 * Then map the first 4MB of RAM, in case it contains our boot params.
	 */
	add	r0, r4, #PAGE_OFFSET >> 20
	or	r6, r7, #(PHYS_OFFSET & 0xffc00000)
	stw	r6, [r0]
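
	@ note: this single entry maps VA PAGE_OFFSET..PAGE_OFFSET+4MiB-1
	@ onto the first 4MiB section of physical RAM at PHYS_OFFSET, where
	@ the bootloader or decompressor typically leaves the boot params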

	ldw	r15, __switch_data		@ address to jump to after
						@ the MMU has been enabled

	/*
	 * Initialise TLB, Caches, and MMU state ready to switch the MMU
	 * on.
	 */
	mov	r0, #0
	movc	p0.c5, r0, #28			@ cache invalidate all
	nop8
	movc	p0.c6, r0, #6			@ TLB invalidate all
	nop8
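
	@ the nop8 after each invalidate presumably gives the operation
	@ time to complete before the MMU is switched on; the precise
	@ timing requirement is SoC-specific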

	/*
	 * ..V. .... ..TB IDAM
	 * ..1. .... ..01 1111
	 */
	movl	r0, #0x201f		@ control register setting

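	/*
	 * Decoded per the mask above (assuming ARM-like bit naming, as the
	 * CR_A/CR_D/CR_B/CR_I tweaks below suggest): V (vector relocation)
	 * set, T clear, and B (write buffer), I (I-cache), D (D-cache),
	 * A (alignment check) and M (MMU enable) all set -> 0x201f.
	 */
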
	/*
	 * Set up common bits before finally enabling the MMU.  Essentially
	 * this is just loading the page table pointer.
	 */
#ifndef CONFIG_ALIGNMENT_TRAP
	andn	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	andn	r0, r0, #CR_D
#endif
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	andn	r0, r0, #CR_B
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	andn	r0, r0, #CR_I
#endif

	movc	p0.c2, r4, #0			@ set pgd
	b	__turn_mmu_on
ENDPROC(stext)

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 *
 * r0  = cp#0 control register
 * r15 = *virtual* address to jump to upon completion
 */
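/*
 * The .align 5 keeps this sequence within one 32-byte-aligned block,
 * and the nops below pad the pipeline while instruction fetches still
 * use physical addresses; the exact constraint is implementation-specific.
 */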
	.align	5
__turn_mmu_on:
	mov	r0, r0
	movc	p0.c1, r0, #0			@ write control reg
	nop					@ fetch inst by phys addr
	mov	pc, r15
	nop8					@ fetch inst by phys addr
ENDPROC(__turn_mmu_on)

/*
 * Literal pool and switch data for the MMU transition.  The initial
 * page table setup is done inline in stext above; __switch_data simply
 * supplies the virtual addresses that __mmap_switched needs once the
 * MMU is on.
 */
	.ltorg

	.align	2
	.type	__switch_data, %object
__switch_data:
	.long	__mmap_switched
	.long	__bss_start			@ r6
	.long	_end				@ r7
	.long	cr_alignment			@ r8
	.long	init_thread_union + THREAD_START_SP	@ sp

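/*
 * cr_alignment (loaded into r8) is expected to be a two-word area:
 * __mmap_switched stores the control register value both with and
 * without the 'A' bit there, mirroring the ARM cr_alignment /
 * cr_no_alignment pair.
 */
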
/*
 * The following fragment of code is executed with the MMU on, and uses
 * absolute addresses; this is not position independent.
 *
 * r0 = cp#0 control register
 */
__mmap_switched:
	adr	r3, __switch_data + 4

	ldm.w	(r6, r7, r8), [r3]+
	ldw	sp, [r3]

	mov	fp, #0				@ Clear BSS (and zero fp)
203:	csub.a	r6, r7
	bea	204f
	stw.w	fp, [r6]+, #4
	b	203b
204:
	andn	r1, r0, #CR_A			@ Clear 'A' bit
	stm	(r0, r1), [r8]+			@ Save control register values
	b	start_kernel
ENDPROC(__mmap_switched)

/*
 * Exception handling.  Something went wrong and we can't proceed.  We
 * ought to tell the user, but since we don't have any guarantee that
 * we're even running on the right architecture, we do virtually nothing.
 *
 * If CONFIG_DEBUG_LL is set we try to print out something about the error
 * and hope for the best (useful if bootloader fails to pass a proper
 * machine ID for example).
 */
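/*
 * Note that nothing in stext above writes the cpuid into r9 before
 * branching here (the masked id is left in r0), so the r9 value printed
 * below may be stale; the sequence appears to be carried over from the
 * ARM port, where r9 holds the cpuid by convention.
 */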
__error_p:
#ifdef CONFIG_DEBUG_LL
	adr	r0, str_p1
	b.l	printascii
	mov	r0, r9
	b.l	printhex8
	adr	r0, str_p2
	b.l	printascii
901:	nop8
	b	901b
str_p1:	.asciz	"\nError: unrecognized processor variant (0x"
str_p2:	.asciz	").\n"
	.align
#endif
ENDPROC(__error_p)