#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>

	.text
/*
 * Implementation of MPIDR_EL1 hash algorithm through shifting
 * and OR'ing.
 *
 * @dst: register containing hash result
 * @rs0: register containing affinity level 0 bit shift
 * @rs1: register containing affinity level 1 bit shift
 * @rs2: register containing affinity level 2 bit shift
 * @rs3: register containing affinity level 3 bit shift
 * @mpidr: register containing MPIDR_EL1 value
 * @mask: register containing MPIDR mask
 *
 * Pseudo C-code:
 *
 *u32 dst;
 *
 *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) {
 *	u32 aff0, aff1, aff2, aff3;
 *	u64 mpidr_masked = mpidr & mask;
 *	aff0 = mpidr_masked & 0xff;
 *	aff1 = mpidr_masked & 0xff00;
 *	aff2 = mpidr_masked & 0xff0000;
 *	aff3 = mpidr_masked & 0xff00000000;
 *	dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3);
 *}
 * Input registers: rs0, rs1, rs2, rs3, mpidr, mask
 * Output register: dst
 * Note: input and output registers must be disjoint register sets
 *       (eg: a macro instance with mpidr = x1 and dst = x1 is invalid)
 */
	.macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask
	and	\mpidr, \mpidr, \mask		// mask out MPIDR bits
	and	\dst, \mpidr, #0xff		// dst = aff0
	lsr	\dst, \dst, \rs0		// dst = aff0 >> rs0
	and	\mask, \mpidr, #0xff00		// mask = aff1
	lsr	\mask, \mask, \rs1
	orr	\dst, \dst, \mask		// dst |= (aff1 >> rs1)
	and	\mask, \mpidr, #0xff0000	// mask = aff2
	lsr	\mask, \mask, \rs2
	orr	\dst, \dst, \mask		// dst |= (aff2 >> rs2)
	and	\mask, \mpidr, #0xff00000000	// mask = aff3
	lsr	\mask, \mask, \rs3
	orr	\dst, \dst, \mask		// dst |= (aff3 >> rs3)
	.endm
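/*
 * For reference, the same hash written out as self-contained C (a sketch
 * mirroring the pseudo-code above; the function name and the example mask
 * and shift values are illustrative, not taken from kernel code):
 *
 *static inline u32 compute_mpidr_hash_c(u32 rs0, u32 rs1, u32 rs2, u32 rs3,
 *					 u64 mpidr, u64 mask)
 *{
 *	u64 m = mpidr & mask;		// keep only the affinity bits that vary
 *
 *	return ((m & 0xff) >> rs0) |		// Aff0
 *	       ((m & 0xff00) >> rs1) |		// Aff1
 *	       ((m & 0xff0000) >> rs2) |	// Aff2
 *	       ((m & 0xff00000000) >> rs3);	// Aff3
 *}
 *
 * For instance, if CPUs differ only in Aff0 bits [1:0] and Aff1 bit [8]
 * (mask = 0x103), shifts rs0 = 0 and rs1 = 6 compress the MPIDR into a
 * dense 3-bit index: hash = (mpidr & 0x3) | ((mpidr & 0x100) >> 6); the
 * Aff2/Aff3 terms are zero because the mask clears them.
 */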
/*
 * Save CPU state for a suspend and execute the suspend finisher.
 * On success it will return 0 through cpu_resume - ie through a CPU
 * soft/hard reboot from the reset vector.
 * On failure it returns the suspend finisher return value or forces
 * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
 * is not allowed to return; if it does, this must be considered a failure).
 * It saves the callee-saved registers and allocates space on the kernel
 * stack to save the CPU-specific registers plus some other data for resume.
 *
 * x0 = suspend finisher argument
 * x1 = suspend finisher function pointer
 */
ENTRY(__cpu_suspend_enter)
	stp	x29, lr, [sp, #-96]!
	stp	x19, x20, [sp, #16]
	stp	x21, x22, [sp, #32]
	stp	x23, x24, [sp, #48]
	stp	x25, x26, [sp, #64]
	stp	x27, x28, [sp, #80]
	/*
	 * Stash suspend finisher and its argument in x20 and x19
	 */
	mov	x19, x0
	mov	x20, x1
	mov	x2, sp
	sub	sp, sp, #CPU_SUSPEND_SZ		// allocate cpu_suspend_ctx
	mov	x0, sp
	/*
	 * x0 now points to struct cpu_suspend_ctx allocated on the stack
	 */
	str	x2, [x0, #CPU_CTX_SP]
	ldr	x1, =sleep_save_sp
	ldr	x1, [x1, #SLEEP_SAVE_SP_VIRT]
#ifdef CONFIG_SMP
	mrs	x7, mpidr_el1
	ldr	x9, =mpidr_hash
	ldr	x10, [x9, #MPIDR_HASH_MASK]
	/*
	 * The following code relies on the size and layout of the
	 * struct mpidr_hash members.
	 */
	ldp	w3, w4, [x9, #MPIDR_HASH_SHIFTS]
	ldp	w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
	add	x1, x1, x8, lsl #3
#endif
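/*
 * The loads above assume a struct mpidr_hash layout along these lines
 * (a sketch implied by the MPIDR_HASH_MASK/MPIDR_HASH_SHIFTS asm-offsets:
 * a u64 mask followed by four u32 shifts; the authoritative definition
 * lives in the arch headers):
 *
 *struct mpidr_hash {
 *	u64	mask;		// MPIDR_HASH_MASK
 *	u32	shift_aff[4];	// MPIDR_HASH_SHIFTS .. MPIDR_HASH_SHIFTS + 12
 *	u32	bits;		// significant bits in the resulting hash
 *};
 *
 * The two ldp instructions therefore pick up shift_aff[0..1] and
 * shift_aff[2..3], and "add x1, x1, x8, lsl #3" uses the computed hash to
 * index an array of 8-byte slots, one slot per hash value.
 */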
	bl	__cpu_suspend_save
	/*
	 * Retrieve the suspend finisher from x20 and its argument from x19
	 */
	mov	x0, x19
	mov	x1, x20
	/*
	 * We are ready for power down; fire off the suspend finisher
	 * in x1, with its argument in x0.
	 */
	blr	x1
	/*
	 * Never gets here, unless the suspend finisher fails.
	 * A successful cpu_suspend should return from cpu_resume;
	 * returning through this code path is considered an error.
	 * If the return value is set to 0, force x0 = -EOPNOTSUPP
	 * to make sure a proper error condition is propagated.
	 */
	cmp	x0, #0
	mov	x3, #-EOPNOTSUPP
	csel	x0, x3, x0, eq
	add	sp, sp, #CPU_SUSPEND_SZ		// rewind stack pointer
	ldp	x19, x20, [sp, #16]
	ldp	x21, x22, [sp, #32]
	ldp	x23, x24, [sp, #48]
	ldp	x25, x26, [sp, #64]
	ldp	x27, x28, [sp, #80]
	ldp	x29, lr, [sp], #96
	ret
ENDPROC(__cpu_suspend_enter)
	.ltorg

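/*
 * For orientation, the context bookkeeping on both the suspend and the
 * resume path is consistent with layouts along these lines (a sketch
 * inferred from the CPU_SUSPEND_SZ/CPU_CTX_SP and SLEEP_SAVE_SP_VIRT/
 * SLEEP_SAVE_SP_PHYS asm-offsets; the field names are assumptions, the
 * authoritative definitions live in the arch headers):
 *
 *struct cpu_suspend_ctx {
 *	u64 ctx_regs[NR_CTX_REGS];	// CPU registers saved around suspend
 *	u64 sp;				// CPU_CTX_SP: stack pointer at suspend
 *} __aligned(16);
 *
 *struct sleep_save_sp {
 *	phys_addr_t *save_ptr_stash;		// SLEEP_SAVE_SP_VIRT
 *	phys_addr_t save_ptr_stash_phys;	// SLEEP_SAVE_SP_PHYS
 *};
 *
 * __cpu_suspend_enter hands __cpu_suspend_save the ctx it carved out of
 * the stack plus &save_ptr_stash[hash]; cpu_resume later reads that slot
 * back through save_ptr_stash_phys, so the slot must hold the physical
 * address of the ctx (the MMU is still off at that point).
 */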
/*
 * x0 must contain the sctlr value retrieved from restored context
 */
ENTRY(cpu_resume_mmu)
	ldr	x3, =cpu_resume_after_mmu
	msr	sctlr_el1, x0		// restore sctlr_el1
	isb
	br	x3			// global jump to virtual address
ENDPROC(cpu_resume_mmu)
cpu_resume_after_mmu:
	mov	x0, #0			// return zero on success
	ldp	x19, x20, [sp, #16]
	ldp	x21, x22, [sp, #32]
	ldp	x23, x24, [sp, #48]
	ldp	x25, x26, [sp, #64]
	ldp	x27, x28, [sp, #80]
	ldp	x29, lr, [sp], #96
	ret
ENDPROC(cpu_resume_after_mmu)

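/*
 * Note that the epilogue above pops exactly the frame that
 * __cpu_suspend_enter pushed, so on the success path the original call
 * simply appears to return. Pseudo C-code for the caller's view (a sketch;
 * "arg" and "finisher" stand for whatever was passed in x0/x1):
 *
 *	long ret = __cpu_suspend_enter(arg, finisher);
 *	// ret == 0 here once the CPU has come back up through cpu_resume,
 *	// cpu_resume_mmu and cpu_resume_after_mmu.
 */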
ENTRY(cpu_resume)
	bl	el2_setup		// if in EL2 drop to EL1 cleanly
#ifdef CONFIG_SMP
	mrs	x1, mpidr_el1
	adrp	x8, mpidr_hash
	add	x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
	/* retrieve mpidr_hash members to compute the hash */
	ldr	x2, [x8, #MPIDR_HASH_MASK]
	ldp	w3, w4, [x8, #MPIDR_HASH_SHIFTS]
	ldp	w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
	compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
	/* x7 contains hash index, let's use it to grab context pointer */
#else
	mov	x7, xzr
#endif
	adrp	x0, sleep_save_sp
	add	x0, x0, #:lo12:sleep_save_sp
	ldr	x0, [x0, #SLEEP_SAVE_SP_PHYS]
	ldr	x0, [x0, x7, lsl #3]
	/* load sp from context */
	ldr	x2, [x0, #CPU_CTX_SP]
	adrp	x1, sleep_idmap_phys
	/* load physical address of identity map page table in x1 */
	ldr	x1, [x1, #:lo12:sleep_idmap_phys]
	mov	sp, x2
	/*
	 * cpu_do_resume expects x0 to contain the physical address of the
	 * context and x1 to contain the physical address of the 1:1 page tables
	 */
	bl	cpu_do_resume		// PC relative jump, MMU off
	b	cpu_resume_mmu		// Resume MMU, never returns
ENDPROC(cpu_resume)
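/*
 * Putting the resume path together, pseudo C-code for cpu_resume (a sketch
 * of the control flow above, not kernel C; it runs with the MMU off, so
 * every pointer below is a physical address, and "sp" denotes the stack
 * pointer register; struct field names as in the sketches earlier in this
 * file, i.e. assumptions):
 *
 *void cpu_resume(void)
 *{
 *	u64 hash = 0;
 *	struct cpu_suspend_ctx *ctx;
 *	u64 sctlr;
 *
 *#ifdef CONFIG_SMP
 *	hash = compute_mpidr_hash(mpidr_hash.shift_aff[0], mpidr_hash.shift_aff[1],
 *				  mpidr_hash.shift_aff[2], mpidr_hash.shift_aff[3],
 *				  MPIDR_EL1, mpidr_hash.mask);
 *#endif
 *	ctx = (struct cpu_suspend_ctx *)
 *		((phys_addr_t *)sleep_save_sp.save_ptr_stash_phys)[hash];
 *	sp = ctx->sp;				// stack saved at suspend time
 *	sctlr = cpu_do_resume(ctx, sleep_idmap_phys);	// restore CPU context
 *	cpu_resume_mmu(sctlr);		// MMU back on, then return 0 via
 *					// cpu_resume_after_mmu
 *}
 *
 * That cpu_do_resume leaves the saved SCTLR_EL1 value in x0 is implied by
 * the cpu_resume_mmu comment above ("x0 must contain the sctlr value
 * retrieved from restored context").
 */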