]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - arch/arm64/kernel/sleep.S
arm64: kernel: Rework finisher callback out of __cpu_suspend_enter()
[mirror_ubuntu-zesty-kernel.git] / arch / arm64 / kernel / sleep.S
CommitLineData
95322526
LP
1#include <linux/errno.h>
2#include <linux/linkage.h>
3#include <asm/asm-offsets.h>
4#include <asm/assembler.h>
5
6 .text
7/*
8 * Implementation of MPIDR_EL1 hash algorithm through shifting
9 * and OR'ing.
10 *
11 * @dst: register containing hash result
12 * @rs0: register containing affinity level 0 bit shift
13 * @rs1: register containing affinity level 1 bit shift
14 * @rs2: register containing affinity level 2 bit shift
15 * @rs3: register containing affinity level 3 bit shift
16 * @mpidr: register containing MPIDR_EL1 value
17 * @mask: register containing MPIDR mask
18 *
19 * Pseudo C-code:
20 *
21 *u32 dst;
22 *
23 *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) {
24 * u32 aff0, aff1, aff2, aff3;
25 * u64 mpidr_masked = mpidr & mask;
26 * aff0 = mpidr_masked & 0xff;
27 * aff1 = mpidr_masked & 0xff00;
28 * aff2 = mpidr_masked & 0xff0000;
 *	aff3 = mpidr_masked & 0xff00000000;
30 * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3);
31 *}
32 * Input registers: rs0, rs1, rs2, rs3, mpidr, mask
33 * Output register: dst
34 * Note: input and output registers must be disjoint register sets
35 (eg: a macro instance with mpidr = x1 and dst = x1 is invalid)
36 */
/*
 * compute_mpidr_hash: fold a (masked) MPIDR_EL1 value into a small
 * hash index by shifting each affinity field right and OR'ing the
 * results together (see the pseudo C-code above).
 *
 * In:  \rs0..\rs3 = per-affinity-level right shifts
 *      \mpidr     = MPIDR_EL1 value (clobbered: masked in place)
 *      \mask      = MPIDR mask (clobbered: reused as scratch)
 * Out: \dst       = hash result
 * Note: input and output registers must be disjoint register sets
 *       (eg: a macro instance with mpidr = x1 and dst = x1 is invalid)
 */
	.macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask
	and	\mpidr, \mpidr, \mask		// mask out MPIDR bits
	and	\dst, \mpidr, #0xff		// dst = aff0
	lsr	\dst, \dst, \rs0		// dst = aff0 >> rs0
	and	\mask, \mpidr, #0xff00		// mask = aff1
	lsr	\mask, \mask, \rs1
	orr	\dst, \dst, \mask		// dst |= (aff1 >> rs1)
	and	\mask, \mpidr, #0xff0000	// mask = aff2
	lsr	\mask, \mask, \rs2
	orr	\dst, \dst, \mask		// dst |= (aff2 >> rs2)
	and	\mask, \mpidr, #0xff00000000	// mask = aff3
	lsr	\mask, \mask, \rs3
	orr	\dst, \dst, \mask		// dst |= (aff3 >> rs3)
	.endm
51/*
adc9b2df
JM
52 * Save CPU state in the provided sleep_stack_data area, and publish its
53 * location for cpu_resume()'s use in sleep_save_stash.
95322526 54 *
adc9b2df
JM
55 * cpu_resume() will restore this saved state, and return. Because the
56 * link-register is saved and restored, it will appear to return from this
57 * function. So that the caller can tell the suspend/resume paths apart,
58 * __cpu_suspend_enter() will always return a non-zero value, whereas the
59 * path through cpu_resume() will return 0.
60 *
61 * x0 = struct sleep_stack_data area
95322526 62 */
ENTRY(__cpu_suspend_enter)
	/*
	 * Save the callee-saved registers (x19-x28, fp, lr) into the
	 * sleep_stack_data area passed in x0.  cpu_resume() restores
	 * them later, so control appears to return from this function
	 * on the resume path as well; the two paths are told apart by
	 * the return value (1 here, 0 via cpu_resume()).
	 */
	stp	x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
	stp	x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16]
	stp	x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32]
	stp	x23, x24, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+48]
	stp	x25, x26, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+64]
	stp	x27, x28, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+80]

	/* save the sp in cpu_suspend_ctx */
	mov	x2, sp
	str	x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP]

	/*
	 * Locate this CPU's slot in sleep_save_sp: hash MPIDR_EL1 with
	 * the precomputed mpidr_hash shifts/mask, then index the array
	 * of 8-byte slots (hence lsl #3).
	 */
	ldr	x1, =sleep_save_sp
	ldr	x1, [x1, #SLEEP_SAVE_SP_VIRT]
	mrs	x7, mpidr_el1
	ldr	x9, =mpidr_hash
	ldr	x10, [x9, #MPIDR_HASH_MASK]
	/*
	 * Following code relies on the struct mpidr_hash
	 * members size (shifts are four consecutive u32s).
	 */
	ldp	w3, w4, [x9, #MPIDR_HASH_SHIFTS]
	ldp	w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
	add	x1, x1, x8, lsl #3		// x1 = &sleep_save_sp[hash]

	/* x30 must survive the C call below: spill fp/lr around it */
	stp	x29, lr, [sp, #-16]!
	bl	__cpu_suspend_save
	ldp	x29, lr, [sp], #16
	mov	x0, #1				// non-zero: suspend path
	ret
ENDPROC(__cpu_suspend_enter)
95322526
LP
96 .ltorg
97
/*
 * cpu_resume_mmu: turn the MMU back on and jump to the kernel's
 * virtual-address resume continuation.
 *
 * x0 must contain the sctlr value retrieved from restored context.
 * Runs from the idmap so the instruction stream stays valid across
 * the MMU enable.
 */
	.pushsection	".idmap.text", "ax"
ENTRY(cpu_resume_mmu)
	ldr	x3, =cpu_resume_after_mmu	// virtual target address
	msr	sctlr_el1, x0			// restore sctlr_el1
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	br	x3				// global jump to virtual address
ENDPROC(cpu_resume_mmu)
	.popsection
/*
 * cpu_resume_after_mmu: virtual-address continuation entered from
 * cpu_resume_mmu.  Returns 0 so the restored __cpu_suspend_enter()
 * caller can distinguish the resume path from the suspend path.
 */
cpu_resume_after_mmu:
#ifdef CONFIG_KASAN
	mov	x0, sp
	bl	kasan_unpoison_remaining_stack
#endif
	mov	x0, #0				// return zero on success
	ret
ENDPROC(cpu_resume_after_mmu)
125
95322526
LP
/*
 * cpu_resume: entered with the MMU off on wakeup from a power-down
 * state.  Recovers the per-CPU saved context via the mpidr_hash
 * lookup, restores system registers and callee-saved registers, and
 * branches to cpu_resume_mmu to re-enable the MMU.
 */
ENTRY(cpu_resume)
	bl	el2_setup		// if in EL2 drop to EL1 cleanly
	mrs	x1, mpidr_el1
	adrp	x8, mpidr_hash
	add	x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
	/* retrieve mpidr_hash members to compute the hash */
	ldr	x2, [x8, #MPIDR_HASH_MASK]
	ldp	w3, w4, [x8, #MPIDR_HASH_SHIFTS]
	ldp	w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
	compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
	/* x7 contains hash index, let's use it to grab context pointer */
	ldr_l	x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
	ldr	x0, [x0, x7, lsl #3]	// x0 = this CPU's sleep_stack_data
	add	x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS
	add	x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
	/* load sp from context */
	ldr	x2, [x0, #CPU_CTX_SP]
	/* load physical address of identity map page table in x1 */
	adrp	x1, idmap_pg_dir
	mov	sp, x2
	/* save thread_info */
	and	x2, x2, #~(THREAD_SIZE - 1)
	msr	sp_el0, x2
	/*
	 * cpu_do_resume expects x0 to contain context physical address
	 * pointer and x1 to contain physical address of 1:1 page tables
	 */
	bl	cpu_do_resume		// PC relative jump, MMU off
	/* Can't access these by physical address once the MMU is on */
	ldp	x19, x20, [x29, #16]
	ldp	x21, x22, [x29, #32]
	ldp	x23, x24, [x29, #48]
	ldp	x25, x26, [x29, #64]
	ldp	x27, x28, [x29, #80]
	ldp	x29, lr, [x29]		// restore fp/lr last (x29 was the base)
	b	cpu_resume_mmu		// Resume MMU, never returns
ENDPROC(cpu_resume)