/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#define SHADOW_SLB_ESID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10))
#define SHADOW_SLB_VSID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
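
/*
 * UNBOLT_SLB_ENTRY clears the valid bit of a bolted host entry in the
 * SLB shadow save area, hiding the host translation while the guest
 * runs; REBOLT_SLB_ENTRY sets the bit again and slbmtes the entry
 * back into the SLB on the way out.
 */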
#define UNBOLT_SLB_ENTRY(num) \
	ld	r9, SHADOW_SLB_ESID(num)(r12); \
	/* Invalid? Skip. */; \
	rldicl.	r0, r9, 37, 63; \
	beq	slb_entry_skip_ ## num; \
	xoris	r9, r9, SLB_ESID_V@h; \
	std	r9, SHADOW_SLB_ESID(num)(r12); \
slb_entry_skip_ ## num:

#define REBOLT_SLB_ENTRY(num) \
	ld	r10, SHADOW_SLB_ESID(num)(r11); \
	cmpdi	r10, 0; \
	beq	slb_exit_skip_ ## num; \
	oris	r10, r10, SLB_ESID_V@h; \
	ld	r9, SHADOW_SLB_VSID(num)(r11); \
	slbmte	r9, r10; \
	std	r10, SHADOW_SLB_ESID(num)(r11); \
slb_exit_skip_ ## num:

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_GUEST_SEGMENTS

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = host R2
	 * R3 = shadow vcpu
	 * all other volatile GPRs = free except R4, R6
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */

	/* Remove LPAR shadow entries */

#if SLB_NUM_BOLTED == 3

	ld	r12, PACA_SLBSHADOWPTR(r13)

	/* Save off the first entry so we can slbie it later */
	ld	r10, SHADOW_SLB_ESID(0)(r12)
	ld	r11, SHADOW_SLB_VSID(0)(r12)

	/* Remove bolted entries */
	UNBOLT_SLB_ENTRY(0)
	UNBOLT_SLB_ENTRY(1)
	UNBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

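	/*
	 * slbia leaves SLB entry 0 intact, so the first shadow entry
	 * saved in r10/r11 above is invalidated explicitly with slbie
	 * below.
	 */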
	/* Flush SLB */

	slbia

	/* r10 = esid & ESID_MASK */
	rldicr	r10, r10, 0, 35
	/* r10 |= CLASS_BIT(VSID) */
	rldic	r12, r11, 56 - 36, 36
	or	r10, r10, r12
	slbie	r10

	isync

	/* Fill SLB with our shadow */

	lbz	r12, SVCPU_SLB_MAX(r3)
	mulli	r12, r12, 16
	addi	r12, r12, SVCPU_SLB
	add	r12, r12, r3

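	/* r12 = &svcpu->slb[slb_max], one past the last valid guest entry */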
	/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */

	li	r11, SVCPU_SLB
	add	r11, r11, r3

slb_loop_enter:

	ld	r10, 0(r11)

	/* Valid bit clear? Skip this entry. */
	rldicl.	r0, r10, 37, 63
	beq	slb_loop_enter_skip

	ld	r9, 8(r11)
	slbmte	r9, r10

slb_loop_enter_skip:
	addi	r11, r11, 16
	cmpd	cr0, r11, r12
	blt	slb_loop_enter

slb_do_enter:

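	/* The guest SLB is now fully loaded */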
.endm

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1  = host R1
	 * R2  = host R2
	 * R12 = exit handler id
	 * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
	 * SVCPU.* = guest *
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 *
	 */

	/* Restore bolted entries from the shadow and fix them up along the way */

	/* We don't store anything in entry 0, so we don't need to take care of it */
	slbia
	isync

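	/*
	 * Re-create the host's bolted translations from the shadow buffer
	 * so they are valid again once address translation is re-enabled.
	 */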
#if SLB_NUM_BOLTED == 3

	ld	r11, PACA_SLBSHADOWPTR(r13)

	REBOLT_SLB_ENTRY(0)
	REBOLT_SLB_ENTRY(1)
	REBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

slb_do_exit:

.endm