/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "qemu-common.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86_mmu.h"
#include "x86_descr.h"

/* static uint32_t x86_segment_access_rights(struct x86_segment_descriptor *var)
{
    uint32_t ar;

    if (!var->p) {
        ar = 1 << 16;
        return ar;
    }

    ar = var->type & 15;
    ar |= (var->s & 1) << 4;
    ar |= (var->dpl & 3) << 5;
    ar |= (var->p & 1) << 7;
    ar |= (var->avl & 1) << 12;
    ar |= (var->l & 1) << 13;
    ar |= (var->db & 1) << 14;
    ar |= (var->g & 1) << 15;
    return ar;
}*/

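/*
 * Fetch the descriptor referenced by selector @sel from the guest's
 * GDT or LDT into @desc.  Fails for the GDT null selector and for
 * selectors that point past the table limit.
 */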
bool x86_read_segment_descriptor(struct CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x68_segment_selector sel)
{
    addr_t base;
    uint32_t limit;

    ZERO_INIT(*desc);
    /* valid gdt descriptors start from index 1 */
    if (!sel.index && GDT_SEL == sel.ti) {
        return false;
    }

    if (GDT_SEL == sel.ti) {
        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
    } else {
        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
    }

    if (sel.index * 8 >= limit) {
        return false;
    }

    vmx_read_mem(cpu, desc, base + sel.index * 8, sizeof(*desc));
    return true;
}

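/*
 * Write @desc back into the GDT or LDT slot referenced by @sel,
 * mirroring the lookup logic of x86_read_segment_descriptor() above.
 */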
bool x86_write_segment_descriptor(struct CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x68_segment_selector sel)
{
    addr_t base;
    uint32_t limit;

    if (GDT_SEL == sel.ti) {
        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
    } else {
        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
    }

    if (sel.index * 8 >= limit) {
        /* may be the GDT or the LDT, so don't claim "gdt" here */
        printf("%s: descriptor table limit exceeded\n", __func__);
        return false;
    }
    vmx_write_mem(cpu, base + sel.index * 8, desc, sizeof(*desc));
    return true;
}

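/*
 * Read the gate descriptor for vector @gate from the guest IDT into
 * @idt_desc.  Fails when the vector lies past the IDT limit.
 */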
bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate)
{
    addr_t base = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE);
    uint32_t limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT);

    ZERO_INIT(*idt_desc);
    if (gate * 8 >= limit) {
        printf("%s: idt limit\n", __func__);
        return false;
    }

    vmx_read_mem(cpu, idt_desc, base + gate * 8, sizeof(*idt_desc));
    return true;
}

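/* CR0.PE set means the guest has left real mode. */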
bool x86_is_protected(struct CPUState *cpu)
{
    uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
    return cr0 & CR0_PE;
}

bool x86_is_real(struct CPUState *cpu)
{
    return !x86_is_protected(cpu);
}

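/*
 * Virtual-8086 mode: protected mode with RFLAGS.VM set.  RFLAGS is
 * taken from the emulator's cached register state, not the VMCS.
 */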
bool x86_is_v8086(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    return x86_is_protected(cpu) && (RFLAGS(env) & RFLAGS_VM);
}

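/* EFER.LMA is set while long mode is active. */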
bool x86_is_long_mode(struct CPUState *cpu)
{
    return rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
}

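/*
 * 64-bit sub-mode: long mode is active and CS.L (bit 13 of the
 * access-rights word) is set; with CS.L clear the guest runs in
 * compatibility mode.
 */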
bool x86_is_long64_mode(struct CPUState *cpu)
{
    struct vmx_segment desc;
    vmx_read_segment_descriptor(cpu, &desc, R_CS);

    return x86_is_long_mode(cpu) && ((desc.ar >> 13) & 1);
}

bool x86_is_paging_mode(struct CPUState *cpu)
{
    uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
    return cr0 & CR0_PG;
}

bool x86_is_pae_enabled(struct CPUState *cpu)
{
    uint64_t cr4 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4);
    return cr4 & CR4_PAE;
}

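/*
 * Translate a segment-relative offset into a guest linear address by
 * adding the segment base from the VMCS.
 */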
addr_t linear_addr(struct CPUState *cpu, addr_t addr, X86Seg seg)
{
    return vmx_read_segment_base(cpu, seg) + addr;
}

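/*
 * As linear_addr(), but first truncate @addr to 16 or 32 bits to
 * honour the instruction's address size; 64-bit addresses pass
 * through unchanged.
 */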
addr_t linear_addr_size(struct CPUState *cpu, addr_t addr, int size,
                        X86Seg seg)
{
    switch (size) {
    case 2:
        addr = (uint16_t)addr;
        break;
    case 4:
        addr = (uint32_t)addr;
        break;
    default:
        break;
    }
    return linear_addr(cpu, addr, seg);
}

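/* Convenience wrapper: translate an instruction pointer through CS. */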
addr_t linear_rip(struct CPUState *cpu, addr_t rip)
{
    return linear_addr(cpu, rip, R_CS);
}