target-i386: unify reserved bits and NX bit check
index 6b371c8eebfca3acdfb51c56941df326ad0dcbfa..d250d18e27709ac48d595c0ca5480a3440156b30 100644
--- a/target-i386/svm_helper.c
+++ b/target-i386/svm_helper.c
 
 #include "cpu.h"
 #include "exec/cpu-all.h"
-#include "helper.h"
-
-#if !defined(CONFIG_USER_ONLY)
-#include "exec/softmmu_exec.h"
-#endif /* !defined(CONFIG_USER_ONLY) */
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
 
 /* Secure Virtual Machine helpers */
 
@@ -88,21 +85,22 @@ void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
 static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                 const SegmentCache *sc)
 {
-    CPUState *cs = ENV_GET_CPU(env);
-    stw_phys(addr + offsetof(struct vmcb_seg, selector),
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+
+    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
              sc->selector);
     stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
              sc->base);
-    stl_phys(addr + offsetof(struct vmcb_seg, limit),
+    stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit),
              sc->limit);
-    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
+    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib),
              ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
 }
 
 static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                 SegmentCache *sc)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
     unsigned int flags;
 
     sc->selector = lduw_phys(cs->as,
@@ -125,7 +123,7 @@ static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
 
 void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
     target_ulong addr;
     uint32_t event_inj;
     uint32_t int_ctl;
@@ -145,12 +143,12 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     /* save the current CPU state in the hsave page */
     stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
              env->gdt.base);
-    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
+    stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
              env->gdt.limit);
 
     stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
              env->idt.base);
-    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
+    stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
              env->idt.limit);
 
     stq_phys(cs->as,
@@ -281,9 +279,6 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
                           env->vm_vmcb + offsetof(struct vmcb, save.dr7));
     env->dr[6] = ldq_phys(cs->as,
                           env->vm_vmcb + offsetof(struct vmcb, save.dr6));
-    cpu_x86_set_cpl(env, ldub_phys(cs->as,
-                                   env->vm_vmcb + offsetof(struct vmcb,
-                                                           save.cpl)));
 
     /* FIXME: guest state consistency checks */
 
@@ -293,7 +288,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
         break;
     case TLB_CONTROL_FLUSH_ALL_ASID:
         /* FIXME: this is not 100% correct but should work for now */
-        tlb_flush(env, 1);
+        tlb_flush(cs, 1);
         break;
     }
 
@@ -319,7 +314,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
         /* FIXME: need to implement valid_err */
         switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
         case SVM_EVTINJ_TYPE_INTR:
-            env->exception_index = vector;
+            cs->exception_index = vector;
             env->error_code = event_inj_err;
             env->exception_is_int = 0;
             env->exception_next_eip = -1;
@@ -328,31 +323,31 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
             do_interrupt_x86_hardirq(env, vector, 1);
             break;
         case SVM_EVTINJ_TYPE_NMI:
-            env->exception_index = EXCP02_NMI;
+            cs->exception_index = EXCP02_NMI;
             env->error_code = event_inj_err;
             env->exception_is_int = 0;
             env->exception_next_eip = env->eip;
             qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
-            cpu_loop_exit(env);
+            cpu_loop_exit(cs);
             break;
         case SVM_EVTINJ_TYPE_EXEPT:
-            env->exception_index = vector;
+            cs->exception_index = vector;
             env->error_code = event_inj_err;
             env->exception_is_int = 0;
             env->exception_next_eip = -1;
             qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
-            cpu_loop_exit(env);
+            cpu_loop_exit(cs);
             break;
         case SVM_EVTINJ_TYPE_SOFT:
-            env->exception_index = vector;
+            cs->exception_index = vector;
             env->error_code = event_inj_err;
             env->exception_is_int = 1;
             env->exception_next_eip = env->eip;
             qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
-            cpu_loop_exit(env);
+            cpu_loop_exit(cs);
             break;
         }
-        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
+        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                       env->error_code);
     }
 }
@@ -365,7 +360,7 @@ void helper_vmmcall(CPUX86State *env)
 
 void helper_vmload(CPUX86State *env, int aflag)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
     target_ulong addr;
 
     cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);
@@ -405,7 +400,7 @@ void helper_vmload(CPUX86State *env, int aflag)
 
 void helper_vmsave(CPUX86State *env, int aflag)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
     target_ulong addr;
 
     cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);
@@ -468,6 +463,7 @@ void helper_skinit(CPUX86State *env)
 
 void helper_invlpga(CPUX86State *env, int aflag)
 {
+    X86CPU *cpu = x86_env_get_cpu(env);
     target_ulong addr;
 
     cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);
@@ -480,13 +476,13 @@ void helper_invlpga(CPUX86State *env, int aflag)
 
     /* XXX: could use the ASID to see if it is needed to do the
        flush */
-    tlb_flush_page(env, addr);
+    tlb_flush_page(CPU(cpu), addr);
 }
 
 void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                       uint64_t param)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
 
     if (likely(!(env->hflags & HF_SVMI_MASK))) {
         return;
@@ -568,7 +564,8 @@ void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
 void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                          uint32_t next_eip_addend)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+
     if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
         /* FIXME: this should be read in at vmrun (faster this way?) */
         uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
@@ -599,11 +596,13 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
                   env->eip);
 
     if (env->hflags & HF_INHIBIT_IRQ_MASK) {
-        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
+        stl_phys(cs->as,
+                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                  SVM_INTERRUPT_SHADOW_MASK);
         env->hflags &= ~HF_INHIBIT_IRQ_MASK;
     } else {
-        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
+        stl_phys(cs->as,
+                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
     }
 
     /* Save the VM state in the vmcb */
@@ -618,12 +617,12 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
 
     stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
              env->gdt.base);
-    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
+    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
              env->gdt.limit);
 
     stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
              env->idt.base);
-    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
+    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
              env->idt.limit);
 
     stq_phys(cs->as,
@@ -644,7 +643,8 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
     if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
         int_ctl |= V_IRQ_MASK;
     }
-    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
+    stl_phys(cs->as,
+             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
 
     stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
              cpu_compute_eflags(env));
@@ -658,7 +658,7 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
              env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
     stq_phys(cs->as,
              env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
-    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
+    stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
              env->hflags & HF_CPL_MASK);
 
     /* Reload the host state from vm_hsave */
@@ -697,7 +697,8 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
     cpu_load_eflags(env, ldq_phys(cs->as,
                                   env->vm_hsave + offsetof(struct vmcb,
                                                            save.rflags)),
-                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
+                      VM_MASK));
     CC_OP = CC_OP_EFLAGS;
 
     svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
@@ -722,19 +723,21 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
                           env->vm_hsave + offsetof(struct vmcb, save.dr7));
 
     /* other setups */
-    cpu_x86_set_cpl(env, 0);
     stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
              exit_code);
     stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
              exit_info_1);
 
-    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
+    stl_phys(cs->as,
+             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
              ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                               control.event_inj)));
-    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
+    stl_phys(cs->as,
+             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
              ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                               control.event_inj_err)));
-    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
+    stl_phys(cs->as,
+             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
 
     env->hflags2 &= ~HF2_GIF_MASK;
     /* FIXME: Resets the current ASID register to zero (host ASID). */
@@ -747,10 +750,6 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
        from the page table indicated the host's CR3. If the PDPEs contain
        illegal state, the processor causes a shutdown. */
 
-    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
-    env->cr[0] |= CR0_PE_MASK;
-    env->eflags &= ~VM_MASK;
-
     /* Disables all breakpoints in the host DR7 register. */
 
     /* Checks the reloaded host state for consistency. */
@@ -760,11 +759,11 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
        #GP fault is delivered inside the host. */
 
     /* remove any pending exception */
-    env->exception_index = -1;
+    cs->exception_index = -1;
     env->error_code = 0;
     env->old_exception = -1;
 
-    cpu_loop_exit(env);
+    cpu_loop_exit(cs);
 }
 
 void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
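
Note on the pattern above (not part of the diff itself): the conversions in this file follow QEMU's move to passing an explicit AddressSpace (cs->as) to the physical-memory accessors (stb/stw/stl/stq_phys and the ld*_phys counterparts) and to obtaining the CPUState via the QOM cast CPU(x86_env_get_cpu(env)) instead of ENV_GET_CPU(env). A minimal sketch of the new-style call, assuming the same headers included at the top of svm_helper.c; the helper name svm_store_int_ctl_example is hypothetical and only illustrates the pattern shown in the hunks above:

/* Illustrative sketch only -- not part of the commit recorded above. */
static void svm_store_int_ctl_example(CPUX86State *env, uint32_t int_ctl)
{
    /* New style: derive the CPUState through the QOM cast ... */
    CPUState *cs = CPU(x86_env_get_cpu(env));

    /* ... and hand its AddressSpace explicitly to the accessor
       (previously stl_phys(addr, val) used an implicit address space). */
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
             int_ctl);
}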