[VMX] Get rid of special vm_launch schedule tail.
author    kfraser@localhost.localdomain <kfraser@localhost.localdomain>
Mon, 6 Nov 2006 15:40:30 +0000 (15:40 +0000)
committer kfraser@localhost.localdomain <kfraser@localhost.localdomain>
Mon, 6 Nov 2006 15:40:30 +0000 (15:40 +0000)
This required various hacking, including getting rid of the implicit
vcpu==current assumption in __vmwrite() and a couple of tweaks to the
shadow code.
Signed-off-by: Keir Fraser <keir@xensource.com>
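
A minimal, standalone C sketch (illustration only, not the commit's actual
code) of the two ideas in this changeset: callers keep the per-vcpu shadow
copies of CR0/CR4 in sync explicitly, so the vmwrite wrapper no longer needs
an implicit 'current' vcpu, and a single resume-style schedule tail serves
both first entry and resume because all one-time setup happens at VMCS
construction. Names and field encodings below mirror the diff where possible;
the surrounding scaffolding is invented for the example.

    #include <stdio.h>

    #define X86_CR0_TS      0x00000008UL
    #define GUEST_CR0           0x6800UL  /* VMCS field encoding, illustrative */
    #define CR0_READ_SHADOW     0x6004UL

    struct arch_vmx {
        unsigned long cpu_cr0;        /* last value written to GUEST_CR0 */
        unsigned long cpu_shadow_cr0; /* last value written to CR0_READ_SHADOW */
    };

    struct vcpu {
        struct arch_vmx hvm_vmx;
        void (*schedule_tail)(struct vcpu *v);
    };

    /* Wrapper around the VMWRITE instruction: no hidden use of 'current'. */
    static int vmwrite(unsigned long field, unsigned long value)
    {
        printf("VMWRITE %#lx <- %#lx\n", field, value);
        return 0;
    }

    /* Callers update the per-vcpu cache and the VMCS together. */
    static void vmx_stts_sketch(struct vcpu *v)
    {
        if ( !(v->hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
        {
            v->hvm_vmx.cpu_cr0 |= X86_CR0_TS;
            vmwrite(GUEST_CR0, v->hvm_vmx.cpu_cr0);
        }
    }

    /* One schedule tail: with all one-time setup done when the VMCS is
     * constructed, first entry and resume take the same path. */
    static void do_resume(struct vcpu *v)
    {
        (void)v; /* real code reloads the VMCS and jumps to the vmentry stub */
    }

    static void vcpu_initialise(struct vcpu *v)
    {
        v->schedule_tail = do_resume;  /* no separate "launch" tail any more */
    }

    int main(void)
    {
        struct vcpu v = { .hvm_vmx = { .cpu_cr0 = 0x80000011UL,
                                       .cpu_shadow_cr0 = 0x11UL } };
        vcpu_initialise(&v);
        vmx_stts_sketch(&v);
        return 0;
    }
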
xen/arch/x86/hvm/vmx/vmcs.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/mm/shadow/common.c
xen/arch/x86/mm/shadow/multi.c
xen/include/asm-x86/hvm/vmx/vmcs.h
xen/include/asm-x86/hvm/vmx/vmx.h

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 240e479a272c2154c2c89a842b52c9a8f5ad1792..e5ebec095e686e19d3b242ad866941db07b3e4c5 100644
@@ -285,12 +285,9 @@ static void vmx_set_host_env(struct vcpu *v)
     error |= __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
 }
 
-/* Update CR3, CR0, CR4, GDT, LDT, TR */
+#if 0
 static void vmx_do_launch(struct vcpu *v)
 {
-    unsigned int  error = 0;
-    unsigned long cr0, cr4;
-
     if ( v->vcpu_id != 0 )
     {
         /* Sync AP's TSC with BSP's */
@@ -298,62 +295,13 @@ static void vmx_do_launch(struct vcpu *v)
             v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
         hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
     }
-
-    __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );
-
-    error |= __vmwrite(GUEST_CR0, cr0);
-    cr0 &= ~X86_CR0_PG;
-    error |= __vmwrite(CR0_READ_SHADOW, cr0);
-    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
-    v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
-
-    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (cr4) : );
-
-    error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
-    cr4 &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
-
-    error |= __vmwrite(CR4_READ_SHADOW, cr4);
-
-    hvm_stts(v);
-
-    if ( vlapic_init(v) == 0 )
-    {
-#ifdef __x86_64__ 
-        u32 *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
-        u64  vapic_page_addr = 
-                        page_to_maddr(v->arch.hvm_vcpu.vlapic->regs_page);
-
-        *cpu_exec_control   |= CPU_BASED_TPR_SHADOW;
-        *cpu_exec_control   &= ~CPU_BASED_CR8_STORE_EXITING;
-        *cpu_exec_control   &= ~CPU_BASED_CR8_LOAD_EXITING;
-        error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
-        error |= __vmwrite(VIRTUAL_APIC_PAGE_ADDR, vapic_page_addr);
-        error |= __vmwrite(TPR_THRESHOLD, 0);
-#endif
-    }
-
-    vmx_set_host_env(v);
-    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
-
-    error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
-    error |= __vmwrite(GUEST_LDTR_BASE, 0);
-    error |= __vmwrite(GUEST_LDTR_LIMIT, 0);
-
-    error |= __vmwrite(GUEST_TR_BASE, 0);
-    error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
-
-    shadow_update_paging_modes(v);
-
-    __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
-    __vmwrite(HOST_CR3, v->arch.cr3);
-
-    v->arch.schedule_tail = arch_vmx_do_resume;
 }
+#endif
 
 static int construct_vmcs(struct vcpu *v)
 {
     int error = 0;
-    unsigned long tmp;
+    unsigned long tmp, cr0, cr4;
     union vmcs_arbytes arbytes;
 
     vmx_vmcs_enter(v);
@@ -362,6 +310,8 @@ static int construct_vmcs(struct vcpu *v)
     error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
     error |= __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
     error |= __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
+    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
+    v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
 
     /* Host data selectors. */
     error |= __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
@@ -465,6 +415,48 @@ static int construct_vmcs(struct vcpu *v)
     error |= __vmwrite(EXCEPTION_BITMAP,
                        MONITOR_DEFAULT_EXCEPTION_BITMAP);
 
+    /* Guest CR0. */
+    cr0 = read_cr0();
+    v->arch.hvm_vmx.cpu_cr0 = cr0;
+    error |= __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+    v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
+    error |= __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+
+    /* Guest CR4. */
+    cr4 = read_cr4();
+    error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
+    v->arch.hvm_vmx.cpu_shadow_cr4 =
+        cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
+    error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+
+    /* XXX Move this out. */
+    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
+    if ( vlapic_init(v) != 0 )
+        return -1;
+
+#ifdef __x86_64__ 
+    /* VLAPIC TPR optimisation. */
+    v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_TPR_SHADOW;
+    v->arch.hvm_vcpu.u.vmx.exec_control &=
+        ~(CPU_BASED_CR8_STORE_EXITING | CPU_BASED_CR8_LOAD_EXITING);
+    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
+                       v->arch.hvm_vcpu.u.vmx.exec_control);
+    error |= __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
+                       page_to_maddr(v->arch.hvm_vcpu.vlapic->regs_page));
+    error |= __vmwrite(TPR_THRESHOLD, 0);
+#endif
+
+    error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
+    error |= __vmwrite(GUEST_LDTR_BASE, 0);
+    error |= __vmwrite(GUEST_LDTR_LIMIT, 0);
+
+    error |= __vmwrite(GUEST_TR_BASE, 0);
+    error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
+
+    shadow_update_paging_modes(v);
+    __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
+    __vmwrite(HOST_CR3, v->arch.cr3);
+
     vmx_vmcs_exit(v);
 
     return error;
@@ -534,14 +526,6 @@ void arch_vmx_do_resume(struct vcpu *v)
     reset_stack_and_jump(vmx_asm_do_vmentry);
 }
 
-void arch_vmx_do_launch(struct vcpu *v)
-{
-    vmx_load_vmcs(v);
-    vmx_do_launch(v);
-    reset_stack_and_jump(vmx_asm_do_vmentry);
-}
-
-
 /* Dump a section of VMCS */
 static void print_section(char *header, uint32_t start, 
                           uint32_t end, int incr)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 816d50511b0232639ddacb80b3b634b678e52cd1..a921e2a1c384460ccdfe4ff6a4aaa21adfd9e5bc 100644
@@ -59,7 +59,7 @@ static int vmx_vcpu_initialise(struct vcpu *v)
 
     spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
 
-    v->arch.schedule_tail    = arch_vmx_do_launch;
+    v->arch.schedule_tail    = arch_vmx_do_resume;
     v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
     v->arch.ctxt_switch_to   = vmx_ctxt_switch_to;
 
@@ -474,10 +474,10 @@ static void vmx_store_cpu_guest_regs(
 
     if ( crs != NULL )
     {
-        __vmread(CR0_READ_SHADOW, &crs[0]);
+        crs[0] = v->arch.hvm_vmx.cpu_shadow_cr0;
         crs[2] = v->arch.hvm_vmx.cpu_cr2;
         __vmread(GUEST_CR3, &crs[3]);
-        __vmread(CR4_READ_SHADOW, &crs[4]);
+        crs[4] = v->arch.hvm_vmx.cpu_shadow_cr4;
     }
 
     vmx_vmcs_exit(v);
@@ -570,8 +570,6 @@ static unsigned long vmx_get_ctrl_reg(struct vcpu *v, unsigned int num)
 /* Make sure that xen intercepts any FP accesses from current */
 static void vmx_stts(struct vcpu *v)
 {
-    unsigned long cr0;
-
     /* VMX depends on operating on the current vcpu */
     ASSERT(v == current);
 
@@ -581,11 +579,10 @@ static void vmx_stts(struct vcpu *v)
      * then this is not necessary: no FPU activity can occur until the guest
      * clears CR0.TS, and we will initialise the FPU when that happens.
      */
-    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
-    if ( !(cr0 & X86_CR0_TS) )
+    if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
     {
-        __vmread_vcpu(v, GUEST_CR0, &cr0);
-        __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
+        v->arch.hvm_vmx.cpu_cr0 |= X86_CR0_TS;
+        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
         __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
     }
 }
@@ -662,6 +659,12 @@ static int vmx_guest_x86_mode(struct vcpu *v)
             X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16);
 }
 
+static int vmx_pae_enabled(struct vcpu *v)
+{
+    unsigned long cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
+    return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
+}
+
 /* Setup HVM interfaces */
 static void vmx_setup_hvm_funcs(void)
 {
@@ -811,19 +814,16 @@ static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
 
 static void vmx_do_no_device_fault(void)
 {
-    unsigned long cr0;
     struct vcpu *v = current;
 
     setup_fpu(current);
     __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
 
     /* Disable TS in guest CR0 unless the guest wants the exception too. */
-    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
-    if ( !(cr0 & X86_CR0_TS) )
+    if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
     {
-        __vmread_vcpu(v, GUEST_CR0, &cr0);
-        cr0 &= ~X86_CR0_TS;
-        __vmwrite(GUEST_CR0, cr0);
+        v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS;
+        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
     }
 }
 
@@ -1158,9 +1158,9 @@ static int vmx_world_save(struct vcpu *v, struct vmx_assist_context *c)
     error |= __vmread(GUEST_RSP, &c->esp);
     error |= __vmread(GUEST_RFLAGS, &c->eflags);
 
-    error |= __vmread(CR0_READ_SHADOW, &c->cr0);
+    c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
     c->cr3 = v->arch.hvm_vmx.cpu_cr3;
-    error |= __vmread(CR4_READ_SHADOW, &c->cr4);
+    c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
 
     error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
     error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
@@ -1220,7 +1220,8 @@ static int vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
     error |= __vmwrite(GUEST_RSP, c->esp);
     error |= __vmwrite(GUEST_RFLAGS, c->eflags);
 
-    error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
+    v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
+    error |= __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
 
     if (!vmx_paging_enabled(v))
         goto skip_cr3;
@@ -1270,7 +1271,8 @@ static int vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
 
     error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
-    error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
+    v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
+    error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
 
     error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
     error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
@@ -1408,7 +1410,7 @@ static int vmx_set_cr0(unsigned long value)
     /*
      * CR0: We don't want to lose PE and PG.
      */
-    __vmread_vcpu(v, CR0_READ_SHADOW, &old_cr0);
+    old_cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
     paging_enabled = (old_cr0 & X86_CR0_PE) && (old_cr0 & X86_CR0_PG);
 
     /* TS cleared? Then initialise FPU now. */
@@ -1418,8 +1420,11 @@ static int vmx_set_cr0(unsigned long value)
         __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
     }
 
-    __vmwrite(GUEST_CR0, value | X86_CR0_PE | X86_CR0_PG | X86_CR0_NE);
-    __vmwrite(CR0_READ_SHADOW, value);
+    v->arch.hvm_vmx.cpu_cr0 = value | X86_CR0_PE | X86_CR0_PG | X86_CR0_NE;
+    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+
+    v->arch.hvm_vmx.cpu_shadow_cr0 = value;
+    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
 
@@ -1655,9 +1660,9 @@ static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
     }
     case 4: /* CR4 */
     {
-        __vmread(CR4_READ_SHADOW, &old_cr);
+        old_cr = v->arch.hvm_vmx.cpu_shadow_cr4;
 
-        if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
+        if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
         {
             if ( vmx_pgbit_test(v) )
             {
@@ -1706,7 +1711,8 @@ static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
         }
 
         __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
-        __vmwrite(CR4_READ_SHADOW, value);
+        v->arch.hvm_vmx.cpu_shadow_cr4 = value;
+        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
 
         /*
          * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
@@ -1804,16 +1810,14 @@ static int vmx_cr_access(unsigned long exit_qualification,
         setup_fpu(v);
         __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
 
-        __vmread_vcpu(v, GUEST_CR0, &value);
-        value &= ~X86_CR0_TS; /* clear TS */
-        __vmwrite(GUEST_CR0, value);
+        v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS; /* clear TS */
+        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
 
-        __vmread_vcpu(v, CR0_READ_SHADOW, &value);
-        value &= ~X86_CR0_TS; /* clear TS */
-        __vmwrite(CR0_READ_SHADOW, value);
+        v->arch.hvm_vmx.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
+        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
         break;
     case TYPE_LMSW:
-        __vmread_vcpu(v, CR0_READ_SHADOW, &value);
+        value = v->arch.hvm_vmx.cpu_shadow_cr0;
         value = (value & ~0xF) |
             (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
         TRACE_VMEXIT(1, TYPE_LMSW);
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index aa217975edb877351ae9bbfaabf667a39f6046b3..a5ace1a907bbd2d2a8e53602d1a2afc4b1fbd4a5 100644
@@ -2273,15 +2273,6 @@ void sh_update_paging_modes(struct vcpu *v)
     //     - changes in CR0.PG, CR4.PAE, CR4.PSE, or CR4.PGE
     //
 
-    // Avoid determining the current shadow mode for uninitialized CPUs, as
-    // we can not yet determine whether it is an HVM or PV domain.
-    //
-    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
-    {
-        SHADOW_PRINTK("%s: postponing determination of shadow mode\n", __func__);
-        return;
-    }
-
     // First, tear down any old shadow tables held by this vcpu.
     //
     shadow_detach_old_tables(v);
@@ -2316,7 +2307,6 @@ void sh_update_paging_modes(struct vcpu *v)
         v->arch.shadow.translate_enabled = !!hvm_paging_enabled(v);
         if ( !v->arch.shadow.translate_enabled )
         {
-            
             /* Set v->arch.guest_table to use the p2m map, and choose
              * the appropriate shadow mode */
             old_guest_table = pagetable_get_mfn(v->arch.guest_table);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index a5f254af98dd2d5b83c92218aa94844c1f540ee5..e2e0bb7a3b1fa63b7bbff3966d8de811460994af 100644
@@ -3357,7 +3357,7 @@ sh_update_cr3(struct vcpu *v)
 
     sh_detach_old_tables(v);
 
-    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+    if ( !is_hvm_domain(d) && !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
     {
         ASSERT(v->arch.cr3 == 0);
         return;
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index da6ade2f18cd1ad2600ac86bb2ab51dba324250c..e1a6e5a393db1058b458317290ce4dd7e09160b0 100644
@@ -76,7 +76,6 @@ struct arch_vmx_struct {
     unsigned long        cpu_shadow_cr4; /* copy of guest read shadow CR4 */
     unsigned long        cpu_cr2; /* save CR2 */
     unsigned long        cpu_cr3;
-    unsigned long        cpu_based_exec_control;
     struct vmx_msr_state msr_content;
     unsigned long        vmxassist_enabled:1; 
 };
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 4a2051e5bd0376dba911764a36622e6bfdc2894c..919acdc706e056196a4e0b84a5cfed07f841bf5d 100644
@@ -30,7 +30,6 @@ extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
 extern void vmx_asm_do_vmentry(void);
 extern void vmx_intr_assist(void);
 extern void vmx_migrate_timers(struct vcpu *v);
-extern void arch_vmx_do_launch(struct vcpu *);
 extern void arch_vmx_do_resume(struct vcpu *);
 extern void set_guest_time(struct vcpu *v, u64 gtime);
 
@@ -220,54 +219,8 @@ static always_inline int ___vmread(
     return rc;
 }
 
-
-static always_inline void __vmwrite_vcpu(
-    struct vcpu *v, unsigned long field, unsigned long value)
-{
-    switch ( field ) {
-    case CR0_READ_SHADOW:
-        v->arch.hvm_vmx.cpu_shadow_cr0 = value;
-        break;
-    case GUEST_CR0:
-        v->arch.hvm_vmx.cpu_cr0 = value;
-        break;
-    case CR4_READ_SHADOW:
-        v->arch.hvm_vmx.cpu_shadow_cr4 = value;
-        break;
-    case CPU_BASED_VM_EXEC_CONTROL:
-        v->arch.hvm_vmx.cpu_based_exec_control = value;
-        break;
-    default:
-        printk("__vmwrite_cpu: invalid field %lx\n", field);
-        break;
-    }
-}
-
-static always_inline void __vmread_vcpu(
-    struct vcpu *v, unsigned long field, unsigned long *value)
-{
-    switch ( field ) {
-    case CR0_READ_SHADOW:
-        *value = v->arch.hvm_vmx.cpu_shadow_cr0;
-        break;
-    case GUEST_CR0:
-        *value = v->arch.hvm_vmx.cpu_cr0;
-        break;
-    case CR4_READ_SHADOW:
-        *value = v->arch.hvm_vmx.cpu_shadow_cr4;
-        break;
-    case CPU_BASED_VM_EXEC_CONTROL:
-        *value = v->arch.hvm_vmx.cpu_based_exec_control;
-        break;
-    default:
-        printk("__vmread_vcpu: invalid field %lx\n", field);
-        break;
-    }
-}
-
 static inline int __vmwrite(unsigned long field, unsigned long value)
 {
-    struct vcpu *v = current;
     int rc;
 
     __asm__ __volatile__ ( VMWRITE_OPCODE
@@ -278,15 +231,6 @@ static inline int __vmwrite(unsigned long field, unsigned long value)
                            : "0" (0), "a" (field) , "c" (value)
                            : "memory");
 
-    switch ( field ) {
-    case CR0_READ_SHADOW:
-    case GUEST_CR0:
-    case CR4_READ_SHADOW:
-    case CPU_BASED_VM_EXEC_CONTROL:
-        __vmwrite_vcpu(v, field, value);
-        break;
-    }
-
     return rc;
 }
 
@@ -337,18 +281,10 @@ static inline int __vmxon (u64 addr)
 
 static inline int vmx_paging_enabled(struct vcpu *v)
 {
-    unsigned long cr0;
-    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
+    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
     return ((cr0 & (X86_CR0_PE|X86_CR0_PG)) == (X86_CR0_PE|X86_CR0_PG));
 }
 
-static inline int vmx_pae_enabled(struct vcpu *v)
-{
-    unsigned long cr4;
-    __vmread_vcpu(v, CR4_READ_SHADOW, &cr4);
-    return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
-}
-
 static inline int vmx_long_mode_enabled(struct vcpu *v)
 {
     u64 efer = v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER];
@@ -370,9 +306,7 @@ static inline void vmx_update_host_cr3(struct vcpu *v)
 
 static inline int vmx_pgbit_test(struct vcpu *v)
 {
-    unsigned long cr0;
-
-    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
+    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
     return (cr0 & X86_CR0_PG);
 }