nv->nv_vvmcxaddr = INVALID_PADDR;
nv->nv_flushp2m = 0;
nv->nv_p2m = NULL;
+ nv->stale_np2m = false;
hvm_asid_flush_vcpu_asid(&nv->nv_n2asid);
*/
hvm_asid_flush_core();
vcpu_nestedhvm(v).nv_p2m = NULL;
+ vcpu_nestedhvm(v).stale_np2m = true;
}
void
mov %rsp,%rdi
call vmx_vmenter_helper
+ test %al, %al
+ jz .Lvmx_vmentry_restart
mov VCPU_hvm_guest_cr2(%rbx),%rax
pop %r15
GET_CURRENT(bx)
jmp .Lvmx_do_vmentry
+.Lvmx_vmentry_restart:
+ sti
+ jmp .Lvmx_do_vmentry
+
.Lvmx_goto_emulator:
sti
mov %rsp,%rdi
case 0: // Unhandled L1 EPT violation
break;
case 1: // This violation is handled completly
- /*Current nested EPT maybe flushed by other vcpus, so need
- * to re-set its shadow EPTP pointer.
- */
- if ( nestedhvm_vcpu_in_guestmode(current) &&
- nestedhvm_paging_mode_hap(current ) )
- __vmwrite(EPT_POINTER, get_shadow_eptp(current));
return;
case -1: // This vioaltion should be injected to L1 VMM
vcpu_nestedhvm(current).nv_vmexit_pending = 1;
bdw_erratum_bdf14_fixup();
}
-void vmx_vmenter_helper(const struct cpu_user_regs *regs)
+/* Returns false if the vmentry has to be restarted */
+bool vmx_vmenter_helper(const struct cpu_user_regs *regs)
{
struct vcpu *curr = current;
u32 new_asid, old_asid;
struct hvm_vcpu_asid *p_asid;
bool_t need_flush;
+ /* Shadow EPTP can't be updated here because irqs are disabled */
+ if ( nestedhvm_vcpu_in_guestmode(curr) && vcpu_nestedhvm(curr).stale_np2m )
+ return false;
+
if ( curr->domain->arch.hvm_domain.pi_ops.do_resume )
curr->domain->arch.hvm_domain.pi_ops.do_resume(curr);
__vmwrite(GUEST_RIP, regs->rip);
__vmwrite(GUEST_RSP, regs->rsp);
__vmwrite(GUEST_RFLAGS, regs->rflags | X86_EFLAGS_MBS);
+
+ return true;
}
/*
vmsucceed(regs);
}
+static void nvmx_eptp_update(void)
+{
+ struct vcpu *curr = current;
+
+ if ( !nestedhvm_vcpu_in_guestmode(curr) ||
+ vcpu_nestedhvm(curr).nv_vmexit_pending ||
+ !vcpu_nestedhvm(curr).stale_np2m ||
+ !nestedhvm_paging_mode_hap(curr) )
+ return;
+
+ /*
+ * Interrupts are enabled here, so we need to clear stale_np2m
+ * before we do the vmwrite. If we do it in the other order and
+ * an IPI comes in changing the shadow eptp after the vmwrite,
+ * we'll complete the vmenter with a stale eptp value.
+ */
+ vcpu_nestedhvm(curr).stale_np2m = false;
+ __vmwrite(EPT_POINTER, get_shadow_eptp(curr));
+}
+
void nvmx_switch_guest(void)
{
struct vcpu *v = current;
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
struct cpu_user_regs *regs = guest_cpu_user_regs();
+ nvmx_eptp_update();
+
/*
* A pending IO emulation may still be not finished. In this case, no
* virtual vmswitch is allowed. Or else, the following IO emulation will
cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
}
+static void nvcpu_flush(struct vcpu *v)
+{
+ hvm_asid_flush_vcpu(v);
+ vcpu_nestedhvm(v).stale_np2m = true;
+}
+
struct p2m_domain *
p2m_get_nestedp2m_locked(struct vcpu *v)
{
if ( p2m->np2m_base == np2m_base || p2m->np2m_base == P2M_BASE_EADDR )
{
if ( p2m->np2m_base == P2M_BASE_EADDR )
- hvm_asid_flush_vcpu(v);
+ nvcpu_flush(v);
p2m->np2m_base = np2m_base;
assign_np2m(v, p2m);
nestedp2m_unlock(d);
p2m_flush_table(p2m);
p2m_lock(p2m);
p2m->np2m_base = np2m_base;
- hvm_asid_flush_vcpu(v);
+ nvcpu_flush(v);
assign_np2m(v, p2m);
nestedp2m_unlock(d);
bool_t nv_flushp2m; /* True, when p2m table must be flushed */
struct p2m_domain *nv_p2m; /* used p2m table for this vcpu */
+ bool stale_np2m; /* True when p2m_base in VMCx02 is no longer valid */
struct hvm_vcpu_asid nv_n2asid;