static void vmx_ctxt_switch_to(struct vcpu *v)
{
- struct domain *d = v->domain;
unsigned long old_cr4 = read_cr4(), new_cr4 = mmu_cr4_features;
- struct ept_data *ept_data = &p2m_get_hostp2m(d)->ept;
/* HOST_CR4 in VMCS is always mmu_cr4_features. Sync CR4 now. */
if ( old_cr4 != new_cr4 )
write_cr4(new_cr4);
- if ( paging_mode_hap(d) )
- {
- unsigned int cpu = smp_processor_id();
- /* Test-and-test-and-set this CPU in the EPT-is-synced mask. */
- if ( !cpumask_test_cpu(cpu, ept_get_synced_mask(ept_data)) &&
- !cpumask_test_and_set_cpu(cpu,
- ept_get_synced_mask(ept_data)) )
- __invept(INVEPT_SINGLE_CONTEXT, ept_get_eptp(ept_data), 0);
- }
-
vmx_restore_guest_msrs(v);
vmx_restore_dr(v);
}
if ( unlikely(need_flush) )
vpid_sync_all();
+ if ( paging_mode_hap(curr->domain) )
+ {
+ struct ept_data *ept = &p2m_get_hostp2m(curr->domain)->ept;
+ unsigned int cpu = smp_processor_id();
+
+ if ( cpumask_test_cpu(cpu, ept->invalidate) )
+ {
+ cpumask_clear_cpu(cpu, ept->invalidate);
+ __invept(INVEPT_SINGLE_CONTEXT, ept_get_eptp(ept), 0);
+ }
+ }
+
out:
HVMTRACE_ND(VMENTRY, 0, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
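Purely as an illustrative aside (not part of the patch): the hunk above amounts to a per-CPU deferred-flush pattern. Below is a minimal standalone sketch of that pattern, with hypothetical names (pending_invept, do_invept(), request_invept_all(), enter_guest_on()) standing in for the cpumask and __invept machinery. C11 atomics are used only to keep the toy model well-defined; the patch itself simply tests and then clears the bit, since only the owning CPU ever clears its own entry.

#include <stdatomic.h>
#include <stdbool.h>

#define NR_CPUS 8

/* One "needs INVEPT" flag per CPU, standing in for ept->invalidate. */
static atomic_bool pending_invept[NR_CPUS];

/* Stand-in for __invept(INVEPT_SINGLE_CONTEXT, eptp, 0). */
static void do_invept(unsigned int cpu)
{
    (void)cpu;
}

/* Sync side: mark every CPU as needing an invalidation (cf. cpumask_setall()). */
static void request_invept_all(void)
{
    for ( unsigned int cpu = 0; cpu < NR_CPUS; cpu++ )
        atomic_store(&pending_invept[cpu], true);
}

/* Entry side, analogous to the vmx_vmenter_helper() hunk above: consume this
 * CPU's pending bit and flush before the guest runs again. */
static void enter_guest_on(unsigned int cpu)
{
    if ( atomic_exchange(&pending_invept[cpu], false) )
        do_invept(cpu);

    /* ... the actual VMENTER would follow here ... */
}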
static void __ept_sync_domain(void *info)
{
- struct ept_data *ept = &((struct p2m_domain *)info)->ept;
-
- __invept(INVEPT_SINGLE_CONTEXT, ept_get_eptp(ept), 0);
+ /*
+ * The invalidation will be done before VMENTER (see
+ * vmx_vmenter_helper()).
+ */
}
void ept_sync_domain(struct p2m_domain *p2m)
p2m_flush_nestedp2m(d);
/*
- * Flush active cpus synchronously. Flush others the next time this domain
- * is scheduled onto them. We accept the race of other CPUs adding to
- * the ept_synced mask before on_selected_cpus() reads it, resulting in
- * unnecessary extra flushes, to avoid allocating a cpumask_t on the stack.
+ * Need to invalidate on all PCPUs because either:
+ *
+ * a) A VCPU has run and some translations may be cached.
+ * b) A VCPU has not run and the initial invalidation in case
+ *    of an EP4TA reuse is still needed.
*/
- cpumask_and(ept_get_synced_mask(ept),
- d->domain_dirty_cpumask, &cpu_online_map);
+ cpumask_setall(ept->invalidate);
- on_selected_cpus(ept_get_synced_mask(ept),
+ on_selected_cpus(d->domain_dirty_cpumask,
__ept_sync_domain, p2m, 1);
}
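Another illustrative aside: with every CPU's bit set first, the IPI sent via on_selected_cpus() no longer needs to do any flushing itself. On a CPU currently executing the guest its remaining role is to force a VM exit, after which the invalidation happens on the next entry via the vmx_vmenter_helper() check; CPUs not running the domain flush lazily whenever they next enter it. The sketch below (hypothetical names again, reusing the pending_invept model from the previous aside so it stays self-contained) shows that ordering: set everything first, then kick only the currently-running CPUs.

#include <stdatomic.h>
#include <stdbool.h>

#define NR_CPUS 8

static atomic_bool pending_invept[NR_CPUS];   /* as in the earlier sketch */

/* Hypothetical stand-in for on_selected_cpus() with the now-empty handler:
 * interrupt a CPU so that, if it is running guest code, it takes a VM exit. */
static void kick_cpu(unsigned int cpu)
{
    (void)cpu;
}

/* Model of the new ept_sync_domain() flow: request an INVEPT everywhere
 * first (covering both cases (a) and (b) in the comment above), then kick
 * only the CPUs currently running the domain so they come back in through
 * the pre-VMENTER invalidation check. */
static void sync_domain_model(const bool running[NR_CPUS])
{
    unsigned int cpu;

    for ( cpu = 0; cpu < NR_CPUS; cpu++ )         /* cpumask_setall()         */
        atomic_store(&pending_invept[cpu], true);

    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
        if ( running[cpu] )                       /* domain_dirty_cpumask     */
            kick_cpu(cpu);                        /* on_selected_cpus(..., 1) */
}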
p2m->flush_hardware_cached_dirty = ept_flush_pml_buffers;
}
- if ( !zalloc_cpumask_var(&ept->synced_mask) )
+ if ( !zalloc_cpumask_var(&ept->invalidate) )
return -ENOMEM;
- on_each_cpu(__ept_sync_domain, p2m, 1);
+ /*
+ * Assume an initial invalidation is required, in case an EP4TA is
+ * reused.
+ */
+ cpumask_setall(ept->invalidate);
return 0;
}
void ept_p2m_uninit(struct p2m_domain *p2m)
{
struct ept_data *ept = &p2m->ept;
- free_cpumask_var(ept->synced_mask);
+ free_cpumask_var(ept->invalidate);
}
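One more illustrative aside on the init/uninit hunks: the immediate on_each_cpu() flush at init time is replaced by marking every CPU as pending, which defers the INVEPT to the first guest entry on each CPU. That is early enough because a stale EP4TA-tagged translation could only be consumed from guest context, which is only reached through a VMENTER, and that entry now performs the INVEPT. A hypothetical, self-contained model of this lifecycle follows (names such as model_ept_init() are made up for illustration, not Xen interfaces).

#include <stdbool.h>
#include <stdlib.h>

/* Model of the ept->invalidate lifecycle: allocate the per-CPU "needs
 * INVEPT" set at p2m init, start with every bit set so the first guest
 * entry on each CPU performs an INVEPT (covering reuse of an EP4TA whose
 * tagged translations may still be cached), and free it at uninit. */
struct model_ept {
    bool *invalidate;                  /* stands in for cpumask_var_t */
    unsigned int nr_cpus;
};

static int model_ept_init(struct model_ept *ept, unsigned int nr_cpus)
{
    ept->invalidate = malloc(nr_cpus * sizeof(*ept->invalidate));
    if ( !ept->invalidate )
        return -1;                     /* -ENOMEM in the patch */

    for ( unsigned int cpu = 0; cpu < nr_cpus; cpu++ )
        ept->invalidate[cpu] = true;   /* cpumask_setall(): assume dirty */
    ept->nr_cpus = nr_cpus;
    return 0;
}

static void model_ept_uninit(struct model_ept *ept)
{
    free(ept->invalidate);             /* free_cpumask_var() */
    ept->invalidate = NULL;
}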
static void ept_dump_p2m_table(unsigned char key)
};
u64 eptp;
};
- cpumask_var_t synced_mask;
+ /* Set of PCPUs needing an INVEPT before a VMENTER. */
+ cpumask_var_t invalidate;
};
#define _VMX_DOMAIN_PML_ENABLED 0
#define ept_get_wl(ept) ((ept)->ept_wl)
#define ept_get_asr(ept) ((ept)->asr)
#define ept_get_eptp(ept) ((ept)->eptp)
-#define ept_get_synced_mask(ept) ((ept)->synced_mask)
#define NR_PML_ENTRIES 512