 static void amd_ctxt_switch_levelling(const struct domain *nextd)
 {
 	struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
-	const struct cpuidmasks *masks = &cpuidmask_defaults;
+	const struct cpuidmasks *masks =
+		(nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
+		? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
 
 #define LAZY(cap, msr, field) \
 	({ \
 static void intel_ctxt_switch_levelling(const struct domain *nextd)
 {
 	struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
-	const struct cpuidmasks *masks = &cpuidmask_defaults;
+	const struct cpuidmasks *masks;
 
 	if (cpu_has_cpuid_faulting) {
 		/*
 		return;
 	}
 
+	masks = (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
+		? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
+
 #define LAZY(msr, field) \
 	({ \
 		if (unlikely(these_masks->field != masks->field) && \
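
Both levelling hunks above follow the same pattern: pick the next domain's private mask set when one has been allocated, otherwise fall back to cpuidmask_defaults, and only rewrite the masking MSRs when the value cached for the current CPU differs from the wanted one. The standalone sketch below illustrates that pattern; the names (struct masks, struct dom, masks_for, lazy_apply, write_mask_msr) are hypothetical stand-ins rather than Xen's, and the single uint64_t field is a placeholder for the real register pairs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for Xen's types; only the shape matters here. */
struct masks { uint64_t leaf1_edx_ecx; };

static const struct masks default_masks = { UINT64_MAX }; /* "mask nothing" */
static struct masks this_cpu_masks;   /* last values loaded on this CPU */

struct dom { bool is_pv; struct masks *cpuidmasks; };

static void write_mask_msr(uint64_t val)
{
    /* Stand-in for the wrmsrl() on the vendor's CPUID mask MSR. */
    printf("wrmsr <- %#llx\n", (unsigned long long)val);
}

/* Choose the mask set to load for 'next': its private set if it has one,
 * otherwise the boot-time defaults (the ternary in both hunks above). */
static const struct masks *masks_for(const struct dom *next)
{
    return (next && next->is_pv && next->cpuidmasks)
           ? next->cpuidmasks : &default_masks;
}

/* Lazily apply the chosen masks: skip the (slow) MSR write whenever this
 * CPU already holds the wanted value, mirroring the LAZY() comparison. */
static void lazy_apply(const struct masks *wanted)
{
    if (this_cpu_masks.leaf1_edx_ecx != wanted->leaf1_edx_ecx) {
        write_mask_msr(wanted->leaf1_edx_ecx);
        this_cpu_masks.leaf1_edx_ecx = wanted->leaf1_edx_ecx;
    }
}

int main(void)
{
    struct masks trimmed = { 0x00000000ffffffffULL };
    struct dom guest = { .is_pv = true, .cpuidmasks = &trimmed };

    lazy_apply(masks_for(&guest));  /* differs from the initial value: one write */
    lazy_apply(masks_for(&guest));  /* already loaded: no write */
    lazy_apply(masks_for(NULL));    /* no PV domain: back to the defaults */
    return 0;
}

The point of the laziness is that switching between vCPUs of the same domain, or between domains with identical masks, costs no MSR writes at all.
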
             goto fail;
         clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
 
+        if ( levelling_caps & ~LCAP_faulting )
+        {
+            d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
+            if ( !d->arch.pv_domain.cpuidmasks )
+                goto fail;
+            *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
+        }
+
         rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
                                       GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
                                       NULL, NULL);
     paging_final_teardown(d);
     free_perdomain_mappings(d);
     if ( is_pv_domain(d) )
+    {
+        xfree(d->arch.pv_domain.cpuidmasks);
         free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+    }
     psr_domain_free(d);
     return rc;
 }
     free_perdomain_mappings(d);
     if ( is_pv_domain(d) )
+    {
         free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+        xfree(d->arch.pv_domain.cpuidmasks);
+    }
 
     free_xenheap_page(d->shared_info);
     cleanup_domain_irq_mapping(d);
     /* map_domain_page() mapping cache. */
     struct mapcache_domain mapcache;
 
+
+    struct cpuidmasks *cpuidmasks;
 };
 
 struct monitor_write_data {
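
The header hunk adds the pointer the earlier hunks rely on: struct pv_domain's cpuidmasks stays NULL unless arch_domain_create() allocated it, which it only attempts when levelling_caps contains something other than LCAP_faulting, and both the creation failure path and arch_domain_destroy() hand the pointer to xfree(), which is a no-op for NULL. Below is a minimal sketch of that lifecycle, assuming hypothetical names (struct pv_dom, pv_dom_init, pv_dom_free, have_masking_msrs) and plain malloc()/free() in place of xmalloc()/xfree().

#include <stdlib.h>

/* Hypothetical stand-ins for the Xen structures touched above. */
struct masks { unsigned long long leaf1_edx_ecx; };

static const struct masks default_masks = { ~0ULL };   /* "mask nothing" */

struct pv_dom {
    struct masks *cpuidmasks;   /* NULL => context switch uses default_masks */
};

/* Mirrors the arch_domain_create() hunk: allocate only when the hardware
 * offers masking MSRs, and seed the copy from the boot-time defaults so a
 * fresh domain sees exactly the host's levelled feature set. */
static int pv_dom_init(struct pv_dom *d, int have_masking_msrs)
{
    if (have_masking_msrs) {
        d->cpuidmasks = malloc(sizeof(*d->cpuidmasks));
        if (!d->cpuidmasks)
            return -1;          /* caller takes the failure path below */
        *d->cpuidmasks = default_masks;
    }
    return 0;
}

/* One helper serves both the creation failure path and normal teardown:
 * free(NULL), like xfree(NULL), is a no-op, so the pointer can be released
 * unconditionally even when it was never allocated. */
static void pv_dom_free(struct pv_dom *d)
{
    free(d->cpuidmasks);
    d->cpuidmasks = NULL;
}

int main(void)
{
    struct pv_dom d = { 0 };    /* zeroed, as domain structures are */

    if (pv_dom_init(&d, /*have_masking_msrs=*/1) == 0)
        pv_dom_free(&d);
    return 0;
}

Seeding the allocation from the defaults means a freshly created domain behaves exactly as before this patch until its private copy is changed later.
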