No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
    for ( i = 1; i < min(ARRAY_SIZE(p->extd.raw),
                         p->extd.max_leaf + 1 - 0x80000000ul); ++i )
        cpuid_leaf(0x80000000 + i, &p->extd.raw[i]);
+
+    p->x86_vendor = boot_cpu_data.x86_vendor;
}

static void __init calculate_host_policy(void)
        res->d = p->extd.e1d;

        /* If not emulating AMD, clear the duplicated features in e1d. */
-        if ( currd->arch.x86_vendor != X86_VENDOR_AMD )
+        if ( p->x86_vendor != X86_VENDOR_AMD )
            res->d &= ~CPUID_COMMON_1D_FEATURES;

        /*
        res->d = p->extd.e1d;

        /* If not emulating AMD, clear the duplicated features in e1d. */
-        if ( d->arch.x86_vendor != X86_VENDOR_AMD )
+        if ( p->x86_vendor != X86_VENDOR_AMD )
            res->d &= ~CPUID_COMMON_1D_FEATURES;
/* fast-forward MSR_APIC_BASE.EN if it hasn't already been clobbered. */
else if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
res->d &= ~cpufeat_mask(X86_FEATURE_PSE36);
        /* SYSCALL is hidden outside of long mode on Intel. */
-        if ( d->arch.x86_vendor == X86_VENDOR_INTEL &&
-             !hvm_long_mode_enabled(v))
+        if ( p->x86_vendor == X86_VENDOR_INTEL && !hvm_long_mode_enabled(v) )
            res->d &= ~cpufeat_mask(X86_FEATURE_SYSCALL);

        break;
        if ( (rc = init_domain_cpuid_policy(d)) )
            goto fail;

-        d->arch.x86_vendor = boot_cpu_data.x86_vendor;
-
        d->arch.ioport_caps =
            rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
        rc = -ENOMEM;
    switch ( ctl->input[0] )
    {
    case 0: {
-        int old_vendor = d->arch.x86_vendor;
+        int old_vendor = p->x86_vendor;

-        d->arch.x86_vendor = get_cpu_vendor(
-            ctl->ebx, ctl->ecx, ctl->edx, gcv_guest);
+        p->x86_vendor = get_cpu_vendor(ctl->ebx, ctl->ecx, ctl->edx, gcv_guest);

-        if ( is_hvm_domain(d) && (d->arch.x86_vendor != old_vendor) )
+        if ( is_hvm_domain(d) && (p->x86_vendor != old_vendor) )
        {
            struct vcpu *v;
                ecx |= cpufeat_mask(X86_FEATURE_CMP_LEGACY);

            /* If not emulating AMD, clear the duplicated features in e1d. */
-            if ( d->arch.x86_vendor != X86_VENDOR_AMD )
+            if ( p->x86_vendor != X86_VENDOR_AMD )
                edx &= ~CPUID_COMMON_1D_FEATURES;

            switch ( boot_cpu_data.x86_vendor )
    hvmemul_ctxt->validate = validate;
    hvmemul_ctxt->ctxt.regs = regs;
-    hvmemul_ctxt->ctxt.vendor = curr->domain->arch.x86_vendor;
+    hvmemul_ctxt->ctxt.vendor = curr->domain->arch.cpuid->x86_vendor;
    hvmemul_ctxt->ctxt.force_writeback = true;
if ( cpu_has_vmx )
{
struct vcpu *cur = current;
bool should_emulate =
- cur->domain->arch.x86_vendor != boot_cpu_data.x86_vendor;
+ cur->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor;
struct hvm_emulate_ctxt ctxt;
hvm_emulate_init_once(&ctxt, opt_hvm_fep ? NULL : is_cross_vendor, regs);
               (p->addr & 3);
        /* AMD extended configuration space access? */
        if ( CF8_ADDR_HI(cf8) &&
-             d->arch.x86_vendor == X86_VENDOR_AMD &&
+             d->arch.cpuid->x86_vendor == X86_VENDOR_AMD &&
             (x86_fam = get_cpu_family(
                 d->arch.cpuid->basic.raw_fms, NULL, NULL)) > 0x10 &&
             x86_fam <= 0x17 )
    u32 bitmap = vmcb_get_exception_intercepts(vmcb);

    if ( opt_hvm_fep ||
-         (v->domain->arch.x86_vendor != boot_cpu_data.x86_vendor) )
+         (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
        bitmap |= (1U << TRAP_invalid_op);
    else
        bitmap &= ~(1U << TRAP_invalid_op);
static void vmx_update_guest_vendor(struct vcpu *v)
{
    if ( opt_hvm_fep ||
-         (v->domain->arch.x86_vendor != boot_cpu_data.x86_vendor) )
+         (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
        v->arch.hvm_vmx.exception_bitmap |= (1U << TRAP_invalid_op);
    else
        v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_invalid_op);
    struct ptwr_emulate_ctxt ptwr_ctxt = {
        .ctxt = {
            .regs = regs,
-            .vendor = d->arch.x86_vendor,
+            .vendor = d->arch.cpuid->x86_vendor,
            .addr_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
            .sp_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
            .swint_emulate = x86_swint_emulate_none,
    struct mmio_ro_emulate_ctxt mmio_ro_ctxt = { .cr2 = addr };
    struct x86_emulate_ctxt ctxt = {
        .regs = regs,
-        .vendor = v->domain->arch.x86_vendor,
+        .vendor = v->domain->arch.cpuid->x86_vendor,
        .addr_size = addr_size,
        .sp_size = addr_size,
        .swint_emulate = x86_swint_emulate_none,
    memset(sh_ctxt, 0, sizeof(*sh_ctxt));

    sh_ctxt->ctxt.regs = regs;
-    sh_ctxt->ctxt.vendor = v->domain->arch.x86_vendor;
+    sh_ctxt->ctxt.vendor = v->domain->arch.cpuid->x86_vendor;
    sh_ctxt->ctxt.swint_emulate = x86_swint_emulate_none;

    /* Segment cache initialisation. Primed with CS. */
    struct domain *currd = curr->domain;
    struct priv_op_ctxt ctxt = {
        .ctxt.regs = regs,
-        .ctxt.vendor = currd->arch.x86_vendor,
+        .ctxt.vendor = currd->arch.cpuid->x86_vendor,
    };
    int rc;
    unsigned int eflags, ar;
    /* Toolstack selected Hypervisor max_leaf (if non-zero). */
    uint8_t hv_limit, hv2_limit;

+    /* Value calculated from raw data above. */
+    uint8_t x86_vendor;
+
    /* Temporary: Legacy data array. */
#define MAX_CPUID_INPUT 40
    xen_domctl_cpuid_t legacy[MAX_CPUID_INPUT];
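
As an illustrative aside (not part of the patch): x86_vendor is now filled in once, by calculate_raw_policy() for the host value and by the domctl path above when the toolstack overrides leaf 0, and consumers read it back from the CPUID policy. A minimal sketch of that access pattern, using guest_is_cross_vendor() as a hypothetical helper name; the real call sites open-code the comparison, as the hunks show:

/* Illustrative sketch only; guest_is_cross_vendor() is a hypothetical helper. */
static inline bool guest_is_cross_vendor(const struct vcpu *v)
{
    /* Vendor now lives in the domain's CPUID policy, not struct arch_domain. */
    return v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor;
}
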
    /* Is PHYSDEVOP_eoi to automatically unmask the event channel? */
    bool_t auto_unmask;

-    /* Values snooped from updates to cpuids[] (below). */
-    u8 x86_vendor; /* CPU vendor */
-
    /*
     * The width of the FIP/FDP register in the FPU that needs to be
     * saved/restored during a context switch. This is needed because