{
struct vcpu *v = current;
struct domain *d = v->domain;
- unsigned int count = *ecx;
+ unsigned int count, dummy = 0;
+
+ if ( !eax )
+ eax = &dummy;
+ if ( !ebx )
+ ebx = &dummy;
+ if ( !ecx )
+ ecx = &dummy;
+ count = *ecx;
+ if ( !edx )
+ edx = &dummy;
if ( cpuid_viridian_leaves(input, eax, ebx, ecx, edx) )
return;
if ( cpuid_hypervisor_leaves(input, count, eax, ebx, ecx, edx) )
return;
- domain_cpuid(d, input, *ecx, eax, ebx, ecx, edx);
+ domain_cpuid(d, input, count, eax, ebx, ecx, edx);
switch ( input )
{
{
struct vcpu *v = current;
uint64_t *var_range_base, *fixed_range_base;
- int index, mtrr;
- uint32_t cpuid[4];
+ bool_t mtrr;
+ unsigned int edx, index;
int ret = X86EMUL_OKAY;
var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;
- hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
- mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
+ hvm_cpuid(1, NULL, NULL, NULL, &edx);
+ mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
switch ( msr )
{
int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
{
struct vcpu *v = current;
- int index, mtrr;
- uint32_t cpuid[4];
+ bool_t mtrr;
+ unsigned int edx, index;
int ret = X86EMUL_OKAY;
HVMTRACE_3D(MSR_WRITE, msr,
(uint32_t)msr_content, (uint32_t)(msr_content >> 32));
- hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
- mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
+ hvm_cpuid(1, NULL, NULL, NULL, &edx);
+ mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
hvm_memory_event_msr(msr, msr_content);
/* Update LWP_CFG MSR (0xc0000105). Returns -1 on error; otherwise returns 0. */
static int svm_update_lwp_cfg(struct vcpu *v, uint64_t msr_content)
{
- unsigned int eax, ebx, ecx, edx;
+ unsigned int edx;
uint32_t msr_low;
static uint8_t lwp_intr_vector;
if ( xsave_enabled(v) && cpu_has_lwp )
{
- hvm_cpuid(0x8000001c, &eax, &ebx, &ecx, &edx);
+ hvm_cpuid(0x8000001c, NULL, NULL, NULL, &edx);
msr_low = (uint32_t)msr_content;
/* generate #GP if guest tries to turn on unsupported features. */
static int svm_handle_osvw(struct vcpu *v, uint32_t msr, uint64_t *val, bool_t read)
{
- uint eax, ebx, ecx, edx;
+ unsigned int ecx;
/* Guest OSVW support */
- hvm_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
+ hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
if ( !test_bit((X86_FEATURE_OSVW & 31), &ecx) )
return -1;
int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
{
struct vcpu *v = current;
- unsigned int eax, ebx, ecx, edx, dummy;
+ unsigned int eax, ebx, ecx, edx;
u64 data = 0, host_data = 0;
int r = 1;
return 0;
/* VMX capability MSRs are available only when guest supports VMX. */
- hvm_cpuid(0x1, &dummy, &dummy, &ecx, &edx);
+ hvm_cpuid(0x1, NULL, NULL, &ecx, &edx);
if ( !(ecx & cpufeat_mask(X86_FEATURE_VMXE)) )
return 0;
if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
data |= X86_CR4_OSXSAVE;
- hvm_cpuid(0x0, &eax, &dummy, &dummy, &dummy);
+ hvm_cpuid(0x0, &eax, NULL, NULL, NULL);
switch ( eax )
{
default:
- hvm_cpuid(0xa, &eax, &dummy, &dummy, &dummy);
+ hvm_cpuid(0xa, &eax, NULL, NULL, NULL);
/* Check whether guest has the perf monitor feature. */
if ( (eax & 0xff) && (eax & 0xff00) )
data |= X86_CR4_PCE;
/* fall through */
case 0x7 ... 0x9:
ecx = 0;
- hvm_cpuid(0x7, &dummy, &ebx, &ecx, &dummy);
+ hvm_cpuid(0x7, NULL, &ebx, &ecx, NULL);
if ( ebx & cpufeat_mask(X86_FEATURE_FSGSBASE) )
data |= X86_CR4_FSGSBASE;
if ( ebx & cpufeat_mask(X86_FEATURE_SMEP) )