     {
         struct xen_domctl_ext_vcpucontext *evc;
         struct vcpu *v;
-        struct xen_domctl_ext_vcpu_msr msr;
         evc = &domctl->u.ext_vcpucontext;
             evc->vmce.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
             evc->vmce.mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
-            i = ret = 0;
-            if ( boot_cpu_has(X86_FEATURE_DBEXT) )
-            {
-                unsigned int j;
-
-                if ( v->arch.pv_vcpu.dr_mask[0] )
-                {
-                    if ( i < evc->msr_count && !ret )
-                    {
-                        msr.index = MSR_AMD64_DR0_ADDRESS_MASK;
-                        msr.reserved = 0;
-                        msr.value = v->arch.pv_vcpu.dr_mask[0];
-                        if ( copy_to_guest_offset(evc->msrs, i, &msr, 1) )
-                            ret = -EFAULT;
-                    }
-                    ++i;
-                }
-                for ( j = 0; j < 3; ++j )
-                {
-                    if ( !v->arch.pv_vcpu.dr_mask[1 + j] )
-                        continue;
-                    if ( i < evc->msr_count && !ret )
-                    {
-                        msr.index = MSR_AMD64_DR1_ADDRESS_MASK + j;
-                        msr.reserved = 0;
-                        msr.value = v->arch.pv_vcpu.dr_mask[1 + j];
-                        if ( copy_to_guest_offset(evc->msrs, i, &msr, 1) )
-                            ret = -EFAULT;
-                    }
-                    ++i;
-                }
-            }
-            if ( i > evc->msr_count && !ret )
-                ret = -ENOBUFS;
-            evc->msr_count = i;
-
+            ret = 0;
             vcpu_unpause(v);
             copyback = 1;
         }
                 ret = vmce_restore_vcpu(v, &vmce);
             }
-            else if ( evc->size > offsetof(typeof(*evc), vmce) )
-                ret = -EINVAL;
             else
                 ret = 0;
-            if ( ret || evc->size <= offsetof(typeof(*evc), msrs) )
-                /* nothing */;
-            else if ( evc->size < offsetof(typeof(*evc), msrs) +
-                                  sizeof(evc->msrs) )
-                ret = -EINVAL;
-            else
-            {
-                for ( i = 0; i < evc->msr_count; ++i )
-                {
-                    ret = -EFAULT;
-                    if ( copy_from_guest_offset(&msr, evc->msrs, i, 1) )
-                        break;
-                    ret = -EINVAL;
-                    if ( msr.reserved )
-                        break;
-                    switch ( msr.index )
-                    {
-                    case MSR_AMD64_DR0_ADDRESS_MASK:
-                        if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
-                             (msr.value >> 32) )
-                            break;
-                        v->arch.pv_vcpu.dr_mask[0] = msr.value;
-                        continue;
-                    case MSR_AMD64_DR1_ADDRESS_MASK ...
-                         MSR_AMD64_DR3_ADDRESS_MASK:
-                        if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
-                             (msr.value >> 32) )
-                            break;
-                        msr.index -= MSR_AMD64_DR1_ADDRESS_MASK - 1;
-                        v->arch.pv_vcpu.dr_mask[msr.index] = msr.value;
-                        continue;
-                    }
-                    break;
-                }
-                if ( i == evc->msr_count )
-                    ret = 0;
-            }
-
             domain_unpause(d);
         }
     }
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
-#if defined(__i386__) || defined(__x86_64__)
-struct xen_domctl_ext_vcpu_msr {
-    uint32_t         index;
-    uint32_t         reserved;
-    uint64_aligned_t value;
-};
-typedef struct xen_domctl_ext_vcpu_msr xen_domctl_ext_vcpu_msr_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpu_msr_t);
-#endif
-
 /* XEN_DOMCTL_set_ext_vcpucontext */
 /* XEN_DOMCTL_get_ext_vcpucontext */
 struct xen_domctl_ext_vcpucontext {
     uint16_t         sysenter_callback_cs;
     uint8_t          syscall32_disables_events;
     uint8_t          sysenter_disables_events;
-    /*
-     * When, for the "get" version, msr_count is too small to cover all MSRs
-     * the hypervisor needs to be saved, the call will return -ENOBUFS and
-     * set msr_count to the required (minimum) value. Furthermore, for both
-     * "get" and "set", that field as well as the msrs one only get looked at
-     * if the size field above covers the structure up to the entire msrs one.
-     */
-    uint16_t         msr_count;
 #if defined(__GNUC__)
     union {
         uint64_aligned_t mcg_cap;
         struct hvm_vmce_vcpu vmce;
     };
 #else
     struct hvm_vmce_vcpu vmce;
 #endif
-    XEN_GUEST_HANDLE_64(xen_domctl_ext_vcpu_msr_t) msrs;
 #endif
 };
 typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;