... to simplify the default cases.
There are multiple errors with the handling of these three MSRs, but they are
deliberately not addressed at this point.
This removes the dance converting -1/0/1 into X86EMUL_*, allowing for the
removal of the 'ret' variable.
While cleaning this up, drop the gdprintk()s for #GP conditions, and the
'result' variable from svm_msr_write_intercept() as it is never modified.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
local_event_delivery_enable(); /* unmask events for PV drivers */
}
-static int
-nestedsvm_vmcb_isvalid(struct vcpu *v, uint64_t vmcxaddr)
-{
- /* Address must be 4k aligned */
- if ( (vmcxaddr & ~PAGE_MASK) != 0 )
- return 0;
-
- /* Maximum valid physical address.
- * See AMD BKDG for HSAVE_PA MSR.
- */
- if ( vmcxaddr > 0xfd00000000ULL )
- return 0;
-
- return 1;
-}
-
int nestedsvm_vmcb_map(struct vcpu *v, uint64_t vmcbaddr)
{
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
return hvm_intblk_none;
}
-/* MSR handling */
-int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content)
-{
- struct nestedsvm *svm = &vcpu_nestedsvm(v);
- int ret = 1;
-
- *msr_content = 0;
-
- switch (msr) {
- case MSR_K8_VM_CR:
- break;
- case MSR_K8_VM_HSAVE_PA:
- *msr_content = svm->ns_msr_hsavepa;
- break;
- case MSR_AMD64_TSC_RATIO:
- *msr_content = svm->ns_tscratio;
- break;
- default:
- ret = 0;
- break;
- }
-
- return ret;
-}
-
-int nsvm_wrmsr(struct vcpu *v, unsigned int msr, uint64_t msr_content)
-{
- int ret = 1;
- struct nestedsvm *svm = &vcpu_nestedsvm(v);
-
- switch (msr) {
- case MSR_K8_VM_CR:
- /* ignore write. handle all bits as read-only. */
- break;
- case MSR_K8_VM_HSAVE_PA:
- if (!nestedsvm_vmcb_isvalid(v, msr_content)) {
- gdprintk(XENLOG_ERR,
- "MSR_K8_VM_HSAVE_PA value invalid %#"PRIx64"\n", msr_content);
- ret = -1; /* inject #GP */
- break;
- }
- svm->ns_msr_hsavepa = msr_content;
- break;
- case MSR_AMD64_TSC_RATIO:
- if ((msr_content & ~TSC_RATIO_RSVD_BITS) != msr_content) {
- gdprintk(XENLOG_ERR,
- "reserved bits set in MSR_AMD64_TSC_RATIO %#"PRIx64"\n",
- msr_content);
- ret = -1; /* inject #GP */
- break;
- }
- svm->ns_tscratio = msr_content;
- break;
- default:
- ret = 0;
- break;
- }
-
- return ret;
-}
-
/* VMEXIT emulation */
void
nestedsvm_vmexit_defer(struct vcpu *v,
static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
{
- int ret;
struct vcpu *v = current;
const struct domain *d = v->domain;
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
+ const struct nestedsvm *nsvm = &vcpu_nestedsvm(v);
switch ( msr )
{
goto gpf;
break;
+ case MSR_K8_VM_CR:
+ *msr_content = 0;
+ break;
+
+ case MSR_K8_VM_HSAVE_PA:
+ *msr_content = nsvm->ns_msr_hsavepa;
+ break;
+
+ case MSR_AMD64_TSC_RATIO:
+ *msr_content = nsvm->ns_tscratio;
+ break;
+
case MSR_AMD_OSVW_ID_LENGTH:
case MSR_AMD_OSVW_STATUS:
if ( !d->arch.cpuid->extd.osvw )
break;
default:
- ret = nsvm_rdmsr(v, msr, msr_content);
- if ( ret < 0 )
- goto gpf;
- else if ( ret )
- break;
-
if ( rdmsr_safe(msr, *msr_content) == 0 )
break;
static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
{
- int ret, result = X86EMUL_OKAY;
struct vcpu *v = current;
struct domain *d = v->domain;
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
+ struct nestedsvm *nsvm = &vcpu_nestedsvm(v);
switch ( msr )
{
goto gpf;
break;
+ case MSR_K8_VM_CR:
+ /* ignore write. handle all bits as read-only. */
+ break;
+
+ case MSR_K8_VM_HSAVE_PA:
+ if ( (msr_content & ~PAGE_MASK) || msr_content > 0xfd00000000ULL )
+ goto gpf;
+ nsvm->ns_msr_hsavepa = msr_content;
+ break;
+
+ case MSR_AMD64_TSC_RATIO:
+ if ( msr_content & TSC_RATIO_RSVD_BITS )
+ goto gpf;
+ nsvm->ns_tscratio = msr_content;
+ break;
+
case MSR_IA32_MCx_MISC(4): /* Threshold register */
case MSR_F10_MC4_MISC1 ... MSR_F10_MC4_MISC3:
/*
break;
default:
- ret = nsvm_wrmsr(v, msr, msr_content);
- if ( ret < 0 )
- goto gpf;
- else if ( ret )
- break;
-
/* Match up with the RDMSR side; ultimately this should go away. */
if ( rdmsr_safe(msr, msr_content) == 0 )
break;
goto gpf;
}
- return result;
+ return X86EMUL_OKAY;
gpf:
return X86EMUL_EXCEPTION;
bool_t nsvm_vmcb_hap_enabled(struct vcpu *v);
enum hvm_intblk nsvm_intr_blocked(struct vcpu *v);
-/* MSRs */
-int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
-int nsvm_wrmsr(struct vcpu *v, unsigned int msr, uint64_t msr_content);
-
/* Interrupts, vGIF */
void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v);
void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v);