This patch provides vMCE save/restore during migration.
1. MCG_CAP is well-defined. However, considering future capability
extensions, we keep the save/restore logic that Jan implemented in
c/s 24887;
2. MCi_CTL2 is initialized by the guest OS at boot, so it needs to be
saved and restored, otherwise the guest would be surprised (older save
images lacking these fields are zero-extended on restore; see the
sketch after the trailers);
3. Other MSRs do not need save/restore, since they are either error-
related and pointless to save/restore, or unified across all vMCE
platforms;
Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>
- fix handling of partial data in XEN_DOMCTL_set_ext_vcpucontext
- fix adjustment of xen_domctl_ext_vcpucontext
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Committed-by: Jan Beulich <jbeulich@suse.com>
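
For context, a minimal, self-contained sketch of the zero-extension idea
relied on below (hvm_load_entry_zeroextend() and the partial-data handling
in XEN_DOMCTL_set_ext_vcpucontext). The names vmce_record and load_record
are hypothetical and only illustrate the principle: a record saved by an
older hypervisor that carried only caps restores cleanly, with the new
MCi_CTL2 fields defaulting to zero.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical mirror of struct hvm_vmce_vcpu as extended by this patch. */
struct vmce_record {
    uint64_t caps;
    uint64_t mci_ctl2_bank0;
    uint64_t mci_ctl2_bank1;
};

/*
 * Zero-extending load: accept a source record that may be shorter than
 * the current layout (e.g. one produced before this patch); any bytes the
 * source does not provide are left as zero.
 */
static int load_record(struct vmce_record *dst, const void *src, size_t len)
{
    if ( len > sizeof(*dst) )
        return -1;                /* record from a newer, unknown layout */
    memset(dst, 0, sizeof(*dst));
    memcpy(dst, src, len);        /* shorter record => trailing fields stay 0 */
    return 0;
}

int main(void)
{
    uint64_t old_image_caps = 0x806;  /* arbitrary example capability value */
    struct vmce_record rec;

    if ( load_record(&rec, &old_image_caps, sizeof(old_image_caps)) == 0 )
        printf("caps %#" PRIx64 " bank0 %#" PRIx64 " bank1 %#" PRIx64 "\n",
               rec.caps, rec.mci_ctl2_bank0, rec.mci_ctl2_bank1);
    return 0;
}
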
HVM_SAVE_TYPE(VMCE_VCPU) p;
READ(p);
printf(" VMCE_VCPU: caps %" PRIx64 "\n", p.caps);
+ printf(" VMCE_VCPU: bank0 mci_ctl2 %" PRIx64 "\n", p.mci_ctl2_bank0);
+ printf(" VMCE_VCPU: bank1 mci_ctl2 %" PRIx64 "\n", p.mci_ctl2_bank1);
}
int main(int argc, char **argv)
spin_lock_init(&v->arch.vmce.lock);
}
-int vmce_restore_vcpu(struct vcpu *v, uint64_t caps)
+int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt)
{
unsigned long guest_mcg_cap;
else
guest_mcg_cap = AMD_GUEST_MCG_CAP;
- if ( caps & ~guest_mcg_cap & ~MCG_CAP_COUNT & ~MCG_CTL_P )
+ if ( ctxt->caps & ~guest_mcg_cap & ~MCG_CAP_COUNT & ~MCG_CTL_P )
{
dprintk(XENLOG_G_ERR, "%s restore: unsupported MCA capabilities"
" %#" PRIx64 " for d%d:v%u (supported: %#Lx)\n",
- is_hvm_vcpu(v) ? "HVM" : "PV", caps, v->domain->domain_id,
- v->vcpu_id, guest_mcg_cap & ~MCG_CAP_COUNT);
+ is_hvm_vcpu(v) ? "HVM" : "PV", ctxt->caps,
+ v->domain->domain_id, v->vcpu_id,
+ guest_mcg_cap & ~MCG_CAP_COUNT);
return -EPERM;
}
- v->arch.vmce.mcg_cap = caps;
+ v->arch.vmce.mcg_cap = ctxt->caps;
+ v->arch.vmce.bank[0].mci_ctl2 = ctxt->mci_ctl2_bank0;
+ v->arch.vmce.bank[1].mci_ctl2 = ctxt->mci_ctl2_bank1;
+
return 0;
}
for_each_vcpu( d, v ) {
struct hvm_vmce_vcpu ctxt = {
- .caps = v->arch.vmce.mcg_cap
+ .caps = v->arch.vmce.mcg_cap,
+ .mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2,
+ .mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2
};
err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
err = -EINVAL;
}
else
- err = hvm_load_entry(VMCE_VCPU, h, &ctxt);
+ err = hvm_load_entry_zeroextend(VMCE_VCPU, h, &ctxt);
- return err ?: vmce_restore_vcpu(v, ctxt.caps);
+ return err ?: vmce_restore_vcpu(v, &ctxt);
}
HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt,
evc->syscall32_callback_eip = 0;
evc->syscall32_disables_events = 0;
}
- evc->mcg_cap = v->arch.vmce.mcg_cap;
+ evc->vmce.caps = v->arch.vmce.mcg_cap;
+ evc->vmce.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
+ evc->vmce.mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
}
else
{
ret = -EINVAL;
- if ( evc->size < offsetof(typeof(*evc), mcg_cap) )
+ if ( evc->size < offsetof(typeof(*evc), vmce) )
goto ext_vcpucontext_out;
if ( !is_hvm_domain(d) )
{
evc->syscall32_callback_eip )
goto ext_vcpucontext_out;
- if ( evc->size >= offsetof(typeof(*evc), mcg_cap) +
- sizeof(evc->mcg_cap) )
- ret = vmce_restore_vcpu(v, evc->mcg_cap);
+ BUILD_BUG_ON(offsetof(struct xen_domctl_ext_vcpucontext,
+ mcg_cap) !=
+ offsetof(struct xen_domctl_ext_vcpucontext,
+ vmce.caps));
+ BUILD_BUG_ON(sizeof(evc->mcg_cap) != sizeof(evc->vmce.caps));
+ if ( evc->size >= offsetof(typeof(*evc), vmce) +
+ sizeof(evc->vmce) )
+ ret = vmce_restore_vcpu(v, &evc->vmce);
+ else if ( evc->size >= offsetof(typeof(*evc), mcg_cap) +
+ sizeof(evc->mcg_cap) )
+ {
+ struct hvm_vmce_vcpu vmce = { .caps = evc->mcg_cap };
+
+ ret = vmce_restore_vcpu(v, &vmce);
+ }
}
ret = 0;
/* Guest vMCE MSRs virtualization */
extern void vmce_init_vcpu(struct vcpu *);
-extern int vmce_restore_vcpu(struct vcpu *, uint64_t caps);
+extern int vmce_restore_vcpu(struct vcpu *, const struct hvm_vmce_vcpu *);
extern int vmce_wrmsr(uint32_t msr, uint64_t val);
extern int vmce_rdmsr(uint32_t msr, uint64_t *val);
struct hvm_vmce_vcpu {
uint64_t caps;
+ uint64_t mci_ctl2_bank0;
+ uint64_t mci_ctl2_bank1;
};
DECLARE_HVM_SAVE_TYPE(VMCE_VCPU, 18, struct hvm_vmce_vcpu);
#error "domctl operations are intended for use by node control tools only"
#endif
+#include "hvm/save.h"
#include "xen.h"
#include "grant_table.h"
uint16_t sysenter_callback_cs;
uint8_t syscall32_disables_events;
uint8_t sysenter_disables_events;
- uint64_aligned_t mcg_cap;
+#if defined(__GNUC__)
+ union {
+ uint64_aligned_t mcg_cap;
+ struct hvm_vmce_vcpu vmce;
+ };
+#else
+ struct hvm_vmce_vcpu vmce;
+#endif
#endif
};
typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;
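
As a side note, a compile-checkable sketch (with hypothetical struct names)
of why the anonymous union above keeps the layout compatible with the old
mcg_cap field, which is exactly what the BUILD_BUG_ON()s in the domctl
handler assert at build time:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct vmce_fields {                  /* stands in for struct hvm_vmce_vcpu */
    uint64_t caps;
    uint64_t mci_ctl2_bank0;
    uint64_t mci_ctl2_bank1;
};

struct ext_ctxt {                     /* stands in for xen_domctl_ext_vcpucontext */
    uint32_t size;
    union {
        uint64_t mcg_cap;             /* legacy field name, kept for old callers */
        struct vmce_fields vmce;      /* new, larger view of the same storage */
    };
};

int main(void)
{
    /* The old field and vmce.caps overlay each other, so code built against
     * either definition reads and writes the same bytes for the capabilities. */
    assert(offsetof(struct ext_ctxt, mcg_cap) ==
           offsetof(struct ext_ctxt, vmce.caps));
    assert(sizeof(((struct ext_ctxt *)0)->mcg_cap) ==
           sizeof(((struct ext_ctxt *)0)->vmce.caps));
    return 0;
}
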