{
if ( evc->size >= 2 * sizeof(uint64_t) + XSTATE_AREA_MIN_SIZE )
ret = validate_xstate(_xcr0, _xcr0_accum,
- _xsave_area->xsave_hdr.xstate_bv,
- evc->xfeature_mask);
+ _xsave_area->xsave_hdr.xstate_bv);
}
else if ( !_xcr0 )
ret = 0;
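
The size gate above only trusts xstate_bv once the record is big enough to hold both XCR0 words plus a minimal xsave area. For reference, a sketch of the constants involved, using the values from Xen's asm-x86/xstate.h (collected here purely for illustration):

/* Legacy 512-byte FXSAVE image followed by the 64-byte xsave header. */
#define FXSAVE_SIZE            512
#define XSAVE_HDR_SIZE          64
#define XSTATE_AREA_MIN_SIZE  (FXSAVE_SIZE + XSAVE_HDR_SIZE)

/*
 * A vcpuextstate record lays out xcr0 and xcr0_accum (two uint64_t)
 * ahead of the xsave area itself, hence the 2 * sizeof(uint64_t)
 * offset in the check above.
 */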
h->cur += desc->length;
err = validate_xstate(ctxt->xcr0, ctxt->xcr0_accum,
- ctxt->save_area.xsave_hdr.xstate_bv,
- ctxt->xfeature_mask);
+ ctxt->save_area.xsave_hdr.xstate_bv);
if ( err )
{
printk(XENLOG_G_WARNING
return !(xcr0 & XSTATE_BNDREGS) == !(xcr0 & XSTATE_BNDCSR);
}
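
The two context lines above are the tail of valid_xcr0(), whose final statement encodes the MPX pairing rule: XSTATE_BNDREGS and XSTATE_BNDCSR may only be enabled together. A sketch of the whole helper, reconstructed from the Xen sources of this era (illustrative, not a definitive copy):

static bool_t valid_xcr0(u64 xcr0)
{
    /* FP must be unconditionally set. */
    if ( !(xcr0 & XSTATE_FP) )
        return 0;

    /* YMM depends on SSE. */
    if ( (xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE) )
        return 0;

    /* AVX-512 components (OPMASK, ZMM, HI_ZMM) depend on YMM. */
    if ( (xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM)) &&
         !(xcr0 & XSTATE_YMM) )
        return 0;

    /* BNDREGS and BNDCSR must be the same (MPX). */
    return !(xcr0 & XSTATE_BNDREGS) == !(xcr0 & XSTATE_BNDCSR);
}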
-int validate_xstate(u64 xcr0, u64 xcr0_accum, u64 xstate_bv, u64 xfeat_mask)
+int validate_xstate(u64 xcr0, u64 xcr0_accum, u64 xstate_bv)
{
- if ( (xcr0_accum & ~xfeat_mask) ||
- (xstate_bv & ~xcr0_accum) ||
+ if ( (xstate_bv & ~xcr0_accum) ||
(xcr0 & ~xcr0_accum) ||
!valid_xcr0(xcr0) ||
!valid_xcr0(xcr0_accum) )
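
Assembled, the post-patch validator takes three arguments; the tail is filled in here on the assumption that the -EINVAL/0 returns are unchanged by this patch:

int validate_xstate(u64 xcr0, u64 xcr0_accum, u64 xstate_bv)
{
    /*
     * Reject the context if the states being restored (xstate_bv) or
     * currently enabled (xcr0) are not a subset of everything ever
     * enabled (xcr0_accum), or if either XCR0 value is itself
     * architecturally invalid.
     */
    if ( (xstate_bv & ~xcr0_accum) ||
         (xcr0 & ~xcr0_accum) ||
         !valid_xcr0(xcr0) ||
         !valid_xcr0(xcr0_accum) )
        return -EINVAL;

    return 0;
}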
void xsave(struct vcpu *v, uint64_t mask);
void xrstor(struct vcpu *v, uint64_t mask);
bool_t xsave_enabled(const struct vcpu *v);
-int __must_check validate_xstate(u64 xcr0, u64 xcr0_accum, u64 xstate_bv,
- u64 xfeat_mask);
+int __must_check validate_xstate(u64 xcr0, u64 xcr0_accum, u64 xstate_bv);
int __must_check handle_xsetbv(u32 index, u64 new_bv);
/* extended state init and cleanup functions */
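
Because the declaration stays __must_check, any remaining four-argument caller now fails to compile, and a caller that drops the result draws a warning. A hypothetical call site under the new signature:

int rc = validate_xstate(xcr0, xcr0_accum, xstate_bv);

if ( rc )   /* -EINVAL on any inconsistency. */
    return rc;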
*/
struct hvm_hw_cpu_xsave {
- uint64_t xfeature_mask;
+ uint64_t xfeature_mask; /* Ignored */
uint64_t xcr0; /* Updated by XSETBV */
uint64_t xcr0_accum; /* Updated by XSETBV */
struct {
/* IN: VCPU that this call applies to. */
uint32_t vcpu;
/*
- * SET: xfeature support mask of struct (IN)
+ * SET: Ignored.
* GET: xfeature support mask of struct (IN/OUT)
* The xfeature mask identifies the saving format, so that a
* compatible CPU can check the format and decide