SVM: use VMCB accessors
author    Jan Beulich <jbeulich@suse.com>
Fri, 9 Jun 2017 12:13:24 +0000 (14:13 +0200)
committer Jan Beulich <jbeulich@suse.com>
Fri, 9 Jun 2017 12:13:24 +0000 (14:13 +0200)
This is particularly relevant for the SET form, to ensure proper clean
bits tracking (albeit it's benign in this particular case, as CPL and
the other segment register attributes share a clean bit).
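
For illustration, a minimal sketch of the accessor pattern being
switched to (the vmcb_get_cpl/vmcb_set_cpl/vmcb_get_efer names come
from the diff below; the simplified structure layout and the exact
clean-bit position are assumptions, the real definitions live in the
VMCB header):

    #include <stdint.h>

    /*
     * Simplified stand-in for the VMCB structure; only the fields
     * needed to show the clean-bits idea are included.
     */
    struct vmcb_struct {
        uint32_t cleanbits; /* clean bits: 1 = cached state valid, 0 = dirty */
        uint8_t  _cpl;      /* guest CPL (underscore: use the accessors) */
        uint64_t _efer;     /* guest EFER */
    };

    /* Assumed position of the SEG clean bit covering segment state and CPL. */
    #define VMCB_CLEAN_SEG_BIT  8

    static inline uint8_t vmcb_get_cpl(const struct vmcb_struct *vmcb)
    {
        /* GET form: a plain read, no clean-bit bookkeeping required. */
        return vmcb->_cpl;
    }

    static inline void vmcb_set_cpl(struct vmcb_struct *vmcb, uint8_t cpl)
    {
        vmcb->_cpl = cpl;
        /* SET form: clear the covering clean bit so the CPU reloads the field. */
        vmcb->cleanbits &= ~(1u << VMCB_CLEAN_SEG_BIT);
    }

    static inline uint64_t vmcb_get_efer(const struct vmcb_struct *vmcb)
    {
        return vmcb->_efer;
    }

Writing vmcb->_cpl directly, as the old code did, skips the clean-bit
update; it happens to be harmless here only because the SS attribute
write just before it already dirties the same shared clean bit.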

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
xen/arch/x86/hvm/svm/svm.c

index 178adc56b4048745c8d9955abf0dae5d96c63e32..402e8151433edd6cb7f39e55dafd510ef541bfc7 100644
@@ -653,7 +653,7 @@ static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
         break;
     case x86_seg_ss:
         *reg = vmcb->ss;
-        reg->attr.fields.dpl = vmcb->_cpl;
+        reg->attr.fields.dpl = vmcb_get_cpl(vmcb);
         break;
     case x86_seg_tr:
         svm_sync_vmcb(v);
@@ -726,7 +726,7 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
         break;
     case x86_seg_ss:
         vmcb->ss = *reg;
-        vmcb->_cpl = vmcb->ss.attr.fields.dpl;
+        vmcb_set_cpl(vmcb, reg->attr.fields.dpl);
         break;
     case x86_seg_tr:
         vmcb->tr = *reg;
@@ -1442,7 +1442,7 @@ static void svm_inject_event(const struct x86_event *event)
      * If injecting an event outside of 64bit mode, zero the upper bits of the
      * %eip and nextrip after the adjustments above.
      */
-    if ( !((vmcb->_efer & EFER_LMA) && vmcb->cs.attr.fields.l) )
+    if ( !((vmcb_get_efer(vmcb) & EFER_LMA) && vmcb->cs.attr.fields.l) )
     {
         regs->rip = regs->eip;
         vmcb->nextrip = (uint32_t)vmcb->nextrip;