svm_vmload(vmcb);
}
+static unsigned long svm_get_shadow_gs_base(struct vcpu *v)
+{
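+    /* The VMCB's KernelGSBase field holds the guest's MSR_SHADOW_GS_BASE. */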
+    return v->arch.hvm_svm.vmcb->kerngsbase;
+}
+
static int svm_set_guest_pat(struct vcpu *v, u64 gpat)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
.guest_x86_mode = svm_guest_x86_mode,
.get_segment_register = svm_get_segment_register,
.set_segment_register = svm_set_segment_register,
+    .get_shadow_gs_base = svm_get_shadow_gs_base,
.update_host_cr3 = svm_update_host_cr3,
.update_guest_cr = svm_update_guest_cr,
.update_guest_efer = svm_update_guest_efer,
vmx_vmcs_exit(v);
}
+static unsigned long vmx_get_shadow_gs_base(struct vcpu *v)
+{
+#ifdef __x86_64__
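+    /* The VMCS has no kernel GS base field; Xen tracks it in shadow_gs. */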
+    return v->arch.hvm_vmx.shadow_gs;
+#else
+    return 0;
+#endif
+}
+
static int vmx_set_guest_pat(struct vcpu *v, u64 gpat)
{
if ( !cpu_has_vmx_pat || !paging_mode_hap(v->domain) )
.guest_x86_mode = vmx_guest_x86_mode,
.get_segment_register = vmx_get_segment_register,
.set_segment_register = vmx_set_segment_register,
+    .get_shadow_gs_base = vmx_get_shadow_gs_base,
.update_host_cr3 = vmx_update_host_cr3,
.update_guest_cr = vmx_update_guest_cr,
.update_guest_efer = vmx_update_guest_efer,
struct segment_register *reg);
void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
struct segment_register *reg);
+    unsigned long (*get_shadow_gs_base)(struct vcpu *v);
/*
* Re-set the value of CR3 that Xen runs on when handling VM exits.
hvm_funcs.set_segment_register(v, seg, reg);
}
+static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v)
+{
+    return hvm_funcs.get_shadow_gs_base(v);
+}
+
#define is_viridian_domain(_d) \
(is_hvm_domain(_d) && ((_d)->arch.hvm_domain.params[HVM_PARAM_VIRIDIAN]))
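
For context beyond the diff: the new hvm_funcs hook gives common HVM code a vendor-neutral way to read the guest's shadow GS base (MSR_SHADOW_GS_BASE) without touching SVM- or VMX-specific state directly. Below is a minimal sketch of a hypothetical caller; the function name example_dump_shadow_gs() and its printk output are illustrative only and are not part of this patch.

/* Hypothetical illustration only -- not part of this patch. */
static void example_dump_shadow_gs(struct vcpu *v)
{
    /* Vendor-neutral read: SVM returns vmcb->kerngsbase, VMX returns
     * the software-tracked arch.hvm_vmx.shadow_gs value. */
    unsigned long gs_base = hvm_get_shadow_gs_base(v);

    printk("d%dv%d shadow GS base = %lx\n",
           v->domain->domain_id, v->vcpu_id, gs_base);
}

Any common-code caller (e.g. guest-context dumping) would presumably go through this wrapper rather than branching on the SVM/VMX case itself.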