}
#endif /* __x86_64__ */
+int nhvm_vcpu_initialise(struct vcpu *v)
+{
+    /* Set up @v's nested-HVM state via the backend hook, if present. */
+    return hvm_funcs.nhvm_vcpu_initialise
+        ? hvm_funcs.nhvm_vcpu_initialise(v)
+        : -EOPNOTSUPP;
+}
+
+int nhvm_vcpu_destroy(struct vcpu *v)
+{
+    /* Tear down and free @v's nested-HVM state. */
+    int (*fn)(struct vcpu *) = hvm_funcs.nhvm_vcpu_destroy;
+
+    if (!fn)
+        return -EOPNOTSUPP;
+    return fn(v);
+}
+
+int nhvm_vcpu_reset(struct vcpu *v)
+{
+    /* Reset nested state when the l1 guest disables nested virtualization. */
+    if (!hvm_funcs.nhvm_vcpu_reset)
+        return -EOPNOTSUPP;
+    return hvm_funcs.nhvm_vcpu_reset(v);
+}
+
+int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
+{
+    /* Restore the l1 guest's state on a nested vmexit. */
+    return hvm_funcs.nhvm_vcpu_hostrestore
+        ? hvm_funcs.nhvm_vcpu_hostrestore(v, regs)
+        : -EOPNOTSUPP;
+}
+
+int nhvm_vcpu_vmexit(struct vcpu *v, struct cpu_user_regs *regs,
+    uint64_t exitcode)
+{
+    /* Fill l1's VMCB/VMCS from the generic exit code on nested vmexit. */
+    int (*fn)(struct vcpu *, struct cpu_user_regs *, uint64_t) =
+        hvm_funcs.nhvm_vcpu_vmexit;
+
+    if (!fn)
+        return -EOPNOTSUPP;
+    return fn(v, regs, exitcode);
+}
+
+int
+nhvm_vcpu_vmexit_trap(struct vcpu *v, unsigned int trapnr,
+    int errcode, unsigned long cr2)
+{
+    /* Inject a VMEXIT due to exception 'trapnr' into the l1 guest.
+     * Guard against an unimplemented hook — every sibling nhvm_*
+     * wrapper checks before calling; without this we call through a
+     * NULL function pointer when the backend lacks nested support. */
+    if (hvm_funcs.nhvm_vcpu_vmexit_trap)
+        return hvm_funcs.nhvm_vcpu_vmexit_trap(v, trapnr, errcode, cr2);
+    return -EOPNOTSUPP;
+}
+
+uint64_t nhvm_vcpu_guestcr3(struct vcpu *v)
+{
+    /* Returns the l2 guest's cr3 in l2 guest physical address space.
+     * NOTE(review): -EOPNOTSUPP converts to a huge unsigned value in a
+     * uint64_t return; presumably callers treat it as a sentinel —
+     * confirm at call sites. */
+    if (!hvm_funcs.nhvm_vcpu_guestcr3)
+        return -EOPNOTSUPP;
+    return hvm_funcs.nhvm_vcpu_guestcr3(v);
+}
+
+uint64_t nhvm_vcpu_hostcr3(struct vcpu *v)
+{
+    /* Returns l1's cr3 used to translate l2 gpa to l1 gpa. */
+    return hvm_funcs.nhvm_vcpu_hostcr3
+        ? hvm_funcs.nhvm_vcpu_hostcr3(v)
+        : (uint64_t)-EOPNOTSUPP;
+}
+
+uint32_t nhvm_vcpu_asid(struct vcpu *v)
+{
+    /* ASID number the l1 guest wants to use to run the l2 guest. */
+    if (!hvm_funcs.nhvm_vcpu_asid)
+        return -EOPNOTSUPP; /* truncated to uint32_t, as before */
+    return hvm_funcs.nhvm_vcpu_asid(v);
+}
+
+int nhvm_vmcx_guest_intercepts_trap(struct vcpu *v, unsigned int trap)
+{
+    /* True when the l1 guest intercepts the specified trap. */
+    int (*fn)(struct vcpu *, unsigned int) =
+        hvm_funcs.nhvm_vmcx_guest_intercepts_trap;
+
+    return fn ? fn(v, trap) : -EOPNOTSUPP;
+}
+
+bool_t nhvm_vmcx_hap_enabled(struct vcpu *v)
+{
+    /* Returns true when the l1 guest wants to use HAP for the l2 guest.
+     * Do NOT return -EOPNOTSUPP here: truncated into bool_t it is a
+     * nonzero value, so an absent hook would wrongly report HAP as
+     * enabled.  An unimplemented hook means "not enabled". */
+    if (hvm_funcs.nhvm_vmcx_hap_enabled)
+        return hvm_funcs.nhvm_vmcx_hap_enabled(v);
+    return 0;
+}
+
+enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
+{
+    /* Nested-guest interrupt blocking state.  Guard the hook like the
+     * other nhvm_* wrappers do, rather than calling through a NULL
+     * function pointer when the backend lacks nested support; in that
+     * case report "no blocking". */
+    if (hvm_funcs.nhvm_intr_blocked)
+        return hvm_funcs.nhvm_intr_blocked(v);
+    return hvm_intblk_none;
+}
+
/*
* Local variables:
* mode: C
void (*set_uc_mode)(struct vcpu *v);
void (*set_info_guest)(struct vcpu *v);
void (*set_rdtsc_exiting)(struct vcpu *v, bool_t);
+
+ /* Nested HVM: hooks supplied by the SVM/VMX backend.  Entries may be
+ * NULL when the backend has no nested virtualization support; the
+ * nhvm_* wrappers check for NULL before dispatching. */
+ /* Initialise vcpu's nested-HVM state. */
+ int (*nhvm_vcpu_initialise)(struct vcpu *v);
+ /* Destroy and free vcpu's nested-HVM state. */
+ int (*nhvm_vcpu_destroy)(struct vcpu *v);
+ /* Reset vcpu's state when the l1 guest disables nested virtualization. */
+ int (*nhvm_vcpu_reset)(struct vcpu *v);
+ /* Restore l1 guest state on a nested vmexit. */
+ int (*nhvm_vcpu_hostrestore)(struct vcpu *v,
+ struct cpu_user_regs *regs);
+ /* Fill l1's VMCB/VMCS from generic exit codes on a nested vmexit. */
+ int (*nhvm_vcpu_vmexit)(struct vcpu *v, struct cpu_user_regs *regs,
+ uint64_t exitcode);
+ /* Inject a VMEXIT due to exception 'trapnr' into the l1 guest. */
+ int (*nhvm_vcpu_vmexit_trap)(struct vcpu *v,
+ unsigned int trapnr,
+ int errcode,
+ unsigned long cr2);
+ /* l2 guest cr3, in l2 guest physical address space. */
+ uint64_t (*nhvm_vcpu_guestcr3)(struct vcpu *v);
+ /* l1 cr3 used to translate l2 gpa to l1 gpa. */
+ uint64_t (*nhvm_vcpu_hostcr3)(struct vcpu *v);
+ /* ASID the l1 guest wants to use to run the l2 guest. */
+ uint32_t (*nhvm_vcpu_asid)(struct vcpu *v);
+ /* True when the l1 guest intercepts the specified trap. */
+ int (*nhvm_vmcx_guest_intercepts_trap)(struct vcpu *v, unsigned int trapnr);
+
+ /* True when the l1 guest wants to use HAP to run the l2 guest. */
+ bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);
+
+ /* Nested-guest interrupt blocking state. */
+ enum hvm_intblk (*nhvm_intr_blocked)(struct vcpu *v);
};
extern struct hvm_function_table hvm_funcs;
void hvm_memory_event_cr0(unsigned long value, unsigned long old);
void hvm_memory_event_cr3(unsigned long value, unsigned long old);
void hvm_memory_event_cr4(unsigned long value, unsigned long old);
-
/* Called for current VCPU on int3: returns -1 if no listener */
int hvm_memory_event_int3(unsigned long gla);
#else
{ return 0; }
#endif
+/*
+ * Nested HVM: thin wrappers that dispatch through the corresponding
+ * hvm_funcs nhvm_* hooks provided by the SVM/VMX backend.
+ */
+
+/* Initialise vcpu's struct nestedhvm. */
+int nhvm_vcpu_initialise(struct vcpu *v);
+/* Destroy and free vcpu's struct nestedhvm. */
+int nhvm_vcpu_destroy(struct vcpu *v);
+/* Reset vcpu's state when the l1 guest disables nested virtualization. */
+int nhvm_vcpu_reset(struct vcpu *v);
+/* Restore l1 guest state on a nested vmexit. */
+int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs);
+/* Fill the l1 guest's VMCB/VMCS with data derived from the generic exit
+ * codes (converting as needed), plus other misc SVM/VMX-specific tweaks
+ * to make it work. */
+int nhvm_vcpu_vmexit(struct vcpu *v, struct cpu_user_regs *regs,
+ uint64_t exitcode);
+/* Inject a vmexit into the l1 guest: the l1 guest will see a VMEXIT
+ * caused by the 'trapnr' exception. */
+int nhvm_vcpu_vmexit_trap(struct vcpu *v,
+ unsigned int trapnr, int errcode, unsigned long cr2);
+
+/* Returns the l2 guest's cr3, in l2 guest physical address space. */
+uint64_t nhvm_vcpu_guestcr3(struct vcpu *v);
+/* Returns the l1 guest's cr3 pointing at the page table used to
+ * translate l2 guest physical addresses to l1 guest physical
+ * addresses. */
+uint64_t nhvm_vcpu_hostcr3(struct vcpu *v);
+/* Returns the ASID number the l1 guest wants to use to run the l2
+ * guest. */
+uint32_t nhvm_vcpu_asid(struct vcpu *v);
+
+/* Returns true when the l1 guest intercepts the specified trap. */
+int nhvm_vmcx_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr);
+
+/* Returns true when the l1 guest wants to use HAP to run the l2 guest. */
+bool_t nhvm_vmcx_hap_enabled(struct vcpu *v);
+/* Returns the nested-guest interrupt blocking state. */
+enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v);
+
#endif /* __ASM_X86_HVM_HVM_H__ */