Some of the generic hooks were unused altogether - drop them.
Some of the hooks were used only to handle calls from the specific
vendor's code (SVM) - drop them too.
Several more hooks were pointlessly implemented as out-of-line
functions, when most (all?) other HVM hooks use inline ones - make
them inlines. None of them are implemented by only one of SVM or VMX,
so also drop the conditionals. Funnily, nhvm_vmcx_hap_enabled(), having
return type bool_t, nevertheless returned -EOPNOTSUPP.
nhvm_vmcx_guest_intercepts_trap() and its hook and implementations are
being made return bool_t, as they should have been from the beginning
(its sole caller only checks for a non-zero result).
Finally, make static whatever can as a result be static.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
return rc;
}
-int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
-{
- if (hvm_funcs.nhvm_vcpu_hostrestore)
- return hvm_funcs.nhvm_vcpu_hostrestore(v, regs);
- return -EOPNOTSUPP;
-}
-
-int nhvm_vcpu_vmexit(struct vcpu *v, struct cpu_user_regs *regs,
- uint64_t exitcode)
-{
- if (hvm_funcs.nhvm_vcpu_vmexit)
- return hvm_funcs.nhvm_vcpu_vmexit(v, regs, exitcode);
- return -EOPNOTSUPP;
-}
-
-int nhvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap)
-{
- return hvm_funcs.nhvm_vcpu_vmexit_trap(v, trap);
-}
-
-uint64_t nhvm_vcpu_guestcr3(struct vcpu *v)
-{
- if (hvm_funcs.nhvm_vcpu_guestcr3)
- return hvm_funcs.nhvm_vcpu_guestcr3(v);
- return -EOPNOTSUPP;
-}
-
-uint64_t nhvm_vcpu_p2m_base(struct vcpu *v)
-{
- if ( hvm_funcs.nhvm_vcpu_p2m_base )
- return hvm_funcs.nhvm_vcpu_p2m_base(v);
- return -EOPNOTSUPP;
-}
-
-uint32_t nhvm_vcpu_asid(struct vcpu *v)
-{
- if (hvm_funcs.nhvm_vcpu_asid)
- return hvm_funcs.nhvm_vcpu_asid(v);
- return -EOPNOTSUPP;
-}
-
-int nhvm_vmcx_guest_intercepts_trap(struct vcpu *v, unsigned int trap, int errcode)
-{
- if (hvm_funcs.nhvm_vmcx_guest_intercepts_trap)
- return hvm_funcs.nhvm_vmcx_guest_intercepts_trap(v, trap, errcode);
- return -EOPNOTSUPP;
-}
-
-bool_t nhvm_vmcx_hap_enabled(struct vcpu *v)
-{
- if (hvm_funcs.nhvm_vmcx_hap_enabled)
- return hvm_funcs.nhvm_vmcx_hap_enabled(v);
- return -EOPNOTSUPP;
-}
-
-enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
-{
- return hvm_funcs.nhvm_intr_blocked(v);
-}
-
/*
* Local variables:
* mode: C
return 0;
}
-int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
+static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
{
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
struct nestedsvm *svm = &vcpu_nestedsvm(v);
return 0;
}
-int
+static int
nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs,
uint64_t exitcode)
{
return NESTEDHVM_VMEXIT_DONE;
}
-uint64_t nsvm_vcpu_guestcr3(struct vcpu *v)
-{
- return vcpu_nestedsvm(v).ns_vmcb_guestcr3;
-}
-
uint64_t nsvm_vcpu_hostcr3(struct vcpu *v)
{
return vcpu_nestedsvm(v).ns_vmcb_hostcr3;
}
-uint32_t nsvm_vcpu_asid(struct vcpu *v)
-{
- return vcpu_nestedsvm(v).ns_guest_asid;
-}
-
static int
nsvm_vmcb_guest_intercepts_msr(unsigned long *msr_bitmap,
uint32_t msr, bool_t write)
return NESTEDHVM_VMEXIT_INJECT;
}
-int
+static bool_t
nsvm_vmcb_guest_intercepts_exitcode(struct vcpu *v,
struct cpu_user_regs *regs, uint64_t exitcode)
{
return 1;
}
-int
+bool_t
nsvm_vmcb_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr, int errcode)
{
return nsvm_vmcb_guest_intercepts_exitcode(v,
if (rc)
ret = NESTEDHVM_VMEXIT_ERROR;
- rc = nhvm_vcpu_hostrestore(v, regs);
+ rc = nsvm_vcpu_hostrestore(v, regs);
if (rc)
ret = NESTEDHVM_VMEXIT_FATALERROR;
/* Prepare for running the l1 guest. Make the actual
* modifications to the virtual VMCB/VMCS.
*/
- rc = nhvm_vcpu_vmexit(v, regs, exitcode);
+ rc = nsvm_vcpu_vmexit_inject(v, regs, exitcode);
/* If l1 guest uses shadow paging, update the paging mode. */
if (!nestedhvm_paging_mode_hap(v))
.nhvm_vcpu_initialise = nsvm_vcpu_initialise,
.nhvm_vcpu_destroy = nsvm_vcpu_destroy,
.nhvm_vcpu_reset = nsvm_vcpu_reset,
- .nhvm_vcpu_hostrestore = nsvm_vcpu_hostrestore,
- .nhvm_vcpu_vmexit = nsvm_vcpu_vmexit_inject,
.nhvm_vcpu_vmexit_trap = nsvm_vcpu_vmexit_trap,
- .nhvm_vcpu_guestcr3 = nsvm_vcpu_guestcr3,
.nhvm_vcpu_p2m_base = nsvm_vcpu_hostcr3,
- .nhvm_vcpu_asid = nsvm_vcpu_asid,
.nhvm_vmcx_guest_intercepts_trap = nsvm_vmcb_guest_intercepts_trap,
.nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled,
.nhvm_intr_blocked = nsvm_intr_blocked,
.nhvm_vcpu_initialise = nvmx_vcpu_initialise,
.nhvm_vcpu_destroy = nvmx_vcpu_destroy,
.nhvm_vcpu_reset = nvmx_vcpu_reset,
- .nhvm_vcpu_guestcr3 = nvmx_vcpu_guestcr3,
.nhvm_vcpu_p2m_base = nvmx_vcpu_eptp_base,
- .nhvm_vcpu_asid = nvmx_vcpu_asid,
.nhvm_vmcx_hap_enabled = nvmx_ept_enabled,
.nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception,
.nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap,
return 0;
}
-uint64_t nvmx_vcpu_guestcr3(struct vcpu *v)
-{
- /* TODO */
- ASSERT(0);
- return 0;
-}
-
uint64_t nvmx_vcpu_eptp_base(struct vcpu *v)
{
uint64_t eptp_base;
return eptp_base & PAGE_MASK;
}
-uint32_t nvmx_vcpu_asid(struct vcpu *v)
-{
- /* TODO */
- ASSERT(0);
- return 0;
-}
-
bool_t nvmx_ept_enabled(struct vcpu *v)
{
struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
regs->eflags = eflags;
}
-int nvmx_intercepts_exception(struct vcpu *v, unsigned int trap,
- int error_code)
+bool_t nvmx_intercepts_exception(struct vcpu *v, unsigned int trap,
+ int error_code)
{
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
u32 exception_bitmap, pfec_match=0, pfec_mask=0;
int (*nhvm_vcpu_initialise)(struct vcpu *v);
void (*nhvm_vcpu_destroy)(struct vcpu *v);
int (*nhvm_vcpu_reset)(struct vcpu *v);
- int (*nhvm_vcpu_hostrestore)(struct vcpu *v,
- struct cpu_user_regs *regs);
- int (*nhvm_vcpu_vmexit)(struct vcpu *v, struct cpu_user_regs *regs,
- uint64_t exitcode);
int (*nhvm_vcpu_vmexit_trap)(struct vcpu *v, struct hvm_trap *trap);
- uint64_t (*nhvm_vcpu_guestcr3)(struct vcpu *v);
uint64_t (*nhvm_vcpu_p2m_base)(struct vcpu *v);
- uint32_t (*nhvm_vcpu_asid)(struct vcpu *v);
- int (*nhvm_vmcx_guest_intercepts_trap)(struct vcpu *v,
- unsigned int trapnr, int errcode);
+ bool_t (*nhvm_vmcx_guest_intercepts_trap)(struct vcpu *v,
+ unsigned int trapnr,
+ int errcode);
bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);
* Nested HVM
*/
-/* Restores l1 guest state */
-int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs);
-/* Fill l1 guest's VMCB/VMCS with data provided by generic exit codes
- * (do conversion as needed), other misc SVM/VMX specific tweaks to make
- * it work */
-int nhvm_vcpu_vmexit(struct vcpu *v, struct cpu_user_regs *regs,
- uint64_t exitcode);
/* inject vmexit into l1 guest. l1 guest will see a VMEXIT due to
* 'trapnr' exception.
*/
-int nhvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap);
+static inline int nhvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap)
+{
+ return hvm_funcs.nhvm_vcpu_vmexit_trap(v, trap);
+}
-/* returns l2 guest cr3 in l2 guest physical address space. */
-uint64_t nhvm_vcpu_guestcr3(struct vcpu *v);
/* returns l1 guest's cr3 that points to the page table used to
* translate l2 guest physical address to l1 guest physical address.
*/
-uint64_t nhvm_vcpu_p2m_base(struct vcpu *v);
-/* returns the asid number l1 guest wants to use to run the l2 guest */
-uint32_t nhvm_vcpu_asid(struct vcpu *v);
+static inline uint64_t nhvm_vcpu_p2m_base(struct vcpu *v)
+{
+ return hvm_funcs.nhvm_vcpu_p2m_base(v);
+}
/* returns true, when l1 guest intercepts the specified trap */
-int nhvm_vmcx_guest_intercepts_trap(struct vcpu *v,
- unsigned int trapnr, int errcode);
+static inline bool_t nhvm_vmcx_guest_intercepts_trap(struct vcpu *v,
+ unsigned int trap,
+ int errcode)
+{
+ return hvm_funcs.nhvm_vmcx_guest_intercepts_trap(v, trap, errcode);
+}
/* returns true when l1 guest wants to use hap to run l2 guest */
-bool_t nhvm_vmcx_hap_enabled(struct vcpu *v);
+static inline bool_t nhvm_vmcx_hap_enabled(struct vcpu *v)
+{
+ return hvm_funcs.nhvm_vmcx_hap_enabled(v);
+}
+
/* interrupt */
-enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v);
+static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
+{
+ return hvm_funcs.nhvm_intr_blocked(v);
+}
+
#ifndef NDEBUG
/* Permit use of the Forced Emulation Prefix in HVM guests */
void nsvm_vcpu_destroy(struct vcpu *v);
int nsvm_vcpu_initialise(struct vcpu *v);
int nsvm_vcpu_reset(struct vcpu *v);
-int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs);
int nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs);
-int nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs,
- uint64_t exitcode);
int nsvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap);
-uint64_t nsvm_vcpu_guestcr3(struct vcpu *v);
uint64_t nsvm_vcpu_hostcr3(struct vcpu *v);
-uint32_t nsvm_vcpu_asid(struct vcpu *v);
-int nsvm_vmcb_guest_intercepts_exitcode(struct vcpu *v,
- struct cpu_user_regs *regs, uint64_t exitcode);
-int nsvm_vmcb_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr,
- int errcode);
+bool_t nsvm_vmcb_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr,
+ int errcode);
bool_t nsvm_vmcb_hap_enabled(struct vcpu *v);
enum hvm_intblk nsvm_intr_blocked(struct vcpu *v);
int nvmx_vcpu_initialise(struct vcpu *v);
void nvmx_vcpu_destroy(struct vcpu *v);
int nvmx_vcpu_reset(struct vcpu *v);
-uint64_t nvmx_vcpu_guestcr3(struct vcpu *v);
uint64_t nvmx_vcpu_eptp_base(struct vcpu *v);
-uint32_t nvmx_vcpu_asid(struct vcpu *v);
enum hvm_intblk nvmx_intr_blocked(struct vcpu *v);
-int nvmx_intercepts_exception(struct vcpu *v,
- unsigned int trap, int error_code);
+bool_t nvmx_intercepts_exception(struct vcpu *v, unsigned int trap,
+ int error_code);
void nvmx_domain_relinquish_resources(struct domain *d);
bool_t nvmx_ept_enabled(struct vcpu *v);