* the same as for shadow paging.
*/
- rv = nestedhvm_hap_nested_page_fault(curr, &gpa,
- npfec.read_access,
- npfec.write_access,
- npfec.insn_fetch);
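+ /* npfec carries the read/write/fetch access bits of the fault. */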
+ rv = nestedhvm_hap_nested_page_fault(curr, &gpa, npfec);
switch (rv) {
case NESTEDHVM_PAGEFAULT_DONE:
case NESTEDHVM_PAGEFAULT_RETRY:
* walk is successful, the translated value is returned in
* L1_gpa. The result value tells what to do next.
*/
-int
-nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
- unsigned int *page_order, uint8_t *p2m_acc,
- bool_t access_r, bool_t access_w, bool_t access_x)
+int nsvm_hap_walk_L1_p2m(
+ struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+ uint8_t *p2m_acc, struct npfec npfec)
{
uint32_t pfec;
unsigned long nested_cr3, gfn;
nested_cr3 = nhvm_vcpu_p2m_base(v);
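+ /* Build the pagefault error code for the L1 walk from the access type. */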
pfec = PFEC_user_mode | PFEC_page_present;
- if ( access_w )
+ if ( npfec.write_access )
pfec |= PFEC_write_access;
- if ( access_x )
+ if ( npfec.insn_fetch )
pfec |= PFEC_insn_fetch;
/* Walk the guest-supplied NPT table, just as if it were a pagetable */
* walk is successful, the translated value is returned in
* L1_gpa. The result value tells what to do next.
*/
-int
-nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
- unsigned int *page_order, uint8_t *p2m_acc,
- bool_t access_r, bool_t access_w, bool_t access_x)
+int nvmx_hap_walk_L1_p2m(
+ struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+ uint8_t *p2m_acc, struct npfec npfec)
{
int rc;
unsigned long gfn;
uint64_t exit_qual;
uint32_t exit_reason = EXIT_REASON_EPT_VIOLATION;
- uint32_t rwx_rights = (access_x << 2) | (access_w << 1) | access_r;
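+ /*
+ * Encode the access type as EPT R/W/X permission bits (read = bit 0,
+ * write = bit 1, instruction fetch = bit 2).
+ */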
+ uint32_t rwx_rights =
+ (npfec.insn_fetch << 2) | (npfec.write_access << 1) | npfec.read_access;
struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
vmx_vmcs_enter(v);
#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__
+#include <xen/mm.h>
+
#include <asm/alternative.h>
#include <asm/asm_defns.h>
#include <asm/current.h>
- /*Walk nested p2m */
+ /* Walk nested p2m */
int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
paddr_t *L1_gpa, unsigned int *page_order,
- uint8_t *p2m_acc, bool_t access_r,
- bool_t access_w, bool_t access_x);
+ uint8_t *p2m_acc, struct npfec npfec);
void (*enable_msr_interception)(struct domain *d, uint32_t msr);
bool_t (*is_singlestep_supported)(void);
void hvm_toggle_singlestep(struct vcpu *v);
void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx);
-struct npfec;
int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
struct npfec npfec);
return hvm_funcs.nhvm_intr_blocked(v);
}
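+
+/*
+ * Walk the L1 p2m for L2_gpa. On success the translated address is returned
+ * in *L1_gpa; the return value is one of the NESTEDHVM_PAGEFAULT_* codes
+ * telling the caller what to do next.
+ */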
+static inline int nhvm_hap_walk_L1_p2m(
+ struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+ uint8_t *p2m_acc, struct npfec npfec)
+{
+ return hvm_funcs.nhvm_hap_walk_L1_p2m(
+ v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);
+}
+
static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
{
hvm_funcs.enable_msr_interception(d, msr);
#define NESTEDHVM_PAGEFAULT_RETRY 5
#define NESTEDHVM_PAGEFAULT_DIRECT_MMIO 6
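+/* Handle a nested pagefault; the return value is one of the codes above. */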
int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
- bool_t access_r, bool_t access_w, bool_t access_x);
-
-int nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
- unsigned int *page_order, uint8_t *p2m_acc,
- bool_t access_r, bool_t access_w, bool_t access_x);
+ struct npfec npfec);
/* IO permission map */
unsigned long *nestedhvm_vcpu_iomap_get(bool_t ioport_80, bool_t ioport_ed);
void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v);
void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v);
bool_t nestedsvm_gif_isset(struct vcpu *v);
-int nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
- unsigned int *page_order, uint8_t *p2m_acc,
- bool_t access_r, bool_t access_w, bool_t access_x);
+int nsvm_hap_walk_L1_p2m(
+ struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+ uint8_t *p2m_acc, struct npfec npfec);
#define NSVM_INTR_NOTHANDLED 3
#define NSVM_INTR_NOTINTERCEPTED 2
#define EPT_TRANSLATE_MISCONFIG 2
#define EPT_TRANSLATE_RETRY 3
-int
-nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
- unsigned int *page_order, uint8_t *p2m_acc,
- bool_t access_r, bool_t access_w, bool_t access_x);
+int nvmx_hap_walk_L1_p2m(
+ struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+ uint8_t *p2m_acc, struct npfec npfec);
+
/*
* Virtual VMCS layout
*
}
}
-/* This function uses L2_gpa to walk the P2M page table in L1. If the
- * walk is successful, the translated value is returned in
- * L1_gpa. The result value tells what to do next.
- */
-int
-nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
- unsigned int *page_order, uint8_t *p2m_acc,
- bool_t access_r, bool_t access_w, bool_t access_x)
-{
- ASSERT(hvm_funcs.nhvm_hap_walk_L1_p2m);
-
- return hvm_funcs.nhvm_hap_walk_L1_p2m(v, L2_gpa, L1_gpa, page_order,
- p2m_acc, access_r, access_w, access_x);
-}
-
-
-/* This function uses L1_gpa to walk the P2M table in L0 hypervisor. If the
+/* This function uses L1_gpa to walk the P2M table in the L0 hypervisor. If the
* walk is successful, the translated value is returned in L0_gpa. The return
* value tells the upper level what to do.
*/
-static int
-nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
- p2m_type_t *p2mt, p2m_access_t *p2ma,
- unsigned int *page_order,
- bool_t access_r, bool_t access_w, bool_t access_x)
+static int nestedhap_walk_L0_p2m(
+ struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa, p2m_type_t *p2mt,
+ p2m_access_t *p2ma, unsigned int *page_order, struct npfec npfec)
{
mfn_t mfn;
int rc;
goto out;
rc = NESTEDHVM_PAGEFAULT_L0_ERROR;
- if ( access_w && p2m_is_readonly(*p2mt) )
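+ /* Treat a write to a read-only mapping as an L0 error. */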
+ if ( npfec.write_access && p2m_is_readonly(*p2mt) )
goto out;
if ( p2m_is_paging(*p2mt) || p2m_is_shared(*p2mt) || !p2m_is_ram(*p2mt) )
*
* Returns:
*/
-int
-nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
- bool_t access_r, bool_t access_w, bool_t access_x)
+int nestedhvm_hap_nested_page_fault(
+ struct vcpu *v, paddr_t *L2_gpa, struct npfec npfec)
{
int rv;
paddr_t L1_gpa, L0_gpa;
p2m = p2m_get_hostp2m(d); /* L0 p2m */
/* walk the L1 P2M table */
- rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21, &p2ma_21,
- access_r, access_w, access_x);
+ rv = nhvm_hap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21, &p2ma_21,
+ npfec);
- /* let caller to handle these two cases */
+ /* Let the caller handle these two cases. */
switch (rv) {
}
/* ==> we have to walk L0 P2M */
- rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa,
- &p2mt_10, &p2ma_10, &page_order_10,
- access_r, access_w, access_x);
+ rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa, &p2mt_10, &p2ma_10,
+ &page_order_10, npfec);
- /* let upper level caller to handle these two cases */
+ /* Let the upper-level caller handle these two cases. */
switch (rv) {
const struct paging_mode *mode;
uint8_t l1_p2ma;
unsigned int l1_page_order;
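+ /* Reconstruct the access type from the pagefault error code. */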
+ struct npfec npfec = {
+ .read_access = 1,
+ .write_access = !!(*pfec & PFEC_write_access),
+ .insn_fetch = !!(*pfec & PFEC_insn_fetch),
+ };
int rv;
/* translate l2 guest va into l2 guest gfn */
if ( l2_gfn == gfn_x(INVALID_GFN) )
return gfn_x(INVALID_GFN);
- rv = nestedhap_walk_L1_p2m(v, pfn_to_paddr(l2_gfn), &l1_gpa,
- &l1_page_order, &l1_p2ma,
- 1,
- !!(*pfec & PFEC_write_access),
- !!(*pfec & PFEC_insn_fetch));
+ rv = nhvm_hap_walk_L1_p2m(
+ v, pfn_to_paddr(l2_gfn), &l1_gpa, &l1_page_order, &l1_p2ma, npfec);
if ( rv != NESTEDHVM_PAGEFAULT_DONE )
return gfn_x(INVALID_GFN);