}
}
-bool_t p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
+bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
bool_t access_r, bool_t access_w, bool_t access_x,
mem_event_request_t **req_ptr)
{
int hvm_debug_op(struct vcpu *v, int32_t op);
-int hvm_hap_nested_page_fault(unsigned long gpa,
+int hvm_hap_nested_page_fault(paddr_t gpa,
bool_t gla_valid, unsigned long gla,
bool_t access_r,
bool_t access_w,
* been promoted with no underlying vcpu pause. If the req_ptr has been populated,
* then the caller must put the event in the ring (once having released get_gfn*
* locks -- caller must also xfree the request. */
-bool_t p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
+bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
bool_t access_r, bool_t access_w, bool_t access_x,
mem_event_request_t **req_ptr);
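
As a rough illustration of the caller contract described in the comment above (this is not code from the patch), the return value and req_ptr are meant to be handled along these lines; put_request_to_ring() below is a hypothetical placeholder for whatever mem_event ring helper the caller actually uses:

/*
 * Hedged sketch of a caller honouring the p2m_mem_access_check() contract
 * described in the comment above -- not part of this patch.
 */
static void example_caller(paddr_t gpa, bool_t gla_valid, unsigned long gla,
                           bool_t r, bool_t w, bool_t x)
{
    mem_event_request_t *req = NULL;
    bool_t promoted = p2m_mem_access_check(gpa, gla_valid, gla, r, w, x, &req);

    /* ... release any get_gfn* locks taken around the check ... */

    if ( req )
    {
        put_request_to_ring(req);   /* hypothetical ring-placement helper */
        xfree(req);                 /* caller owns and must free the request */
    }

    (void)promoted; /* non-zero: access rights promoted, no vcpu pause */
}
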
/* Resumes the running of the VCPU, restarting the last instruction */
hvmmem_access_t *access);
#else
-static inline bool_t p2m_mem_access_check(unsigned long gpa, bool_t gla_valid,
+static inline bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid,
unsigned long gla, bool_t access_r,
bool_t access_w, bool_t access_x,
mem_event_request_t **req_ptr)
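
The common thread in these hunks is widening the gpa parameter from unsigned long to paddr_t, presumably so that guest physical addresses above 4GiB are not truncated on builds where unsigned long is only 32 bits wide while paddr_t is 64 bits. A minimal, standalone sketch of the failure mode (not Xen code; the paddr_t typedef here is an assumption mirroring Xen's 64-bit physical address type):

/*
 * Standalone illustration of why a 64-bit paddr_t is preferable to
 * unsigned long for guest physical addresses on a 32-bit build.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t paddr_t;   /* assumption: stands in for Xen's paddr_t */

int main(void)
{
    paddr_t gpa = 0x140000000ULL;              /* guest address above 4GiB */
    unsigned long truncated = (unsigned long)gpa;

    /* On an LP64 host both lines print the same value; on a build where
     * unsigned long is 32 bits, the second loses the high bits
     * (0x40000000 instead of 0x140000000). */
    printf("paddr_t       gpa = %#llx\n", (unsigned long long)gpa);
    printf("unsigned long gpa = %#lx\n", truncated);
    return 0;
}
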