/* If this guest has a restricted physical address space then the
* target GFN must fit within it. */
- if ( !(rc & _PAGE_PRESENT) && !gfn_valid(d, guest_l1e_get_gfn(gw->l1e)) )
+ if ( !(rc & _PAGE_PRESENT) && !gfn_valid(d, guest_walk_to_gfn(gw)) )
rc |= _PAGE_INVALID_BITS;
return rc;
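
The check above leans on two helpers: guest_walk_to_gfn(), which folds the
not-present case into a single INVALID_GFN sentinel, and gfn_valid(), which
is essentially a width test against the guest's advertised physical address
space. A minimal sketch of the latter (the maxphysaddr field name is an
assumption for illustration, not a quote of the real header):

    /* Illustrative sketch: a GFN is valid iff no bits are set above the
     * guest's physical address width (maxphysaddr is an assumed name). */
    static inline bool gfn_valid(const struct domain *d, gfn_t gfn)
    {
        unsigned int width = d->arch.cpuid->extd.maxphysaddr; /* assumed */

        return !(gfn_x(gfn) >> (width - PAGE_SHIFT));
    }
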
/* Interpret the answer */
if ( missing == 0 )
{
- gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
+ gfn_t gfn = guest_walk_to_gfn(&gw);
struct page_info *page;
page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), &p2mt,
NULL, P2M_ALLOC | P2M_UNSHARE);
}
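
For context, get_page_from_gfn_p2m() both performs the translation and takes
a reference on the backing page; P2M_ALLOC populates the entry if necessary
and P2M_UNSHARE breaks any CoW sharing first. A hedged sketch of the
caller's side of that contract (the error handling is illustrative, not the
real code path):

    /* Sketch only: a non-NULL result carries a page reference that must
     * be dropped once the translation has been consumed. */
    if ( page )
    {
        mfn_t mfn = page_to_mfn(page);  /* frame actually backing gfn */

        /* ... use mfn ... */
        put_page(page);                 /* drop the reference taken above */
    }
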
/* What mfn is the guest trying to access? */
- gfn = guest_l1e_get_gfn(gw.l1e);
+ gfn = guest_walk_to_gfn(&gw);
gmfn = get_gfn(d, gfn, &p2mt);
if ( shadow_mode_refcounts(d) &&
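
get_gfn() is the locked counterpart on this path: it resolves the GFN to a
machine frame and holds the P2M entry until a matching put_gfn(). A minimal
sketch of that pairing, with an illustrative bail-out (the real fault
handler keeps the entry held across much more work):

    gmfn = get_gfn(d, gfn, &p2mt);
    if ( !p2m_is_ram(p2mt) || !mfn_valid(gmfn) )
    {
        put_gfn(d, gfn_x(gfn));
        return 0;                       /* illustrative: not handled here */
    }
    /* ... consume gmfn ... */
    put_gfn(d, gfn_x(gfn));
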
/* Given a walk_t, translate the gw->va into the guest's notion of the
* corresponding frame number. */
-static inline gfn_t
-guest_walk_to_gfn(walk_t *gw)
+static inline gfn_t guest_walk_to_gfn(const walk_t *gw)
{
if ( !(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT) )
return INVALID_GFN;
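
With the not-present test folded into the helper, callers compare against
the INVALID_GFN sentinel rather than inspecting gw->l1e flags themselves.
A hedged usage sketch (the error value is illustrative):

    gfn_t gfn = guest_walk_to_gfn(&gw);

    if ( gfn_eq(gfn, INVALID_GFN) )
        return -EFAULT;                 /* illustrative error path */
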
/* Given a walk_t, translate the gw->va into the guest's notion of the
* corresponding physical address. */
-static inline paddr_t
-guest_walk_to_gpa(walk_t *gw)
+static inline paddr_t guest_walk_to_gpa(const walk_t *gw)
{
- if ( !(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT) )
- return 0;
- return ((paddr_t)gfn_x(guest_l1e_get_gfn(gw->l1e)) << PAGE_SHIFT) +
- (gw->va & ~PAGE_MASK);
+ gfn_t gfn = guest_walk_to_gfn(gw);
+
+ if ( gfn_eq(gfn, INVALID_GFN) )
+ return INVALID_PADDR;
+
+ return (gfn_x(gfn) << PAGE_SHIFT) | (gw->va & ~PAGE_MASK);
}
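
Note the behavioural fix folded into this cleanup: the old code returned 0
for a not-present walk, which is indistinguishable from a legitimate
translation to guest physical address 0, whereas the new code returns the
unambiguous INVALID_PADDR sentinel. A worked example with illustrative
numbers, assuming 4k pages (PAGE_SHIFT == 12, so ~PAGE_MASK == 0xfff):

    gfn_t gfn = _gfn(0x1234);           /* frame the walk resolved to */
    unsigned long va = 0xbeef567;       /* address being translated */
    paddr_t gpa = (gfn_x(gfn) << PAGE_SHIFT) | (va & ~PAGE_MASK);
    /* gpa == 0x1234000 | 0x567 == 0x1234567 */
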
/* Given a walk_t from a successful walk, return the page-order of the
* page or superpage that the virtual address is in. */
-static inline unsigned int
-guest_walk_to_page_order(walk_t *gw)
+static inline unsigned int guest_walk_to_page_order(const walk_t *gw)
{
/* This is only valid for successful walks - otherwise the
* PSE bits might be invalid. */