}
#ifdef CONFIG_DOMAIN_PAGE
-void *map_domain_page_global(unsigned long mfn)
+void *map_domain_page_global(mfn_t mfn)
{
- mfn_t m = _mfn(mfn);
-
- return vmap(&m, 1);
+ return vmap(&mfn, 1);
}
void unmap_domain_page_global(const void *va)
return 0;
}
-void *map_domain_page_global(unsigned long mfn)
+void *map_domain_page_global(mfn_t mfn)
{
- mfn_t m = _mfn(mfn);
ASSERT(!in_irq() && local_irq_is_enabled());
#ifdef NDEBUG
- if ( mfn <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
- return mfn_to_virt(mfn);
+ if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
+ return mfn_to_virt(mfn_x(mfn));
#endif
- return vmap(&m, 1);
+ return vmap(&mfn, 1);
}
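/*
 * Illustrative caller-side sketch, not part of this patch: with the
 * typesafe interface, a raw frame number must be wrapped with _mfn()
 * before mapping, and callers still have to cope with failure because the
 * vmap() fallback can return NULL.  "frame" and example_map_frame() are
 * hypothetical names used only for this example.
 */
static int __maybe_unused example_map_frame(unsigned long frame)
{
    void *va = map_domain_page_global(_mfn(frame));

    if ( va == NULL )
        return -ENOMEM;

    /* ... use the persistent mapping ... */

    unmap_domain_page_global(va);
    return 0;
}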
void unmap_domain_page_global(const void *ptr)
if ( v->arch.paging.shadow.guest_vtable )
{
if ( shadow_mode_external(d) || shadow_mode_translate(d) )
- sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
+ unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
v->arch.paging.shadow.guest_vtable = NULL;
}
#endif // !NDEBUG
if ( shadow_mode_external(d) || shadow_mode_translate(d) )
{
if ( v->arch.paging.shadow.guest_vtable )
- sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
- v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+ unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
+ v->arch.paging.shadow.guest_vtable = map_domain_page_global(gmfn);
/* PAGING_LEVELS==4 implies 64-bit, which means that
* map_domain_page_global can't fail */
BUG_ON(v->arch.paging.shadow.guest_vtable == NULL);
if ( shadow_mode_external(d) || shadow_mode_translate(d) )
{
if ( v->arch.paging.shadow.guest_vtable )
- sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
- v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+ unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
+ v->arch.paging.shadow.guest_vtable = map_domain_page_global(gmfn);
/* Does this really need map_domain_page_global? Handle the
* error properly if so. */
BUG_ON(v->arch.paging.shadow.guest_vtable == NULL); /* XXX */
unmap_domain_page(p);
}
-static inline void *
-sh_map_domain_page_global(mfn_t mfn)
-{
- return map_domain_page_global(mfn_x(mfn));
-}
-
-static inline void
-sh_unmap_domain_page_global(void *p)
-{
- unmap_domain_page_global(p);
-}
-
/**************************************************************************/
/* Shadow-page refcounting. */
#include <xen/kernel.h>
#include <asm/page.h>
#include <public/xen.h>
-#include <xen/domain_page.h>
#include <xen/pdx.h>
/* Align Xen to a 2 MiB boundary. */
* address spaces (not just within the VCPU that created the mapping). Global
* mappings can also be unmapped from any context.
*/
-void *map_domain_page_global(unsigned long mfn);
+void *map_domain_page_global(mfn_t mfn);
void unmap_domain_page_global(const void *va);
#define __map_domain_page(pg) map_domain_page(__page_to_mfn(pg))
-#define __map_domain_page_global(pg) map_domain_page_global(__page_to_mfn(pg))
+
+static inline void *__map_domain_page_global(const struct page_info *pg)
+{
+ return map_domain_page_global(_mfn(__page_to_mfn(pg)));
+}
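/*
 * Illustrative sketch, not part of this patch: turning the old
 * __map_domain_page_global() macro into a static inline keeps the
 * page-based interface unchanged for callers while putting the conversion
 * to mfn_t in exactly one, type-checked place.  example_map_page() is a
 * hypothetical helper; "pg" is assumed to be a valid struct page_info
 * pointer owned by the caller.
 */
static void __maybe_unused example_map_page(const struct page_info *pg)
{
    void *va = __map_domain_page_global(pg);

    if ( va != NULL )
        unmap_domain_page_global(va);
}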
#define DMCACHE_ENTRY_VALID 1U
#define DMCACHE_ENTRY_HELD 2U
mfn_to_virt(smfn))
#define domain_page_map_to_mfn(va) virt_to_mfn((unsigned long)(va))
-#define map_domain_page_global(mfn) mfn_to_virt(mfn)
-#define __map_domain_page_global(pg) page_to_virt(pg)
-#define unmap_domain_page_global(va) ((void)(va))
+static inline void *map_domain_page_global(mfn_t mfn)
+{
+ return mfn_to_virt(mfn_x(mfn));
+}
+
+static inline void *__map_domain_page_global(const struct page_info *pg)
+{
+ return page_to_virt(pg);
+}
+
+static inline void unmap_domain_page_global(const void *va) {}
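/*
 * Illustrative sketch, not part of this patch: in this configuration the
 * whole of memory is covered by the direct map, so mapping is only an
 * address calculation and unmapping is a no-op; the call below cannot
 * fail.  example_direct_map() is a hypothetical helper used only for this
 * example.
 */
static void __maybe_unused example_direct_map(mfn_t frame)
{
    void *va = map_domain_page_global(frame);

    /* Always non-NULL here: mfn_to_virt() merely computes an address. */
    unmap_domain_page_global(va);
}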
struct domain_mmap_cache {
};