    local_irq_restore(flags);
}
-void clear_domain_page(unsigned long mfn)
-{
-    void *ptr = map_domain_page(mfn);
-
-    clear_page(ptr);
-    unmap_domain_page(ptr);
-}
-
-void copy_domain_page(unsigned long dmfn, unsigned long smfn)
-{
-    const void *src = map_domain_page(smfn);
-    void *dst = map_domain_page(dmfn);
-
-    copy_page(dst, src);
-    unmap_domain_page(dst);
-    unmap_domain_page(src);
-}
-
int mapcache_domain_init(struct domain *d)
{
    struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
        if ( (1UL << (max_bitsize - PAGE_SHIFT)) <= mfn )
        {
            struct page_info *new_page;
-            void *sp, *dp;
            new_page = alloc_domheap_page(e, MEMF_no_owner |
                                             MEMF_bits(max_bitsize));
                goto unlock_and_copyback;
            }
-            sp = map_domain_page(mfn);
-            dp = __map_domain_page(new_page);
-            memcpy(dp, sp, PAGE_SIZE);
-            unmap_domain_page(dp);
-            unmap_domain_page(sp);
+            copy_domain_page(page_to_mfn(new_page), mfn);
            page->count_info &= ~(PGC_count_mask|PGC_allocated);
            free_domheap_page(page);
    /* Make sure there's no crud left over in the table from the
       old version. */
    for ( i = 0; i < nr_grant_frames(gt); i++ )
-        memset(gt->shared_raw[i], 0, PAGE_SIZE);
+        clear_page(gt->shared_raw[i]);
    /* Restore the first 8 entries (toolstack reserved grants) */
    if ( gt->gt_version != 0 && op.version == 1 )
    return rc;
}
+#ifdef CONFIG_DOMAIN_PAGE
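+/* Zero the page at the given MFN using a transient mapping. */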
+void clear_domain_page(unsigned long mfn)
+{
+    void *ptr = map_domain_page(mfn);
+
+    clear_page(ptr);
+    unmap_domain_page(ptr);
+}
+
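+/* Copy the page at MFN smfn to the page at MFN dmfn using transient mappings. */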
+void copy_domain_page(unsigned long dmfn, unsigned long smfn)
+{
+    const void *src = map_domain_page(smfn);
+    void *dst = map_domain_page(dmfn);
+
+    copy_page(dst, src);
+    unmap_domain_page(dst);
+    unmap_domain_page(src);
+}
+#endif
+
void destroy_ring_for_helper(
    void **_va, struct page_info *page)
{
/* Actual cacheline size on the boot CPU. */
extern size_t cacheline_bytes;
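+/* Copy one page of data from sp to dp. */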
+#define copy_page(dp, sp) memcpy(dp, sp, PAGE_SIZE)
+
/* Functions for flushing medium-sized areas.
 * if 'range' is large enough we might want to use model-specific
 * full-cache flushes. */