xen_pfn_t *live_p2m; /* Live mapping of the table mapping each PFN to its current MFN. */
xen_pfn_t *p2m; /* A table mapping each PFN to its new MFN. */
unsigned no_superpage_mem; /* If have enough continuous memory for super page allocation */
+ struct domain_info_context dinfo;
};
/* Singleton restore context. Static storage duration means _ctx is
 * zero-initialized before use; the empty initializer list makes that
 * explicit. */
static struct restore_ctx _ctx = {
};
/* All code accesses the context through this pointer rather than through
 * _ctx directly. NOTE(review): the indirection suggests a planned move to
 * a caller-supplied or heap-allocated context — confirm against the rest
 * of the series. */
static struct restore_ctx *ctx = &_ctx;
-static struct domain_info_context _dinfo;
-static struct domain_info_context *dinfo = &_dinfo;
-
/*
** (comment block truncated in this excerpt)
*/
/* Buffer of pfn list for 2M page, or series of 4K pages */
xen_pfn_t *batch_buf;
/* Presumably the number of entries currently held in batch_buf — TODO
 * confirm against the code that fills it. NOTE(review): both globals lack
 * `static`; confirm external linkage is intended, otherwise they pollute
 * the library's symbol namespace. */
unsigned int batch_buf_len;
+ struct domain_info_context *dinfo = &ctx->dinfo;
if ( !superpages )
{
int i, pte_last;
unsigned long pfn;
uint64_t pte;
+ struct domain_info_context *dinfo = &ctx->dinfo;
pte_last = PAGE_SIZE / ((ctx->pt_levels == 2)? 4 : 8);
xen_pfn_t *p2m_frame_list;
vcpu_guest_context_any_t ctxt;
xen_pfn_t p2m_fl_zero;
+ struct domain_info_context *dinfo = &ctx->dinfo;
/* Read first entry of P2M list, or extended-info signature (~0UL). */
if ( read_exact(io_fd, &p2m_fl_zero, sizeof(long)) )
{
unsigned int i;
size_t pfnlen, vcpulen;
+ struct domain_info_context *dinfo = &ctx->dinfo;
/* TODO: handle changing pfntab and vcpu counts */
/* PFN tab */
/* A temporary mapping, and a copy, of one frame of guest memory. */
unsigned long *page = NULL;
int nraces = 0;
+ struct domain_info_context *dinfo = &ctx->dinfo;
unsigned long mfn, pfn, pagetype;
tailbuf_t tailbuf, tmptail;
void* vcpup;
+ struct domain_info_context *dinfo = &ctx->dinfo;
+
pagebuf_init(&pagebuf);
memset(&tailbuf, 0, sizeof(tailbuf));
tailbuf.ishvm = hvm;