domctl.u.hvmcontext.size = size;
set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
- if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
- return ret;
+ if ( ctxt_buf )
+ if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
+ return ret;
ret = do_domctl(xc_handle, &domctl);
- unlock_pages(ctxt_buf, size);
+ if ( ctxt_buf )
+ unlock_pages(ctxt_buf, size);
return (ret < 0 ? -1 : domctl.u.hvmcontext.size);
}
#include "xg_private.h"
#include "xg_save_restore.h"
-/*
- * Size of a buffer big enough to take the HVM state of a domain.
- * Ought to calculate this a bit more carefully, or maybe ask Xen.
- */
-#define HVM_CTXT_SIZE 8192
-
/*
** Default values for important tuning parameters. Can override by passing
** non-zero replacement values to xc_hvm_save().
unsigned long *pfn_batch = NULL;
/* A copy of hvm domain context buffer*/
+ uint32_t hvm_buf_size;
uint8_t *hvm_buf = NULL;
/* Live mapping of shared info structure */
page_array = (unsigned long *) malloc( sizeof(unsigned long) * max_pfn);
- hvm_buf = malloc(HVM_CTXT_SIZE);
+ hvm_buf_size = xc_domain_hvm_getcontext(xc_handle, dom, 0, 0);
+ if ( hvm_buf_size == -1 )
+ {
+ ERROR("Couldn't get HVM context size from Xen");
+ goto out;
+ }
+ hvm_buf = malloc(hvm_buf_size);
- if (!to_send ||!to_skip ||!page_array ||!hvm_buf ) {
+ if (!to_send ||!to_skip ||!page_array ||!hvm_buf) {
ERROR("Couldn't allocate memory");
goto out;
}
}
if ( (rec_size = xc_domain_hvm_getcontext(xc_handle, dom, hvm_buf,
- HVM_CTXT_SIZE)) == -1) {
+ hvm_buf_size)) == -1) {
ERROR("HVM:Could not get hvm buffer");
goto out;
}
struct hvm_domain_context c;
struct domain *d;
- c.cur = 0;
- c.size = domctl->u.hvmcontext.size;
- c.data = NULL;
-
ret = -ESRCH;
if ( (d = get_domain_by_id(domctl->domain)) == NULL )
break;
if ( !is_hvm_domain(d) )
goto gethvmcontext_out;
+ c.cur = 0;
+ c.size = hvm_save_size(d);
+ c.data = NULL;
+
+ if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
+ {
+ /* Client is querying for the correct buffer size */
+ domctl->u.hvmcontext.size = c.size;
+ ret = 0;
+ goto gethvmcontext_out;
+ }
+
+ /* Check that the client has a big enough buffer */
+ ret = -ENOSPC;
+ if ( domctl->u.hvmcontext.size < c.size )
+ goto gethvmcontext_out;
+
+ /* Allocate our own marshalling buffer */
ret = -ENOMEM;
if ( (c.data = xmalloc_bytes(c.size)) == NULL )
goto gethvmcontext_out;
ret = hvm_save(d, &c);
+ domctl->u.hvmcontext.size = c.cur;
if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
ret = -EFAULT;
+ gethvmcontext_out:
if ( copy_to_guest(u_domctl, domctl, 1) )
ret = -EFAULT;
- gethvmcontext_out:
if ( c.data != NULL )
xfree(c.data);
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, hpet_load);
+HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, hpet_load, 1, HVMSR_PER_DOM);
void hpet_init(struct vcpu *v)
{
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt);
+HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
+ 1, HVMSR_PER_VCPU);
int hvm_vcpu_initialise(struct vcpu *v)
{
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load);
+HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load, 1, HVMSR_PER_DOM);
static void pit_reset(void *opaque)
{
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci);
-HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa);
-HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link);
+HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci,
+ 1, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa,
+ 1, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link,
+ 1, HVMSR_PER_DOM);
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save, rtc_load);
+HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save, rtc_load, 1, HVMSR_PER_DOM);
void rtc_init(struct vcpu *v, int base)
hvm_save_handler save;
hvm_load_handler load;
const char *name;
+ size_t size;
+ int kind;
} hvm_sr_handlers [HVM_SAVE_CODE_MAX + 1] = {{NULL, NULL, "<?>"},};
/* Init-time function to add entries to that list */
void hvm_register_savevm(uint16_t typecode,
const char *name,
hvm_save_handler save_state,
- hvm_load_handler load_state)
+ hvm_load_handler load_state,
+ size_t size, int kind)
{
ASSERT(typecode <= HVM_SAVE_CODE_MAX);
ASSERT(hvm_sr_handlers[typecode].save == NULL);
hvm_sr_handlers[typecode].save = save_state;
hvm_sr_handlers[typecode].load = load_state;
hvm_sr_handlers[typecode].name = name;
+ hvm_sr_handlers[typecode].size = size;
+ hvm_sr_handlers[typecode].kind = kind;
+}
+
+size_t hvm_save_size(struct domain *d)
+{
+ struct vcpu *v;
+ size_t sz;
+ int i;
+
+ /* Basic overhead for header and footer */
+ sz = (2 * sizeof (struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER);
+
+ /* Plus space for each thing we will be saving */
+ for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
+ if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU )
+ for_each_vcpu(d, v)
+ sz += hvm_sr_handlers[i].size;
+ else
+ sz += hvm_sr_handlers[i].size;
+
+ return sz;
}
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load);
+HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
void vioapic_init(struct domain *d)
{
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden);
-HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs);
+HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden,
+ 1, HVMSR_PER_VCPU);
+HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs,
+ 1, HVMSR_PER_VCPU);
int vlapic_init(struct vcpu *v)
{
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load);
+HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load, 2, HVMSR_PER_DOM);
void vpic_init(struct domain *d)
{
typedef int (*hvm_load_handler) (struct domain *d,
hvm_domain_context_t *h);
-/* Init-time function to declare a pair of handlers for a type */
+/* Init-time function to declare a pair of handlers for a type, the
+ * maximum buffer space needed to save this type of state, and whether
+ * that space is needed once per domain or once per vcpu */
void hvm_register_savevm(uint16_t typecode,
const char *name,
hvm_save_handler save_state,
- hvm_load_handler load_state);
-
-/* Syntactic sugar around that function */
-#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load) \
-static int __hvm_register_##_x##_save_and_restore(void) \
-{ \
- hvm_register_savevm(HVM_SAVE_CODE(_x), #_x, &_save, &_load); \
- return 0; \
-} \
+ hvm_load_handler load_state,
+ size_t size, int kind);
+
+/* The space needed for saving can be per-domain or per-vcpu: */
+#define HVMSR_PER_DOM 0
+#define HVMSR_PER_VCPU 1
+
+/* Syntactic sugar around that function: specify the maximum number of
+ * instances of this record that can be saved (per domain or per vcpu,
+ * according to _k), and the required buffer size is derived from it */
+#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load, _num, _k) \
+static int __hvm_register_##_x##_save_and_restore(void) \
+{ \
+ hvm_register_savevm(HVM_SAVE_CODE(_x), \
+ #_x, \
+ &_save, \
+ &_load, \
+ (_num) * (HVM_SAVE_LENGTH(_x) \
+ + sizeof (struct hvm_save_descriptor)), \
+ _k); \
+ return 0; \
+} \
__initcall(__hvm_register_##_x##_save_and_restore);
/* Entry points for saving and restoring HVM domain state */
+size_t hvm_save_size(struct domain *d);
int hvm_save(struct domain *d, hvm_domain_context_t *h);
int hvm_load(struct domain *d, hvm_domain_context_t *h);
#define XEN_DOMCTL_sethvmcontext 34
typedef struct xen_domctl_hvmcontext {
uint32_t size; /* IN/OUT: size of buffer / bytes filled */
- XEN_GUEST_HANDLE(uint8_t) buffer; /* IN/OUT */
+    XEN_GUEST_HANDLE(uint8_t) buffer; /* IN/OUT: data, or call
+                                       * gethvmcontext with a NULL buffer
+                                       * to query the required size */
} xen_domctl_hvmcontext_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);