[HVM] Save/restore: dynamically calculate the size of the save buffer
author    Tim Deegan <Tim.Deegan@xensource.com>  Wed, 7 Feb 2007 12:41:46 +0000 (12:41 +0000)
committer Tim Deegan <Tim.Deegan@xensource.com>  Wed, 7 Feb 2007 12:41:46 +0000 (12:41 +0000)
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
14 files changed:
tools/libxc/xc_domain.c
tools/libxc/xc_hvm_save.c
xen/arch/x86/domctl.c
xen/arch/x86/hvm/hpet.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/i8254.c
xen/arch/x86/hvm/irq.c
xen/arch/x86/hvm/rtc.c
xen/arch/x86/hvm/save.c
xen/arch/x86/hvm/vioapic.c
xen/arch/x86/hvm/vlapic.c
xen/arch/x86/hvm/vpic.c
xen/include/asm-x86/hvm/support.h
xen/include/public/domctl.h

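What this buys the tools: XEN_DOMCTL_gethvmcontext can now be issued with a NULL buffer, in which case Xen only reports how much space the domain's HVM state needs, and the fixed HVM_CTXT_SIZE guess in libxc goes away. Below is a minimal sketch of the resulting two-call pattern through the libxc wrapper; the helper name is hypothetical, it assumes <xenctrl.h> and the int-handle libxc API used in this tree, and error handling is abbreviated (the real save path is in tools/libxc/xc_hvm_save.c further down).

    #include <stdint.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    /* Sketch only: fetch a domain's marshalled HVM context using the new
     * size query.  Returns a malloc()ed buffer and its length, or NULL. */
    static uint8_t *fetch_hvm_context(int xc_handle, uint32_t dom, uint32_t *len)
    {
        int size, rec_size;
        uint8_t *buf;

        /* NULL buffer: Xen just tells us how much space this domain needs */
        size = xc_domain_hvm_getcontext(xc_handle, dom, 0, 0);
        if ( size == -1 )
            return NULL;

        if ( (buf = malloc(size)) == NULL )
            return NULL;

        /* Second call copies the saved state into our buffer */
        rec_size = xc_domain_hvm_getcontext(xc_handle, dom, buf, size);
        if ( rec_size == -1 )
        {
            free(buf);
            return NULL;
        }

        *len = rec_size;
        return buf;
    }
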
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index e557f43f136dfb363eb21ca13145cd81575056b0..73d229dc61af5003f4a412b1e685204061f02cbf 100644
@@ -252,12 +252,14 @@ int xc_domain_hvm_getcontext(int xc_handle,
     domctl.u.hvmcontext.size = size;
     set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
 
-    if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
-        return ret;
+    if ( ctxt_buf ) 
+        if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
+            return ret;
 
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(ctxt_buf, size);
+    if ( ctxt_buf ) 
+        unlock_pages(ctxt_buf, size);
 
     return (ret < 0 ? -1 : domctl.u.hvmcontext.size);
 }
diff --git a/tools/libxc/xc_hvm_save.c b/tools/libxc/xc_hvm_save.c
index d9cca58d07347a5ef660ab802c60cfbfd5717e5a..e6f5ee1dded65dcad12aa9f9560c5344e585debb 100644
 #include "xg_private.h"
 #include "xg_save_restore.h"
 
-/*
- * Size of a buffer big enough to take the HVM state of a domain.
- * Ought to calculate this a bit more carefully, or maybe ask Xen.
- */
-#define HVM_CTXT_SIZE 8192
-
 /*
 ** Default values for important tuning parameters. Can override by passing
 ** non-zero replacement values to xc_hvm_save().
@@ -286,6 +280,7 @@ int xc_hvm_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
     unsigned long *pfn_batch = NULL;
 
     /* A copy of hvm domain context buffer*/
+    uint32_t hvm_buf_size;
     uint8_t *hvm_buf = NULL;
 
     /* Live mapping of shared info structure */
@@ -431,9 +426,15 @@ int xc_hvm_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
 
     page_array = (unsigned long *) malloc( sizeof(unsigned long) * max_pfn);
 
-    hvm_buf = malloc(HVM_CTXT_SIZE);
+    hvm_buf_size = xc_domain_hvm_getcontext(xc_handle, dom, 0, 0);
+    if ( hvm_buf_size == -1 )
+    {
+        ERROR("Couldn't get HVM context size from Xen");
+        goto out;
+    }
+    hvm_buf = malloc(hvm_buf_size);
 
-    if (!to_send ||!to_skip ||!page_array ||!hvm_buf ) {
+    if (!to_send ||!to_skip ||!page_array ||!hvm_buf) {
         ERROR("Couldn't allocate memory");
         goto out;
     }
@@ -661,7 +662,7 @@ int xc_hvm_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
     }
 
     if ( (rec_size = xc_domain_hvm_getcontext(xc_handle, dom, hvm_buf, 
-                                              HVM_CTXT_SIZE)) == -1) {
+                                              hvm_buf_size)) == -1) {
         ERROR("HVM:Could not get hvm buffer");
         goto out;
     }
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index d73ae65bd50ca9bed1080528adcdaa520c09cf64..384faf48e4d70c3b8dabf6fc1a12c362d1748ae2 100644
@@ -326,10 +326,6 @@ long arch_do_domctl(
         struct hvm_domain_context c;
         struct domain             *d;
 
-        c.cur = 0;
-        c.size = domctl->u.hvmcontext.size;
-        c.data = NULL;
-
         ret = -ESRCH;
         if ( (d = get_domain_by_id(domctl->domain)) == NULL )
             break;
@@ -338,19 +334,38 @@ long arch_do_domctl(
         if ( !is_hvm_domain(d) ) 
             goto gethvmcontext_out;
 
+        c.cur = 0;
+        c.size = hvm_save_size(d);
+        c.data = NULL;
+
+        if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
+        {
+            /* Client is querying for the correct buffer size */
+            domctl->u.hvmcontext.size = c.size;
+            ret = 0;
+            goto gethvmcontext_out;            
+        }
+
+        /* Check that the client has a big enough buffer */
+        ret = -ENOSPC;
+        if ( domctl->u.hvmcontext.size < c.size ) 
+            goto gethvmcontext_out;
+
+        /* Allocate our own marshalling buffer */
         ret = -ENOMEM;
         if ( (c.data = xmalloc_bytes(c.size)) == NULL )
             goto gethvmcontext_out;
 
         ret = hvm_save(d, &c);
 
+        domctl->u.hvmcontext.size = c.cur;
         if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
             ret = -EFAULT;
 
+    gethvmcontext_out:
         if ( copy_to_guest(u_domctl, domctl, 1) )
             ret = -EFAULT;
 
-    gethvmcontext_out:
         if ( c.data != NULL )
             xfree(c.data);
 
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 6269e149a870d5ce00ff7ee5ea1bca86b14cec1c..419a886ebafc81a9db1c5ea3d52cb9e4c97e0653 100644
@@ -409,7 +409,7 @@ static int hpet_load(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, hpet_load);
+HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, hpet_load, 1, HVMSR_PER_DOM);
 
 void hpet_init(struct vcpu *v)
 {
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 39ef3c12213e2c1fb05b812171badc594ffd02dd..96c827d2356ecbe744acf04ae0458c46b8350f90 100644
@@ -227,7 +227,8 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt);
+HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
+                          1, HVMSR_PER_VCPU);
 
 int hvm_vcpu_initialise(struct vcpu *v)
 {
diff --git a/xen/arch/x86/hvm/i8254.c b/xen/arch/x86/hvm/i8254.c
index 11ae4ff0cad2a7cc2b52f97ce980a8af35076677..a3a21809fe01c3977c62d6e5a2747d8e5bb439d2 100644
@@ -445,7 +445,7 @@ static int pit_load(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load);
+HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load, 1, HVMSR_PER_DOM);
 
 static void pit_reset(void *opaque)
 {
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 6d8432c1ba3199903b0dd834bf4401de6b7695f4..aa531b33fb520af37fc75c86ab78a6840f245974 100644
@@ -480,6 +480,9 @@ static int irq_load_link(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci);
-HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa);
-HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link);
+HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci,
+                          1, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa, 
+                          1, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link,
+                          1, HVMSR_PER_DOM);
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index d2ee7f8825cd13a15553122ad50cb88cb6fe0374..5cda6bbb7564d0c8f4933ce28f07ccd480dbcc79 100644
@@ -417,7 +417,7 @@ static int rtc_load(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save, rtc_load);
+HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save, rtc_load, 1, HVMSR_PER_DOM);
 
 
 void rtc_init(struct vcpu *v, int base)
diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c
index 313f140d24757e903dfe0e6ee3447e0e9235b006..a19043c1755d115dc7e1aeb96feae3910b07adb6 100644
@@ -35,13 +35,16 @@ static struct {
     hvm_save_handler save;
     hvm_load_handler load; 
     const char *name;
+    size_t size;
+    int kind;
 } hvm_sr_handlers [HVM_SAVE_CODE_MAX + 1] = {{NULL, NULL, "<?>"},};
 
 /* Init-time function to add entries to that list */
 void hvm_register_savevm(uint16_t typecode, 
                          const char *name,
                          hvm_save_handler save_state,
-                         hvm_load_handler load_state)
+                         hvm_load_handler load_state,
+                         size_t size, int kind)
 {
     ASSERT(typecode <= HVM_SAVE_CODE_MAX);
     ASSERT(hvm_sr_handlers[typecode].save == NULL);
@@ -49,6 +52,28 @@ void hvm_register_savevm(uint16_t typecode,
     hvm_sr_handlers[typecode].save = save_state;
     hvm_sr_handlers[typecode].load = load_state;
     hvm_sr_handlers[typecode].name = name;
+    hvm_sr_handlers[typecode].size = size;
+    hvm_sr_handlers[typecode].kind = kind;
+}
+
+size_t hvm_save_size(struct domain *d) 
+{
+    struct vcpu *v;
+    size_t sz;
+    int i;
+    
+    /* Basic overhead for header and footer */
+    sz = (2 * sizeof (struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER);
+
+    /* Plus space for each thing we will be saving */
+    for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) 
+        if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU )
+            for_each_vcpu(d, v)
+                sz += hvm_sr_handlers[i].size;
+        else 
+            sz += hvm_sr_handlers[i].size;
+
+    return sz;
 }
 
 
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 4e444ce93d8c4061808b79a25b53088364e76b37..0ce59f94b12c0c8f44fa4c0e45c4efea5e17625c 100644
@@ -514,7 +514,7 @@ static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load);
+HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
 
 void vioapic_init(struct domain *d)
 {
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index cc4964980d890ea07f1bc71b6aec920fcbb861ce..858c9ab6e2e457aec57ecfe59ce17aa094b0a8da 100644
@@ -904,8 +904,10 @@ static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden);
-HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs);
+HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden,
+                          1, HVMSR_PER_VCPU);
+HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs,
+                          1, HVMSR_PER_VCPU);
 
 int vlapic_init(struct vcpu *v)
 {
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index 41e3ed11a489b6cf3370901eeeb36e9b9b366fcb..3a5b2a16a8651a3d7821e69e0ff36fad0c22208f 100644
@@ -440,7 +440,7 @@ static int vpic_load(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load);
+HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load, 2, HVMSR_PER_DOM);
 
 void vpic_init(struct domain *d)
 {
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 6d57140faea8fc4d539385e6c6163c4f507f06ae..6fe753458c0d0dd311caaa7cdf928c3155e3ec0c 100644
@@ -221,23 +221,37 @@ typedef int (*hvm_save_handler) (struct domain *d,
 typedef int (*hvm_load_handler) (struct domain *d,
                                  hvm_domain_context_t *h);
 
-/* Init-time function to declare a pair of handlers for a type */
+/* Init-time function to declare a pair of handlers for a type,
+ * and the maximum buffer space needed to save this type of state */
 void hvm_register_savevm(uint16_t typecode,
                          const char *name, 
                          hvm_save_handler save_state,
-                         hvm_load_handler load_state);
-
-/* Syntactic sugar around that function */
-#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load)                     \
-static int __hvm_register_##_x##_save_and_restore(void)                 \
-{                                                                       \
-    hvm_register_savevm(HVM_SAVE_CODE(_x), #_x, &_save, &_load);        \
-    return 0;                                                           \
-}                                                                       \
+                         hvm_load_handler load_state,
+                         size_t size, int kind);
+
+/* The space needed for saving can be per-domain or per-vcpu: */
+#define HVMSR_PER_DOM  0
+#define HVMSR_PER_VCPU 1
+
+/* Syntactic sugar around that function: specify the max number of
+ * saves, and this calculates the size of buffer needed */
+#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load, _num, _k)             \
+static int __hvm_register_##_x##_save_and_restore(void)                   \
+{                                                                         \
+    hvm_register_savevm(HVM_SAVE_CODE(_x),                                \
+                        #_x,                                              \
+                        &_save,                                           \
+                        &_load,                                           \
+                        (_num) * (HVM_SAVE_LENGTH(_x)                     \
+                                  + sizeof (struct hvm_save_descriptor)), \
+                        _k);                                              \
+    return 0;                                                             \
+}                                                                         \
 __initcall(__hvm_register_##_x##_save_and_restore);
 
 
 /* Entry points for saving and restoring HVM domain state */
+size_t hvm_save_size(struct domain *d);
 int hvm_save(struct domain *d, hvm_domain_context_t *h);
 int hvm_load(struct domain *d, hvm_domain_context_t *h);
 
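Reading the registration size concretely: each HVM_REGISTER_SAVE_RESTORE call records _num * (HVM_SAVE_LENGTH(_x) + sizeof(struct hvm_save_descriptor)) bytes, and hvm_save_size() in xen/arch/x86/hvm/save.c adds that once per domain for HVMSR_PER_DOM entries, once per vcpu for HVMSR_PER_VCPU entries, plus the header record and footer descriptor. The following is a toy model of that accounting only; the lengths are invented stand-ins, not the real HVM_SAVE_LENGTH() values and not hypervisor code.

    #include <stddef.h>

    /* Stand-in sizes; the real ones come from the save-format definitions. */
    #define DESC_SZ      8   /* stands in for sizeof(struct hvm_save_descriptor) */
    #define HEADER_LEN  24   /* stands in for HVM_SAVE_LENGTH(HEADER)            */
    #define CPU_LEN    800   /* per-vcpu CPU record, registered with _num == 1   */
    #define PIC_LEN     32   /* per-domain PIC record, registered with _num == 2 */

    static size_t example_hvm_save_size(unsigned int nr_vcpus)
    {
        /* Header and footer descriptors, plus the header body itself */
        size_t sz = 2 * DESC_SZ + HEADER_LEN;

        /* HVMSR_PER_VCPU entries are counted once for every vcpu */
        sz += nr_vcpus * (CPU_LEN + DESC_SZ);

        /* HVMSR_PER_DOM entries are counted once; the PIC registers two
         * instances, so its registered size is already 2 * (len + desc) */
        sz += 2 * (PIC_LEN + DESC_SZ);

        return sz;
    }
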
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 93f44f3ba58a208b173dab9654dc1b55b2169dd1..f7ea2d8accdebf0fcbda783886a83ab13534b12f 100644
@@ -390,7 +390,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
 #define XEN_DOMCTL_sethvmcontext   34
 typedef struct xen_domctl_hvmcontext {
     uint32_t size; /* IN/OUT: size of buffer / bytes filled */
-    XEN_GUEST_HANDLE(uint8_t) buffer; /* IN/OUT */
+    XEN_GUEST_HANDLE(uint8_t) buffer; /* IN/OUT: data, or call gethvmcontext 
+                                       * with NULL buffer to get size req'd */
 } xen_domctl_hvmcontext_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);