[HVM] Replace relinquish_resources() destructor hook with
author kfraser@localhost.localdomain <kfraser@localhost.localdomain>
Mon, 6 Nov 2006 16:36:51 +0000 (16:36 +0000)
committer kfraser@localhost.localdomain <kfraser@localhost.localdomain>
Mon, 6 Nov 2006 16:36:51 +0000 (16:36 +0000)
separate vcpu and domain destructors that are called at the
point the domain is finally destroyed.
Signed-off-by: Keir Fraser <keir@xensource.com>
xen/arch/x86/domain.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/svm/vmcb.c
xen/arch/x86/hvm/vlapic.c
xen/arch/x86/hvm/vmx/vmcs.c
xen/arch/x86/hvm/vmx/vmx.c
xen/include/asm-x86/hvm/hvm.h
xen/include/asm-x86/hvm/io.h
xen/include/asm-x86/hvm/vlapic.h

index 0ee7361825bc7c072e6bc1c11d6faff40684eb2f..6c4ea37ae148dc9fb5827f4a2c3bf201b5cfba02 100644 (file)
@@ -235,7 +235,7 @@ int arch_domain_create(struct domain *d)
             virt_to_page(d->shared_info), d, XENSHARE_writable);
     }
 
-    return hvm_domain_initialise(d);
+    return is_hvm_domain(d) ? hvm_domain_initialise(d) : 0;
 
  fail:
     free_xenheap_page(d->shared_info);
@@ -249,6 +249,15 @@ int arch_domain_create(struct domain *d)
 
 void arch_domain_destroy(struct domain *d)
 {
+    struct vcpu *v;
+
+    if ( is_hvm_domain(d) )
+    {
+        for_each_vcpu ( d, v )
+            hvm_vcpu_destroy(v);
+        hvm_domain_destroy(d);
+    }
+
     shadow_final_teardown(d);
 
     free_xenheap_pages(
@@ -974,9 +983,6 @@ void domain_relinquish_resources(struct domain *d)
 #endif
     }
 
-    if ( is_hvm_domain(d) )
-        hvm_relinquish_guest_resources(d);
-
     /* Tear down shadow mode stuff. */
     shadow_teardown(d);
 
index 6bc9881165e11a2d380851331a48e3669da6170f..10b97c2b96e2b300cd4e9116514a0af7f949e499 100644 (file)
@@ -110,19 +110,11 @@ void hvm_do_resume(struct vcpu *v)
     }
 }
 
-void hvm_release_assist_channel(struct vcpu *v)
-{
-    free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);
-}
-
 int hvm_domain_initialise(struct domain *d)
 {
     struct hvm_domain *platform = &d->arch.hvm_domain;
     int rc;
 
-    if ( !is_hvm_domain(d) )
-        return 0;
-
     if ( !hvm_enabled )
     {
         gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
@@ -146,6 +138,20 @@ int hvm_domain_initialise(struct domain *d)
     return 0;
 }
 
+void hvm_domain_destroy(struct domain *d)
+{
+    kill_timer(&d->arch.hvm_domain.pl_time.periodic_tm.timer);
+    rtc_deinit(d);
+    pmtimer_deinit(d);
+
+    if ( d->arch.hvm_domain.shared_page_va )
+        unmap_domain_page_global(
+            (void *)d->arch.hvm_domain.shared_page_va);
+
+    if ( d->arch.hvm_domain.buffered_io_va )
+        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
+}
+
 int hvm_vcpu_initialise(struct vcpu *v)
 {
     struct hvm_domain *platform;
@@ -154,12 +160,20 @@ int hvm_vcpu_initialise(struct vcpu *v)
     if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
         return rc;
 
+    if ( (rc = vlapic_init(v)) != 0 )
+    {
+        hvm_funcs.vcpu_destroy(v);
+        return rc;
+    }
+
     /* Create ioreq event channel. */
     v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
     if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) )
         get_vio(v->domain, v->vcpu_id)->vp_eport =
             v->arch.hvm_vcpu.xen_port;
 
+    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
+
     if ( v->vcpu_id != 0 )
         return 0;
 
@@ -178,6 +192,16 @@ int hvm_vcpu_initialise(struct vcpu *v)
     return 0;
 }
 
+void hvm_vcpu_destroy(struct vcpu *v)
+{
+    kill_timer(&v->arch.hvm_vcpu.hlt_timer);
+    vlapic_destroy(v);
+    hvm_funcs.vcpu_destroy(v);
+
+    /* Event channel is already freed by evtchn_destroy(). */
+    /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
+}
+
 void pic_irq_request(void *data, int level)
 {
     int *interrupt_request = data;
index 5ff04f79e562630a227fc0b661414e593f82a2d1..3e4dd31d23795bfd60636b5f3536fb361788ea9e 100644 (file)
@@ -61,7 +61,6 @@ extern void svm_dump_inst(unsigned long eip);
 extern int svm_dbg_on;
 void svm_dump_regs(const char *from, struct cpu_user_regs *regs);
 
-static void svm_relinquish_guest_resources(struct domain *d);
 static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v,
                                             struct cpu_user_regs *regs);
 
@@ -777,6 +776,11 @@ static int svm_vcpu_initialise(struct vcpu *v)
     return 0;
 }
 
+static void svm_vcpu_destroy(struct vcpu *v)
+{
+    destroy_vmcb(&v->arch.hvm_svm);
+}
+
 int start_svm(void)
 {
     u32 eax, ecx, edx;
@@ -825,7 +829,7 @@ int start_svm(void)
     hvm_funcs.disable = stop_svm;
 
     hvm_funcs.vcpu_initialise = svm_vcpu_initialise;
-    hvm_funcs.relinquish_guest_resources = svm_relinquish_guest_resources;
+    hvm_funcs.vcpu_destroy    = svm_vcpu_destroy;
 
     hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
     hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
@@ -851,40 +855,6 @@ int start_svm(void)
 }
 
 
-static void svm_relinquish_guest_resources(struct domain *d)
-{
-    struct vcpu *v;
-
-    for_each_vcpu ( d, v )
-    {
-        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
-            continue;
-
-        destroy_vmcb(&v->arch.hvm_svm);
-        kill_timer(&v->arch.hvm_vcpu.hlt_timer);
-        if ( VLAPIC(v) != NULL )
-        {
-            kill_timer(&VLAPIC(v)->vlapic_timer);
-            unmap_domain_page_global(VLAPIC(v)->regs);
-            free_domheap_page(VLAPIC(v)->regs_page);
-            xfree(VLAPIC(v));
-        }
-        hvm_release_assist_channel(v);
-    }
-
-    kill_timer(&d->arch.hvm_domain.pl_time.periodic_tm.timer);
-    rtc_deinit(d);
-    pmtimer_deinit(d);
-
-    if ( d->arch.hvm_domain.shared_page_va )
-        unmap_domain_page_global(
-            (void *)d->arch.hvm_domain.shared_page_va);
-
-    if ( d->arch.hvm_domain.buffered_io_va )
-        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
-}
-
-
 static void svm_migrate_timers(struct vcpu *v)
 {
     struct periodic_time *pt = 
index d2fee7677ce65993c5b58794023f4ba96a165e7c..9daa11969f7b49281b5b7ae344743d66f985c747 100644 (file)
@@ -351,9 +351,6 @@ void svm_do_launch(struct vcpu *v)
     if ( !asidpool_assign_next( vmcb, 0, core, core ))
         BUG();
 
-    vlapic_init(v);
-    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
-
     vmcb->ldtr.sel = 0;
     vmcb->ldtr.base = 0;
     vmcb->ldtr.limit = 0;
index 64820c767c8b9ab55c052e2d32142846316854a2..5524dd7036512b15d84a3729006655b556ef182e 100644 (file)
@@ -1016,7 +1016,7 @@ static int vlapic_reset(struct vlapic *vlapic)
 
 int vlapic_init(struct vcpu *v)
 {
-    struct vlapic *vlapic = NULL;
+    struct vlapic *vlapic;
 
     HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "vlapic_init %d", v->vcpu_id);
 
@@ -1047,3 +1047,18 @@ int vlapic_init(struct vcpu *v)
 
     return 0;
 }
+
+void vlapic_destroy(struct vcpu *v)
+{
+    struct vlapic *vlapic = VLAPIC(v);
+    
+    if ( vlapic == NULL )
+        return;
+
+    VLAPIC(v) = NULL;
+
+    kill_timer(&vlapic->vlapic_timer);
+    unmap_domain_page_global(vlapic->regs);
+    free_domheap_page(vlapic->regs_page);
+    xfree(vlapic);
+}
index 9c4e8defb37d8ab9cfdff4f70fdc7d2c9f384119..4f679e60aca8953d292a7ec520cd62a404ae5794 100644 (file)
@@ -193,11 +193,7 @@ void vmx_vmcs_enter(struct vcpu *v)
 {
     /*
      * NB. We must *always* run an HVM VCPU on its own VMCS, except for
-     * vmx_vmcs_enter/exit critical regions. This leads to some TODOs:
-     *  1. VMPTRLD as soon as we context-switch to a HVM VCPU.
-     *  2. VMCS destruction needs to happen later (from domain_destroy()).
-     * We can relax this a bit if a paused VCPU always commits its
-     * architectural state to a software structure.
+     * vmx_vmcs_enter/exit critical regions.
      */
     if ( v == current )
         return;
@@ -416,11 +412,6 @@ static int construct_vmcs(struct vcpu *v)
         cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
     error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
 
-    /* XXX Move this out. */
-    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
-    if ( vlapic_init(v) != 0 )
-        return -1;
-
 #ifdef __x86_64__ 
     /* VLAPIC TPR optimisation. */
     v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_TPR_SHADOW;
index a921e2a1c384460ccdfe4ff6a4aaa21adfd9e5bc..8ce1e1d503278d1ed94e28e5b3b7a3c218ba41e6 100644 (file)
@@ -74,36 +74,9 @@ static int vmx_vcpu_initialise(struct vcpu *v)
     return 0;
 }
 
-static void vmx_relinquish_guest_resources(struct domain *d)
+static void vmx_vcpu_destroy(struct vcpu *v)
 {
-    struct vcpu *v;
-
-    for_each_vcpu ( d, v )
-    {
-        vmx_destroy_vmcs(v);
-        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
-            continue;
-        kill_timer(&v->arch.hvm_vcpu.hlt_timer);
-        if ( VLAPIC(v) != NULL )
-        {
-            kill_timer(&VLAPIC(v)->vlapic_timer);
-            unmap_domain_page_global(VLAPIC(v)->regs);
-            free_domheap_page(VLAPIC(v)->regs_page);
-            xfree(VLAPIC(v));
-        }
-        hvm_release_assist_channel(v);
-    }
-
-    kill_timer(&d->arch.hvm_domain.pl_time.periodic_tm.timer);
-    rtc_deinit(d);
-    pmtimer_deinit(d);
-
-    if ( d->arch.hvm_domain.shared_page_va )
-        unmap_domain_page_global(
-            (void *)d->arch.hvm_domain.shared_page_va);
-
-    if ( d->arch.hvm_domain.buffered_io_va )
-        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
+    vmx_destroy_vmcs(v);
 }
 
 #ifdef __x86_64__
@@ -674,7 +647,7 @@ static void vmx_setup_hvm_funcs(void)
     hvm_funcs.disable = stop_vmx;
 
     hvm_funcs.vcpu_initialise = vmx_vcpu_initialise;
-    hvm_funcs.relinquish_guest_resources = vmx_relinquish_guest_resources;
+    hvm_funcs.vcpu_destroy    = vmx_vcpu_destroy;
 
     hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
     hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs;
index 851b37830e81bcb8d725d5d681a13cb3c6f16ce7..6576e20839bef3288a6d253d2603d8864388d116 100644 (file)
@@ -33,10 +33,10 @@ struct hvm_function_table {
     void (*disable)(void);
 
     /*
-     * Initialize/relinguish HVM guest resources
+     * Initialise/destroy HVM VCPU resources
      */
     int  (*vcpu_initialise)(struct vcpu *v);
-    void (*relinquish_guest_resources)(struct domain *d);
+    void (*vcpu_destroy)(struct vcpu *v);
 
     /*
      * Store and load guest state:
@@ -92,13 +92,10 @@ hvm_disable(void)
 }
 
 int hvm_domain_initialise(struct domain *d);
-int hvm_vcpu_initialise(struct vcpu *v);
+void hvm_domain_destroy(struct domain *d);
 
-static inline void
-hvm_relinquish_guest_resources(struct domain *d)
-{
-    hvm_funcs.relinquish_guest_resources(d);
-}
+int hvm_vcpu_initialise(struct vcpu *v);
+void hvm_vcpu_destroy(struct vcpu *v);
 
 static inline void
 hvm_store_cpu_guest_regs(
index b784d059fe318185342dc3fce7ff2749bc97cec7..66c48b438c19e9c73dd0d40740128483deb25758 100644 (file)
@@ -151,7 +151,6 @@ extern void pic_irq_request(void *data, int level);
 extern void hvm_pic_assist(struct vcpu *v);
 extern int cpu_get_interrupt(struct vcpu *v, int *type);
 extern int cpu_has_pending_irq(struct vcpu *v);
-extern void hvm_release_assist_channel(struct vcpu *v);
 
 // XXX - think about this, maybe use bit 30 of the mfn to signify an MMIO frame.
 #define mmio_space(gpa) (!VALID_MFN(get_mfn_from_gpfn((gpa) >> PAGE_SHIFT)))
index 3827c117666213ee4f3545130329aa18a8e28a94..b53c9e149b908ed6fab6de9d4490a4e8ee6c9906 100644 (file)
@@ -77,7 +77,8 @@ int vlapic_find_highest_irr(struct vlapic *vlapic);
 
 int cpu_get_apic_interrupt(struct vcpu *v, int *mode);
 
-int vlapic_init(struct vcpu *vc);
+int  vlapic_init(struct vcpu *v);
+void vlapic_destroy(struct vcpu *v);
 
 void vlapic_msr_set(struct vlapic *vlapic, uint64_t value);