static void paravirt_ctxt_switch_from(struct vcpu *v);
static void paravirt_ctxt_switch_to(struct vcpu *v);
+static void vcpu_destroy_pagetables(struct vcpu *v);
+
static void continue_idle_domain(struct vcpu *v)
{
reset_stack_and_jump(idle_loop);
#undef c
}
+int arch_vcpu_reset(struct vcpu *v)
+{
+ destroy_gdt(v);
+ vcpu_destroy_pagetables(v);
+ return 0;
+}
+
long
arch_do_vcpu_op(
int cmd, struct vcpu *v, XEN_GUEST_HANDLE(void) arg)
spin_unlock_recursive(&d->page_alloc_lock);
}
-void domain_relinquish_resources(struct domain *d)
+static void vcpu_destroy_pagetables(struct vcpu *v)
{
- struct vcpu *v;
+ struct domain *d = v->domain;
unsigned long pfn;
- BUG_ON(!cpus_empty(d->domain_dirty_cpumask));
-
- /* Drop the in-use references to page-table bases. */
- for_each_vcpu ( d, v )
- {
- /* Drop ref to guest_table (from new_guest_cr3(), svm/vmx cr3 handling,
- * or sh_update_paging_modes()) */
#ifdef CONFIG_COMPAT
- if ( IS_COMPAT(d) )
- {
- if ( is_hvm_vcpu(v) )
- pfn = pagetable_get_pfn(v->arch.guest_table);
- else
- pfn = l4e_get_pfn(*(l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)));
+ if ( IS_COMPAT(d) )
+ {
+ if ( is_hvm_vcpu(v) )
+ pfn = pagetable_get_pfn(v->arch.guest_table);
+ else
+ pfn = l4e_get_pfn(*(l4_pgentry_t *)
+ __va(pagetable_get_paddr(v->arch.guest_table)));
- if ( pfn != 0 )
- {
- if ( shadow_mode_refcounts(d) )
- put_page(mfn_to_page(pfn));
- else
- put_page_and_type(mfn_to_page(pfn));
- }
- continue;
- }
-#endif
- pfn = pagetable_get_pfn(v->arch.guest_table);
if ( pfn != 0 )
{
if ( shadow_mode_refcounts(d) )
put_page(mfn_to_page(pfn));
else
put_page_and_type(mfn_to_page(pfn));
-#ifdef __x86_64__
- if ( pfn == pagetable_get_pfn(v->arch.guest_table_user) )
- v->arch.guest_table_user = pagetable_null();
-#endif
- v->arch.guest_table = pagetable_null();
}
+ v->arch.guest_table = pagetable_null();
+ v->arch.cr3 = 0;
+ return;
+ }
+#endif
+
+ pfn = pagetable_get_pfn(v->arch.guest_table);
+ if ( pfn != 0 )
+ {
+ if ( shadow_mode_refcounts(d) )
+ put_page(mfn_to_page(pfn));
+ else
+ put_page_and_type(mfn_to_page(pfn));
#ifdef __x86_64__
- /* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */
- pfn = pagetable_get_pfn(v->arch.guest_table_user);
- if ( pfn != 0 )
- {
- if ( shadow_mode_refcounts(d) )
- put_page(mfn_to_page(pfn));
- else
- put_page_and_type(mfn_to_page(pfn));
+ if ( pfn == pagetable_get_pfn(v->arch.guest_table_user) )
v->arch.guest_table_user = pagetable_null();
- }
#endif
+ v->arch.guest_table = pagetable_null();
+ }
+
+#ifdef __x86_64__
+ /* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */
+ pfn = pagetable_get_pfn(v->arch.guest_table_user);
+ if ( pfn != 0 )
+ {
+ if ( shadow_mode_refcounts(d) )
+ put_page(mfn_to_page(pfn));
+ else
+ put_page_and_type(mfn_to_page(pfn));
+ v->arch.guest_table_user = pagetable_null();
}
+#endif
+
+ v->arch.cr3 = 0;
+}
+
+void domain_relinquish_resources(struct domain *d)
+{
+ struct vcpu *v;
+
+ BUG_ON(!cpus_empty(d->domain_dirty_cpumask));
+
+ /* Drop the in-use references to page-table bases. */
+ for_each_vcpu ( d, v )
+ vcpu_destroy_pagetables(v);
/* Tear down shadow mode stuff. */
shadow_teardown(d);
*/
#include <xen/config.h>
+#include <xen/compat.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
return -EINVAL;
-
+
+ if ( IS_COMPAT(v->domain)
+ ? compat_handle_is_null(vcpucontext.cmp->ctxt)
+ : guest_handle_is_null(vcpucontext.nat->ctxt) )
+ return vcpu_reset(v);
+
#ifdef CONFIG_COMPAT
BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
< sizeof(struct compat_vcpu_guest_context));
return arch_set_info_guest(v, ctxt);
}
+int vcpu_reset(struct vcpu *v)
+{
+ struct domain *d = v->domain;
+ int rc;
+
+ domain_pause(d);
+ LOCK_BIGLOCK(d);
+
+ rc = arch_vcpu_reset(v);
+ if ( rc != 0 )
+ goto out;
+
+ set_bit(_VCPUF_down, &v->vcpu_flags);
+
+ clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
+ clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
+ clear_bit(_VCPUF_blocked, &v->vcpu_flags);
+ clear_bit(_VCPUF_initialised, &v->vcpu_flags);
+ clear_bit(_VCPUF_nmi_pending, &v->vcpu_flags);
+ clear_bit(_VCPUF_nmi_masked, &v->vcpu_flags);
+ clear_bit(_VCPUF_polling, &v->vcpu_flags);
+
+ out:
+    UNLOCK_BIGLOCK(d);
+ domain_unpause(d);
+
+ return rc;
+}
+
+
long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
struct domain *d = current->domain;