deallocate_rid_range(d);
}
-int arch_vcpu_reset(struct vcpu *v)
+void arch_vcpu_reset(struct vcpu *v)
{
/* FIXME: Stub for now */
- return 0;
}
/* Here it is assumed that all of the CPUs have the same RSE.N_STACKED_PHYS */
#undef c
}
-int arch_vcpu_reset(struct vcpu *v)
+void arch_vcpu_reset(struct vcpu *v)
{
destroy_gdt(v);
vcpu_destroy_pagetables(v);
- return 0;
}
/*
kill_timer(&h->timers[i]);
}
+void hpet_reset(struct domain *d)
+{
+ hpet_deinit(d);
+ hpet_init(d->vcpu[0]);
+}
return rc;
}
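+/* Put a vcpu (which must not be runnable) into 16-bit real mode at CS:IP. */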
+void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
+{
+    struct domain *d = v->domain;
+ struct vcpu_guest_context *ctxt;
+ struct segment_register reg;
+
+ BUG_ON(vcpu_runnable(v));
+
+ domain_lock(d);
+
+ if ( v->is_initialised )
+ goto out;
+
+ ctxt = &v->arch.guest_context;
+ memset(ctxt, 0, sizeof(*ctxt));
+ ctxt->flags = VGCF_online;
+ ctxt->user_regs.eflags = 2;
+ ctxt->user_regs.edx = 0x00000f00;
+ ctxt->user_regs.eip = ip;
+
+ v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
+ hvm_update_guest_cr(v, 0);
+
+ v->arch.hvm_vcpu.guest_cr[2] = 0;
+ hvm_update_guest_cr(v, 2);
+
+ v->arch.hvm_vcpu.guest_cr[3] = 0;
+ hvm_update_guest_cr(v, 3);
+
+ v->arch.hvm_vcpu.guest_cr[4] = 0;
+ hvm_update_guest_cr(v, 4);
+
+ v->arch.hvm_vcpu.guest_efer = 0;
+ hvm_update_guest_efer(v);
+
+ reg.sel = cs;
+ reg.base = (uint32_t)reg.sel << 4;
+ reg.limit = 0xffff;
+ reg.attr.bytes = 0x09b;
+    hvm_set_segment_register(v, x86_seg_cs, &reg);
+
+ reg.sel = reg.base = 0;
+ reg.limit = 0xffff;
+ reg.attr.bytes = 0x093;
+    hvm_set_segment_register(v, x86_seg_ds, &reg);
+    hvm_set_segment_register(v, x86_seg_es, &reg);
+    hvm_set_segment_register(v, x86_seg_fs, &reg);
+    hvm_set_segment_register(v, x86_seg_gs, &reg);
+    hvm_set_segment_register(v, x86_seg_ss, &reg);
+
+ reg.attr.bytes = 0x82; /* LDT */
+    hvm_set_segment_register(v, x86_seg_ldtr, &reg);
+
+ reg.attr.bytes = 0x8b; /* 32-bit TSS (busy) */
+    hvm_set_segment_register(v, x86_seg_tr, &reg);
+
+ reg.attr.bytes = 0;
+    hvm_set_segment_register(v, x86_seg_gdtr, &reg);
+    hvm_set_segment_register(v, x86_seg_idtr, &reg);
+
+ /* Sync AP's TSC with BSP's. */
+ v->arch.hvm_vcpu.cache_tsc_offset =
+ v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
+ hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
+
+ v->arch.flags |= TF_kernel_mode;
+ v->is_initialised = 1;
+ clear_bit(_VPF_down, &v->pause_flags);
+
+ out:
+ domain_unlock(d);
+}
+
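+/*
+ * Enter ACPI S3: reset every vcpu and the emulated platform devices, then
+ * park vcpu0 at the reset vector. The domain remains paused until
+ * hvm_s3_resume() is invoked.
+ */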
+static void hvm_s3_suspend(struct domain *d)
+{
+ struct vcpu *v;
+
+ domain_pause(d);
+ domain_lock(d);
+
+ if ( (d->vcpu[0] == NULL) ||
+ test_and_set_bool(d->arch.hvm_domain.is_s3_suspended) )
+ {
+ domain_unlock(d);
+ domain_unpause(d);
+ return;
+ }
+
+ for_each_vcpu ( d, v )
+ {
+ vlapic_reset(vcpu_vlapic(v));
+ vcpu_reset(v);
+ }
+
+ vpic_reset(d);
+ vioapic_reset(d);
+ pit_reset(d);
+ rtc_reset(d);
+ pmtimer_reset(d);
+ hpet_reset(d);
+
+ hvm_vcpu_reset_state(d->vcpu[0], 0xf000, 0xfff0);
+
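+    /* Deliberately no domain_unpause(): the domain sleeps until S3 resume. */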
+ domain_unlock(d);
+}
+
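+/* Leave ACPI S3: unpause the domain if it was previously suspended. */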
+static void hvm_s3_resume(struct domain *d)
+{
+ if ( test_and_clear_bool(d->arch.hvm_domain.is_s3_suspended) )
+ domain_unpause(d);
+}
+
static int hvmop_set_isa_irq_level(
XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t) uop)
{
}
domain_unpause(d);
break;
+ case HVM_PARAM_ACPI_S_STATE:
+ /* Privileged domains only, as we must domain_pause(d). */
+ rc = -EPERM;
+ if ( !IS_PRIV_FOR(current->domain, d) )
+ break;
+
+ rc = 0;
+ if ( a.value == 3 )
+ hvm_s3_suspend(d);
+ else if ( a.value == 0 )
+ hvm_s3_resume(d);
+ else
+ rc = -EINVAL;
+
+ break;
}
if ( rc == 0 )
}
else
{
- a.value = d->arch.hvm_domain.params[a.index];
+ switch ( a.index )
+ {
+ case HVM_PARAM_ACPI_S_STATE:
+ a.value = d->arch.hvm_domain.is_s3_suspended ? 3 : 0;
+ break;
+ default:
+ a.value = d->arch.hvm_domain.params[a.index];
+ break;
+ }
rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
}
HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load, 1, HVMSR_PER_DOM);
-void pit_init(struct vcpu *v, unsigned long cpu_khz)
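+/*
+ * Return the PIT to its power-on state: stop the channel 0 periodic timer
+ * and reinitialise all three channels.
+ */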
+void pit_reset(struct domain *d)
{
- PITState *pit = vcpu_vpit(v);
+ PITState *pit = domain_vpit(d);
struct hvm_hw_pit_channel *s;
int i;
- spin_lock_init(&pit->lock);
-
- /* Some sub-functions assert that they are called with the lock held. */
- spin_lock(&pit->lock);
-
+ destroy_periodic_time(&pit->pt0);
pit->pt0.source = PTSRC_isa;
- register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
- register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
- ticks_per_sec(v) = cpu_khz * (int64_t)1000;
+ spin_lock(&pit->lock);
for ( i = 0; i < 3; i++ )
{
spin_unlock(&pit->lock);
}
+void pit_init(struct vcpu *v, unsigned long cpu_khz)
+{
+ PITState *pit = vcpu_vpit(v);
+
+ spin_lock_init(&pit->lock);
+
+ register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
+ register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
+
+ ticks_per_sec(v) = cpu_khz * (int64_t)1000;
+
+ pit_reset(v->domain);
+}
+
void pit_deinit(struct domain *d)
{
PITState *pit = domain_vpit(d);
PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
kill_timer(&s->timer);
}
+
+void pmtimer_reset(struct domain *d)
+{
+ /* Reset the counter. */
+ d->arch.hvm_domain.pl_time.vpmt.pm.tmr_val = 0;
+}
kill_timer(&s->second_timer);
kill_timer(&s->second_timer2);
}
+
+void rtc_reset(struct domain *d)
+{
+ RTCState *s = domain_vrtc(d);
+ destroy_periodic_time(&s->pt);
+}
HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
-int vioapic_init(struct domain *d)
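+/* Mask every IOAPIC redirection entry and restore the default base address. */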
+void vioapic_reset(struct domain *d)
{
- struct hvm_vioapic *vioapic;
+ struct hvm_vioapic *vioapic = d->arch.hvm_domain.vioapic;
int i;
- vioapic = d->arch.hvm_domain.vioapic = xmalloc(struct hvm_vioapic);
- if ( vioapic == NULL )
- return -ENOMEM;
-
- vioapic->domain = d;
-
memset(&vioapic->hvm_hw_vioapic, 0, sizeof(vioapic->hvm_hw_vioapic));
for ( i = 0; i < VIOAPIC_NUM_PINS; i++ )
vioapic->hvm_hw_vioapic.redirtbl[i].fields.mask = 1;
vioapic->hvm_hw_vioapic.base_address = VIOAPIC_DEFAULT_BASE_ADDRESS;
+}
+
+int vioapic_init(struct domain *d)
+{
+ if ( (d->arch.hvm_domain.vioapic == NULL) &&
+ ((d->arch.hvm_domain.vioapic = xmalloc(struct hvm_vioapic)) == NULL) )
+ return -ENOMEM;
+
+ d->arch.hvm_domain.vioapic->domain = d;
+ vioapic_reset(d);
return 0;
}
static int vlapic_accept_sipi(struct vcpu *v, int trampoline_vector)
{
- struct domain *d = current->domain;
- struct vcpu_guest_context *ctxt;
- struct segment_register reg;
-
/* If the VCPU is not on its way down we have nothing to do. */
if ( !test_bit(_VPF_down, &v->pause_flags) )
return X86EMUL_OKAY;
if ( !vlapic_vcpu_pause_async(v) )
return X86EMUL_RETRY;
- domain_lock(d);
-
- if ( v->is_initialised )
- goto out;
-
- ctxt = &v->arch.guest_context;
- memset(ctxt, 0, sizeof(*ctxt));
- ctxt->flags = VGCF_online;
- ctxt->user_regs.eflags = 2;
-
- v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
- hvm_update_guest_cr(v, 0);
-
- v->arch.hvm_vcpu.guest_cr[2] = 0;
- hvm_update_guest_cr(v, 2);
-
- v->arch.hvm_vcpu.guest_cr[3] = 0;
- hvm_update_guest_cr(v, 3);
-
- v->arch.hvm_vcpu.guest_cr[4] = 0;
- hvm_update_guest_cr(v, 4);
-
- v->arch.hvm_vcpu.guest_efer = 0;
- hvm_update_guest_efer(v);
-
- reg.sel = trampoline_vector << 8;
- reg.base = (uint32_t)reg.sel << 4;
- reg.limit = 0xffff;
- reg.attr.bytes = 0x89b;
-    hvm_set_segment_register(v, x86_seg_cs, &reg);
-
- reg.sel = reg.base = 0;
- reg.limit = 0xffff;
- reg.attr.bytes = 0x893;
-    hvm_set_segment_register(v, x86_seg_ds, &reg);
-    hvm_set_segment_register(v, x86_seg_es, &reg);
-    hvm_set_segment_register(v, x86_seg_fs, &reg);
-    hvm_set_segment_register(v, x86_seg_gs, &reg);
-    hvm_set_segment_register(v, x86_seg_ss, &reg);
-
- reg.attr.bytes = 0x82; /* LDT */
-    hvm_set_segment_register(v, x86_seg_ldtr, &reg);
-
- reg.attr.bytes = 0x8b; /* 32-bit TSS (busy) */
-    hvm_set_segment_register(v, x86_seg_tr, &reg);
+ hvm_vcpu_reset_state(v, trampoline_vector << 8, 0);
- reg.attr.bytes = 0;
-    hvm_set_segment_register(v, x86_seg_gdtr, &reg);
-    hvm_set_segment_register(v, x86_seg_idtr, &reg);
-
- /* Sync AP's TSC with BSP's. */
- v->arch.hvm_vcpu.cache_tsc_offset =
- v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
- hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
-
- v->arch.flags |= TF_kernel_mode;
- v->is_initialised = 1;
- clear_bit(_VPF_down, &v->pause_flags);
-
- out:
- domain_unlock(d);
vcpu_unpause(v);
+
return X86EMUL_OKAY;
}
if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
memflags |= MEMF_bits(32);
#endif
-
- vlapic->regs_page = alloc_domheap_page(NULL, memflags);
- if ( vlapic->regs_page == NULL )
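+    /* Reuse the register page if a previous call already allocated it. */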
+    if ( vlapic->regs_page == NULL )
{
- dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
- v->domain->domain_id, v->vcpu_id);
- return -ENOMEM;
+ vlapic->regs_page = alloc_domheap_page(NULL, memflags);
+ if ( vlapic->regs_page == NULL )
+ {
+ dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
+ v->domain->domain_id, v->vcpu_id);
+ return -ENOMEM;
+ }
}
-
- vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
- if ( vlapic->regs == NULL )
+    if ( vlapic->regs == NULL )
{
- dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n",
- v->domain->domain_id, v->vcpu_id);
- return -ENOMEM;
+ vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
+ if ( vlapic->regs == NULL )
+ {
+ dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n",
+ v->domain->domain_id, v->vcpu_id);
+ return -ENOMEM;
+ }
}
-
clear_page(vlapic->regs);
vlapic_reset(vlapic);
HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load, 2, HVMSR_PER_DOM);
-void vpic_init(struct domain *d)
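+/* Return both emulated PICs to their power-on defaults. */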
+void vpic_reset(struct domain *d)
{
struct hvm_hw_vpic *vpic;
memset(vpic, 0, sizeof(*vpic));
vpic->is_master = 1;
vpic->elcr = 1 << 2;
- register_portio_handler(d, 0x20, 2, vpic_intercept_pic_io);
- register_portio_handler(d, 0x4d0, 1, vpic_intercept_elcr_io);
/* Slave PIC. */
vpic++;
memset(vpic, 0, sizeof(*vpic));
+}
+
+void vpic_init(struct domain *d)
+{
+ vpic_reset(d);
+
+ register_portio_handler(d, 0x20, 2, vpic_intercept_pic_io);
register_portio_handler(d, 0xa0, 2, vpic_intercept_pic_io);
+
+ register_portio_handler(d, 0x4d0, 1, vpic_intercept_elcr_io);
register_portio_handler(d, 0x4d1, 1, vpic_intercept_elcr_io);
}
return arch_set_info_guest(v, ctxt);
}
-int vcpu_reset(struct vcpu *v)
+void vcpu_reset(struct vcpu *v)
{
struct domain *d = v->domain;
- int rc;
domain_pause(d);
domain_lock(d);
- rc = arch_vcpu_reset(v);
- if ( rc != 0 )
- goto out;
+ arch_vcpu_reset(v);
set_bit(_VPF_down, &v->pause_flags);
v->nmi_masked = 0;
clear_bit(_VPF_blocked, &v->pause_flags);
- out:
domain_unlock(v->domain);
domain_unpause(d);
-
- return rc;
}
if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
{
- ret = vcpu_reset(v);
+ vcpu_reset(v);
+ ret = 0;
goto svc_out;
}
bool_t hap_enabled;
bool_t qemu_mapcache_invalidate;
+ bool_t is_s3_suspended;
union {
struct vmx_domain vmx;
void hvm_vcpu_down(struct vcpu *v);
int hvm_vcpu_cacheattr_init(struct vcpu *v);
void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
+void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);
void hvm_send_assist_req(struct vcpu *v);
int vioapic_init(struct domain *d);
void vioapic_deinit(struct domain *d);
+void vioapic_reset(struct domain *d);
void vioapic_irq_positive_edge(struct domain *d, unsigned int irq);
void vioapic_update_EOI(struct domain *d, int vector);
void vpic_irq_positive_edge(struct domain *d, int irq);
void vpic_irq_negative_edge(struct domain *d, int irq);
void vpic_init(struct domain *d);
+void vpic_reset(struct domain *d);
int vpic_ack_pending_irq(struct vcpu *v);
int is_periodic_irq(struct vcpu *v, int irq, int type);
void destroy_periodic_time(struct periodic_time *pt);
int pv_pit_handler(int port, int data, int write);
+void pit_reset(struct domain *d);
+
void pit_init(struct vcpu *v, unsigned long cpu_khz);
void pit_stop_channel0_irq(PITState * pit);
void pit_deinit(struct domain *d);
void rtc_init(struct vcpu *v, int base);
void rtc_migrate_timers(struct vcpu *v);
void rtc_deinit(struct domain *d);
+void rtc_reset(struct domain *d);
+
void pmtimer_init(struct vcpu *v);
void pmtimer_deinit(struct domain *d);
+void pmtimer_reset(struct domain *d);
void hpet_migrate_timers(struct vcpu *v);
void hpet_init(struct vcpu *v);
void hpet_deinit(struct domain *d);
+void hpet_reset(struct domain *d);
#endif /* __ASM_X86_HVM_VPT_H__ */
/* Device Model domain, defaults to 0. */
#define HVM_PARAM_DM_DOMAIN 13
-#define HVM_NR_PARAMS 14
+/* ACPI S state: currently support S0 and S3 on x86. */
+#define HVM_PARAM_ACPI_S_STATE 14
+
+#define HVM_NR_PARAMS 15
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
int boot_vcpu(
struct domain *d, int vcpuid, vcpu_guest_context_u ctxt);
struct vcpu *alloc_idle_vcpu(unsigned int cpu_id);
-int vcpu_reset(struct vcpu *v);
+void vcpu_reset(struct vcpu *v);
struct domain *alloc_domain(domid_t domid);
void free_domain(struct domain *d);
void arch_dump_domain_info(struct domain *d);
-int arch_vcpu_reset(struct vcpu *v);
+void arch_vcpu_reset(struct vcpu *v);
extern unsigned int xen_processor_pmbits;