/* Ignore multiple shutdown requests. */
static int shutting_down = SHUTDOWN_INVALID;
-static void __do_suspend(void)
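+/* Without CONFIG_HOTPLUG_CPU we cannot take the other cpus down or bring
+   them back up; stub the calls so they simply report failure. */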
+#ifndef CONFIG_HOTPLUG_CPU
+#define cpu_down(x) (-EOPNOTSUPP)
+#define cpu_up(x) (-EOPNOTSUPP)
+#endif
+
+static void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
+{
+ int r;
+ int gdt_pages;
+
+ r = HYPERVISOR_vcpu_pickle(vcpu, ctxt);
+ if (r != 0)
+ panic("pickling vcpu %d -> %d!\n", vcpu, r);
+
+ /* Translate from machine to physical addresses where necessary,
+ so that they can be translated back into the new machine
+ address space after resume. libxc is responsible for doing
+ this for vcpu 0; we must do it for the other vcpus ourselves. */
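+ /* Each 4K page holds 512 eight-byte GDT entries. */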
+ gdt_pages = (ctxt->gdt_ents + 511) / 512;
+ ctxt->ctrlreg[3] = machine_to_phys(ctxt->ctrlreg[3]);
+ for (r = 0; r < gdt_pages; r++)
+ ctxt->gdt_frames[r] = mfn_to_pfn(ctxt->gdt_frames[r]);
+}
+
+void _restore_vcpu(int cpu);
+
+atomic_t vcpus_rebooting;
+
+static int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
+{
+ int r;
+ int gdt_pages = (ctxt->gdt_ents + 511) / 512;
+
+ /* This is something of a hack: it implicitly relies on the vcpu
+ having stopped at a point where all of the call-clobbered
+ registers are already dead. */
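+ /* Push the saved eip as a return address and divert eip to the
+ _restore_vcpu trampoline, so that when it returns execution
+ resumes where the vcpu was suspended. */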
+ ctxt->user_regs.esp -= 4;
+ ((unsigned long *)ctxt->user_regs.esp)[0] = ctxt->user_regs.eip;
+ ctxt->user_regs.eip = (unsigned long)_restore_vcpu;
+
+ /* De-canonicalise. libxc handles this for vcpu 0, but we need
+ to do it for the other vcpus. */
+ ctxt->ctrlreg[3] = phys_to_machine(ctxt->ctrlreg[3]);
+ for (r = 0; r < gdt_pages; r++)
+ ctxt->gdt_frames[r] = pfn_to_mfn(ctxt->gdt_frames[r]);
+
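+ /* The new vcpu is expected to clear vcpus_rebooting from
+ _restore_vcpu once it is up; we spin on the flag below. */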
+ atomic_set(&vcpus_rebooting, 1);
+ r = HYPERVISOR_boot_vcpu(vcpu, ctxt);
+ if (r != 0) {
+ printk(KERN_EMERG "Failed to reboot vcpu %d (%d)\n", vcpu, r);
+ return -1;
+ }
+
+ /* Make sure we wait for the new vcpu to come up before trying to do
+ anything with it or starting the next one. */
+ while (atomic_read(&vcpus_rebooting))
+ barrier();
+
+ return 0;
+}
+
- extern unsigned uber_debug;
-
+static int __do_suspend(void *ignore)
{
int i, j;
suspend_record_t *suspend_record;
usbif_resume();
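+ /* Restore the vcpu contexts we pickled before the suspend. */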
+ for_each_cpu_mask(i, prev_present_cpus) {
+ restore_vcpu_context(i, &suspended_cpu_records[i]);
+ }
+
__sti();
- uber_debug = 0;
-
+ out_reenable_cpus:
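+ /* Bring the cpus that were online before the suspend back up. */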
+ for_each_cpu_mask(i, prev_online_cpus) {
+ j = cpu_up(i);
+ if (j != 0) {
+ printk(KERN_CRIT "Failed to bring cpu %d back up (%d).\n",
+ i, j);
+ err = j;
+ }
+ }
+
out:
if ( suspend_record != NULL )
free_page((unsigned long)suspend_record);