free_xenheap_pages(v, get_order_from_bytes(sizeof(*v)));
}
-int vcpu_initialise(struct vcpu *v)
+int arch_vcpu_create(struct vcpu *v)
{
int rc = 0;
return rc;
fail:
- vcpu_destroy(v);
+ arch_vcpu_destroy(v);
return rc;
}
-void vcpu_destroy(struct vcpu *v)
+void arch_vcpu_destroy(struct vcpu *v)
{
vcpu_timer_destroy(v);
vcpu_vgic_free(v);
struct vcpu *__init alloc_dom0_vcpu0(struct domain *dom0)
{
- return alloc_vcpu(dom0, 0, 0);
+ return vcpu_create(dom0, 0, 0);
}
static unsigned int __init get_11_allocation_size(paddr_t size)
for ( i = 1, cpu = 0; i < d->max_vcpus; i++ )
{
cpu = cpumask_cycle(cpu, &cpu_online_map);
- if ( alloc_vcpu(d, i, cpu) == NULL )
+ if ( vcpu_create(d, i, cpu) == NULL )
{
printk("Failed to allocate dom0 vcpu %d on pcpu %d\n", i, cpu);
break;
unsigned int prev_cpu)
{
unsigned int cpu = cpumask_cycle(prev_cpu, &dom0_cpus);
- struct vcpu *v = alloc_vcpu(d, vcpu_id, cpu);
+ struct vcpu *v = vcpu_create(d, vcpu_id, cpu);
if ( v )
{
free_xenheap_page(v);
}
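Both dom0-building hunks above use the same placement pattern: the previously assigned pCPU is fed back into cpumask_cycle(), which returns the next CPU in the mask and wraps past the last one, so dom0's vCPUs are spread round-robin across the online (or dom0-restricted) pCPUs. A worked example, assuming four online pCPUs and six dom0 vCPUs (vCPU 0 is placed separately via alloc_dom0_vcpu0()):

/*
 * Illustration only, not part of the patch.  With cpu_online_map = {0,1,2,3}
 * and the loop above starting from cpu = 0:
 *
 *   vCPU:  1  2  3  4  5
 *   pCPU:  1  2  3  0  1    <- cpumask_cycle() wraps after pCPU 3
 */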
-int vcpu_initialise(struct vcpu *v)
+int arch_vcpu_create(struct vcpu *v)
{
struct domain *d = v->domain;
int rc;
return rc;
}
-void vcpu_destroy(struct vcpu *v)
+void arch_vcpu_destroy(struct vcpu *v)
{
xfree(v->arch.vm_event);
v->arch.vm_event = NULL;
v->vcpu_info_mfn = INVALID_MFN;
}
-struct vcpu *alloc_vcpu(
+struct vcpu *vcpu_create(
struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
struct vcpu *v;
if ( sched_init_vcpu(v, cpu_id) != 0 )
goto fail_wq;
- if ( vcpu_initialise(v) != 0 )
+ if ( arch_vcpu_create(v) != 0 )
{
sched_destroy_vcpu(v);
fail_wq:
if ( (v = d->vcpu[i]) == NULL )
continue;
tasklet_kill(&v->continue_hypercall_tasklet);
- vcpu_destroy(v);
+ arch_vcpu_destroy(v);
sched_destroy_vcpu(v);
destroy_waitqueue_vcpu(v);
}
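The common-code hunks above also pin down the ordering contract: vcpu_create() sets up generic scheduler state before calling the arch hook, and failure unwinds in reverse. A condensed sketch of the create side, keeping only the calls visible in the hunks (allocation, the waitqueue setup, and publishing into d->vcpu[] are elided into comments):

struct vcpu *vcpu_create(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    /* ... allocate v and initialise its generic fields ... */

    if ( sched_init_vcpu(v, cpu_id) != 0 )      /* generic state first */
        goto fail_wq;

    if ( arch_vcpu_create(v) != 0 )             /* arch hook runs last */
    {
        sched_destroy_vcpu(v);                  /* unwind in reverse */
        goto fail_wq;
    }

    /* ... publish v in d->vcpu[] ... */
    return v;

 fail_wq:
    destroy_waitqueue_vcpu(v);
    /* ... free v ... */
    return NULL;
}

The destroy loop in the hunk above runs the same steps in reverse on domain teardown: arch_vcpu_destroy(), then sched_destroy_vcpu(), then destroy_waitqueue_vcpu().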
cpumask_any(online) :
cpumask_cycle(d->vcpu[i-1]->processor, online);
- if ( alloc_vcpu(d, i, cpu) == NULL )
+ if ( vcpu_create(d, i, cpu) == NULL )
goto maxvcpu_out;
}
return 0;
if ( idle_vcpu[cpu] == NULL )
- alloc_vcpu(idle_vcpu[0]->domain, cpu, cpu);
+ vcpu_create(idle_vcpu[0]->domain, cpu, cpu);
else
{
struct vcpu *idle = idle_vcpu[cpu];
BUG_ON(IS_ERR(idle_domain));
idle_domain->vcpu = idle_vcpu;
idle_domain->max_vcpus = nr_cpu_ids;
- if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
+ if ( vcpu_create(idle_domain, 0, 0) == NULL )
BUG();
this_cpu(schedule_data).sched_priv = SCHED_OP(&ops, alloc_pdata, 0);
BUG_ON(IS_ERR(this_cpu(schedule_data).sched_priv));
struct compat_vcpu_guest_context *cmp;
} vcpu_guest_context_u __attribute__((__transparent_union__));
-struct vcpu *alloc_vcpu(
+struct vcpu *vcpu_create(
struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);
unsigned int dom0_max_vcpus(void);
/*
* Initialise/destroy arch-specific details of a VCPU.
- * - vcpu_initialise() is called after the basic generic fields of the
+ * - arch_vcpu_create() is called after the basic generic fields of the
* VCPU structure are initialised. Many operations can be applied to the
* VCPU at this point (e.g., vcpu_pause()).
- * - vcpu_destroy() is called only if vcpu_initialise() previously succeeded.
+ * - arch_vcpu_destroy() is called only if arch_vcpu_create() previously
+ * succeeded.
*/
-int vcpu_initialise(struct vcpu *v);
-void vcpu_destroy(struct vcpu *v);
+int arch_vcpu_create(struct vcpu *v);
+void arch_vcpu_destroy(struct vcpu *v);
int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset);
void unmap_vcpu_info(struct vcpu *v);
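The updated comment encodes an asymmetry worth spelling out: generic code calls arch_vcpu_destroy() only after a successful arch_vcpu_create(), so a failing arch_vcpu_create() must clean up after itself. That is exactly what the ARM hunk at the top does by jumping to a fail: label that calls arch_vcpu_destroy(). A minimal skeleton of the pattern; setup_stage_a()/setup_stage_b() are hypothetical placeholders for the real per-arch work:

/* Sketch only: setup_stage_a()/setup_stage_b() are hypothetical. */
int arch_vcpu_create(struct vcpu *v)
{
    int rc;

    if ( (rc = setup_stage_a(v)) != 0 )
        goto fail;

    if ( (rc = setup_stage_b(v)) != 0 )
        goto fail;

    return 0;

 fail:
    /*
     * Generic code will not call arch_vcpu_destroy() for us after a
     * failure, so undo the partial initialisation here (the ARM
     * implementation reuses arch_vcpu_destroy() for this).
     */
    arch_vcpu_destroy(v);
    return rc;
}

Reusing arch_vcpu_destroy() in the failure path, as ARM does, requires it to cope with partially initialised state.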