This section stores the array of mapped_regs_t.
The size of array is stored in xch_nr_vcpus member of header
note descriptor in .note.Xen note section.
- This section is ia64 specific and must exist for ia64 domain.
- This section must not exist for non-ia64 domain.
+ This section is ia64-specific and must exist for an ia64 PV
+ domain.
+ This section must not exist for a non-ia64 domain or an ia64
+ HVM domain.
note section
- EI_CLASS member of elf header was changed to ELFCLASS64 independent of
architecture. This is mainly for x86_32pae.
The format version isn't bumped because analysis tools can distinguish it.
+- The .xen_ia64_mapped_regs section is created only for an ia64 PV domain.
+ For an ia64 HVM domain, this section does not exist.
* |.xen_prstatus |
* | vcpu_guest_context_t[nr_vcpus] |
* +--------------------------------------------------------+
- * |.xen_ia64_mmapped_regs if ia64 |
+ * |.xen_ia64_mmapped_regs if ia64 pv |
* | mmapped_regs_t[nr_vcpus] |
* +--------------------------------------------------------+
* |.xen_shared_info if possible |
int xc_handle, uint32_t domid)
{
mapped_regs_t* mapped_regs;
+
+ if ( ctxt->privregs_pfn == VGC_PRIVREGS_HVM )
+ return 0; /* VTi domain case */
+
if ( ctxt->privregs_pfn == INVALID_P2M_ENTRY )
{
PERROR("Could not get mmapped privregs gmfn");
int sts = -1;
Elf64_Shdr *shdr;
+ if ( arch_ctxt->nr_vcpus == 0 )
+ {
+ /* VTi domain case */
+ *filesz = 0;
+ return 0;
+ }
+
/* mmapped priv regs */
shdr = xc_core_shdr_get(sheaders);
if ( shdr == NULL )
return -ENOMEM;
v->arch.privregs = (mapped_regs_t *)vpd;
- vcpu_share_privregs_with_guest(v);
vpd->vpd_low.virt_env_vaddr = vm_buffer;
v->domain->arch.vmx_platform.gos_type = OS_UNKNOWN;
return 0;
}
-void vcpu_share_privregs_with_guest(struct vcpu *v)
+static void vcpu_share_privregs_with_guest(struct vcpu *v)
{
struct domain *d = v->domain;
int i, order = get_order_from_shift(XMAPPEDREGS_SHIFT);
c.nat->regs.rbs_rnat &= ~((1UL << bottom_slot) - 1);
}
- c.nat->privregs_pfn = get_gpfn_from_mfn
- (virt_to_maddr(v->arch.privregs) >> PAGE_SHIFT);
+ if (VMX_DOMAIN(v))
+ c.nat->privregs_pfn = VGC_PRIVREGS_HVM;
+ else
+ c.nat->privregs_pfn = get_gpfn_from_mfn(
+ virt_to_maddr(v->arch.privregs) >> PAGE_SHIFT);
for (i = 0; i < IA64_NUM_DBG_REGS; i++) {
if (VMX_DOMAIN(v)) {
struct vcpu;
extern void relinquish_vcpu_resources(struct vcpu *v);
-extern void vcpu_share_privregs_with_guest(struct vcpu *v);
extern int vcpu_late_initialise(struct vcpu *v);
/* given a current domain metaphysical address, return the physical address */
struct vcpu_guest_context_regs regs;
unsigned long event_callback_ip;
+
+ /* Xen doesn't share privregs pages with an HVM domain, so this member
+ * is meaningless for an HVM domain.
+ * ~0UL is already used for INVALID_P2M_ENTRY. */
+#define VGC_PRIVREGS_HVM (~(-2UL))
unsigned long privregs_pfn;
};
typedef struct vcpu_guest_context vcpu_guest_context_t;