#ifdef CONFIG_COMPAT
if ( IS_COMPAT(d) )
{
- pfn = l4e_get_pfn(*(l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)));
+ if ( is_hvm_vcpu(v) )
+ pfn = pagetable_get_pfn(v->arch.guest_table);
+ else
+ pfn = l4e_get_pfn(*(l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)));
+
if ( pfn != 0 )
{
if ( shadow_mode_refcounts(d) )
spin_lock(buffered_io_lock);
if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
- (unsigned long)IOREQ_BUFFER_SLOT_NUM ) {
+ (unsigned int)IOREQ_BUFFER_SLOT_NUM ) {
/* The queue is full.
 * Send the iopacket through the normal path.
 * NOTE: The arithmetic operation could handle the situation for
shadow_free(d, _mfn(l3e_get_pfn(l3e[3])));
sh_unmap_domain_page(l3e);
}
- shadow_free(d, mmfn);
sh_unmap_domain_page(l4e);
}
#elif CONFIG_PAGING_LEVELS == 3
.quad compat_xenoprof_op
.quad do_event_channel_op
.quad compat_physdev_op
- .quad compat_ni_hypercall
+ .quad do_hvm_op
.quad compat_sysctl /* 35 */
.quad compat_domctl
.quad compat_kexec_op
.byte 2 /* compat_xenoprof_op */
.byte 2 /* compat_event_channel_op */
.byte 2 /* compat_physdev_op */
- .byte 0 /* compat_ni_hypercall */
+ .byte 2 /* do_hvm_op */
.byte 1 /* compat_sysctl */ /* 35 */
.byte 1 /* compat_domctl */
.byte 2 /* compat_kexec_op */
uint8_t dir:1; /* 1=read, 0=write */
uint8_t df:1;
uint8_t type; /* I/O type */
+ uint8_t _pad0[6];
uint64_t io_count; /* How many IO done on a vcpu */
};
typedef struct ioreq ioreq_t;
#define IOREQ_BUFFER_SLOT_NUM 80
struct buffered_iopage {
- unsigned long read_pointer;
- unsigned long write_pointer;
+ unsigned int read_pointer;
+ unsigned int write_pointer;
ioreq_t ioreq[IOREQ_BUFFER_SLOT_NUM];
}; /* the size of this structure must not exceed one page */
typedef struct buffered_iopage buffered_iopage_t;