This patch fixes those mistakes.
Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
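
For orientation, this is the shape every fixed wrapper now follows, with the
IA64 hypercall register convention spelled out. A sketch assembled from the
identifiers in the hunks below (the _sketch name is hypothetical, not in-tree
code):

    /* Sketch: hypercall wrappers fetch arguments from guest registers
     * r32, r33, ... and must forward BOTH of them now that
     * do_sched_op() and do_xen_version() take two parameters. */
    void hyper_sched_op_sketch(void)
    {
        VCPU *vcpu = current;
        u64 cmd, arg, ret;

        vcpu_get_gr_nat(vcpu, 16, &cmd);  /* r32: SCHEDOP_* command    */
        vcpu_get_gr_nat(vcpu, 17, &arg);  /* r33: per-command argument */
        ret = do_sched_op(cmd, arg);      /* both args forwarded       */
        vcpu_set_gr(vcpu, 8, ret, 0);     /* return value in r8        */
        vmx_vcpu_increment_iip(vcpu);     /* step past the hypercall   */
    }

The bug being fixed was exactly the missing second step: the old wrappers read
only r32 and called the handlers with one argument.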
#include <asm/page.h>
#include <xen/mm.h>
#include <xen/multicall.h>
+#include <xen/hypercall.h>
void hyper_not_support(void)
void hyper_sched_op(void)
{
VCPU *vcpu=current;
- u64 r32,ret;
+ u64 r32,r33,ret;
vcpu_get_gr_nat(vcpu,16,&r32);
- ret=do_sched_op(r32);
+ vcpu_get_gr_nat(vcpu,17,&r33);
+ ret=do_sched_op(r32,r33);
vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
void hyper_xen_version(void)
{
VCPU *vcpu=current;
- u64 r32,ret;
+ u64 r32,r33,ret;
vcpu_get_gr_nat(vcpu,16,&r32);
- ret=do_xen_version((int )r32);
+ vcpu_get_gr_nat(vcpu,17,&r33);
+ ret=do_xen_version((int )r32,r33);
vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
*/
#include <xen/config.h>
#include <xen/sched.h>
+#include <xen/hypercall.h>
+#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <asm/vmx.h>
#include <asm/vmx_vcpu.h>
/*
* I/O emulation should be atomic from domain point of view. However,
- * when emulation code is waiting for I/O completion by do_block,
+ * when emulation code is waiting for I/O completion by blocking,
* other events like DM interrupt, VBD, etc. may come and unblock
* current execution flow. So we have to prepare for re-block if unblocked
* by non I/O completion event.
{
struct vcpu *v = current;
struct domain *d = v->domain;
- extern void do_block();
int port = iopacket_port(d);
do {
if (!test_bit(port,
&d->shared_info->evtchn_pending[0]))
- do_block();
+ do_sched_op(SCHEDOP_block, 0);
/* Unblocked when some event is coming. Clear pending indication
* immediately if deciding to go for io assist
if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
if (p->state != STATE_IORESP_READY) {
- /* Can't do_block here, for the same reason as other places to
+ /* Can't block here, for the same reason as other places to
* use vmx_wait_io. Simple return is safe since vmx_wait_io will
* try to block again
*/
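
The hunks above all replace direct do_block() calls with the hypercall form
inside the re-block loop the comment describes. A condensed sketch of that
loop, using only identifiers from the hunks (error and flag details elided;
wait_for_io_sketch is a hypothetical name):

    /* Sketch of the re-block pattern: block on the device-model event
     * channel; if an unrelated event (DM interrupt, VBD, ...) woke us,
     * loop and block again until the I/O completion really arrived. */
    void wait_for_io_sketch(void)
    {
        struct domain *d = current->domain;
        int port = iopacket_port(d);              /* DM event channel */

        do {
            if (!test_bit(port, &d->shared_info->evtchn_pending[0]))
                do_sched_op(SCHEDOP_block, 0);    /* sleep until any event */
            vmx_check_events(current);            /* consume what arrived  */
        } while (test_bit(ARCH_VMX_IO_WAIT, &current->arch.arch_vmx.flags));
    }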
#include <xen/config.h>
#include <xen/sched.h>
+#include <xen/hypercall.h>
#include <linux/efi.h> /* FOR EFI_UNIMPLEMENTED */
#include <asm/sal.h> /* FOR struct ia64_sal_retval */
}
else {
pal_halt_light_count++;
- do_sched_op(SCHEDOP_yield);
+ do_sched_op(SCHEDOP_yield, 0);
}
regs->r8 = 0;
regs->r9 = 0;
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
+#include <xen/hypercall.h>
#include <asm/current.h>
#include <asm/io.h>
#include <asm/shadow.h>
#endif
extern long evtchn_send(int lport);
-extern long do_block(void);
void do_nmi(struct cpu_user_regs *);
static int check_vmx_controls(ctrls, msr)
}
if ( next_wakeup != - 1 )
set_timer(&current->arch.arch_vmx.hlt_timer, next_wakeup);
- do_block();
+ do_sched_op(SCHEDOP_block, 0);
}
static inline void vmx_vmexit_do_extint(struct cpu_user_regs *regs)
case TRAP_debug:
{
void store_cpu_user_regs(struct cpu_user_regs *regs);
- long do_sched_op(unsigned long op);
-
store_cpu_user_regs(&regs);
__vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS, PENDING_DEBUG_EXC_BS);
domain_pause_for_debugger();
- do_sched_op(SCHEDOP_yield);
+ do_sched_op(SCHEDOP_yield, 0);
break;
}
#include <xen/errno.h>
#include <xen/trace.h>
#include <xen/event.h>
-
+#include <xen/hypercall.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/shadow.h>
#include <asm/vmx_vpic.h>
#include <asm/vmx_vlapic.h>
+#include <public/sched.h>
#include <public/hvm/ioreq.h>
#ifdef CONFIG_VMX
the device model */
void vmx_wait_io()
{
- extern void do_block();
int port = iopacket_port(current->domain);
do {
if (!test_bit(port, &current->domain->shared_info->evtchn_pending[0]))
- do_block();
+ do_sched_op(SCHEDOP_block, 0);
vmx_check_events(current);
if (!test_bit(ARCH_VMX_IO_WAIT, ¤t->arch.arch_vmx.flags))
}
/* Block the currently-executing domain until a pertinent event occurs. */
-long do_block(void)
+static long do_block(void)
{
struct vcpu *v = current;
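
With do_block() made static, the only way in from the rest of the hypervisor
is through the dispatcher, which is why every call site above was converted.
A sketch of the resulting control flow, assuming a dispatch on cmd (the
switch body is illustrative, not the in-tree scheduler):

    /* Sketch: SCHEDOP_block now reaches the static do_block() only
     * via the public dispatcher; direct external calls no longer link. */
    long do_sched_op(int cmd, unsigned long arg)
    {
        switch (cmd) {
        case SCHEDOP_yield:     /* e.g. hyper_pal's halt-light path */
            /* ... raise a reschedule ... */
            return 0;
        case SCHEDOP_block:     /* e.g. vmx_wait_io, hlt emulation  */
            return do_block();
        default:
            return -ENOSYS;
        }
    }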