x86/HVM: properly deal with hvm_copy_*_guest_phys() errors
author Jan Beulich <jbeulich@suse.com>
Mon, 14 Oct 2013 07:53:31 +0000 (09:53 +0200)
committer Jan Beulich <jbeulich@suse.com>
Mon, 14 Oct 2013 07:53:31 +0000 (09:53 +0200)
In the memory read/write handling, the default case should tell the
caller that the operation cannot be handled, rather than that it
succeeded, so that when new HVMCOPY_* states get added, failing to
handle them explicitly will not result in errors being silently
ignored.
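
For illustration only, a condensed standalone sketch of the switch
shape the emulate.c hunks below arrive at; the enum definitions and
the map_copy_result() wrapper are stand-ins rather than the real Xen
declarations, and the HVMCOPY_bad_gfn_to_mfn path (which falls back
to MMIO handling in the real code) is omitted:

/* Stand-in definitions; the real ones live in the Xen headers. */
enum hvm_copy_result {
    HVMCOPY_okay,
    HVMCOPY_bad_gva_to_gfn,
    HVMCOPY_gfn_paged_out,
    HVMCOPY_gfn_shared,
    /* Any state added later falls into the default case below. */
};

enum {
    X86EMUL_OKAY,
    X86EMUL_UNHANDLEABLE,
    X86EMUL_EXCEPTION,
    X86EMUL_RETRY,
};

static int map_copy_result(enum hvm_copy_result rc)
{
    switch ( rc )
    {
    case HVMCOPY_okay:
        break;
    case HVMCOPY_bad_gva_to_gfn:
        return X86EMUL_EXCEPTION;
    case HVMCOPY_gfn_paged_out:
    case HVMCOPY_gfn_shared:
        return X86EMUL_RETRY;
    default:
        /* Unknown result: report failure instead of pretending success. */
        return X86EMUL_UNHANDLEABLE;
    }

    return X86EMUL_OKAY;
}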

In the task switch emulation code, stop handling some errors while
silently ignoring others: check for anything other than HVMCOPY_okay
instead of singling out individual failure codes.
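
Similarly, a minimal sketch of the uniform check the hvm.c hunks
switch to, reusing the stand-in enum from the sketch above;
copy_tss_stub() and write_back_tss() are made-up placeholders for
the real hvm_copy_to_guest_virt() call site:

/* Placeholder standing in for the real copy-to-guest call. */
static enum hvm_copy_result copy_tss_stub(void)
{
    return HVMCOPY_okay;
}

static int write_back_tss(void)
{
    enum hvm_copy_result rc = copy_tss_stub();

    /* Bail on anything other than HVMCOPY_okay rather than
     * enumerating individual failure codes. */
    if ( rc != HVMCOPY_okay )
        return -1;

    return 0;
}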

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>
xen/arch/x86/hvm/emulate.c
xen/arch/x86/hvm/hvm.c

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 15b2257c87225dd8de31f4da2c8f9f020c66ef22..6cb66781750c70716a531a66c9e79aaf9d761215 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -504,10 +504,10 @@ static int __hvmemul_read(
 
     switch ( rc )
     {
+    case HVMCOPY_okay:
+        break;
     case HVMCOPY_bad_gva_to_gfn:
         return X86EMUL_EXCEPTION;
-    case HVMCOPY_unhandleable:
-        return X86EMUL_UNHANDLEABLE;
     case HVMCOPY_bad_gfn_to_mfn:
         if ( access_type == hvm_access_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
@@ -535,11 +535,10 @@ static int __hvmemul_read(
         }
         return rc;
     case HVMCOPY_gfn_paged_out:
-        return X86EMUL_RETRY;
     case HVMCOPY_gfn_shared:
         return X86EMUL_RETRY;
     default:
-        break;
+        return X86EMUL_UNHANDLEABLE;
     }
 
     return X86EMUL_OKAY;
@@ -634,10 +633,10 @@ static int hvmemul_write(
 
     switch ( rc )
     {
+    case HVMCOPY_okay:
+        break;
     case HVMCOPY_bad_gva_to_gfn:
         return X86EMUL_EXCEPTION;
-    case HVMCOPY_unhandleable:
-        return X86EMUL_UNHANDLEABLE;
     case HVMCOPY_bad_gfn_to_mfn:
         rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
                                     hvmemul_ctxt);
@@ -663,11 +662,10 @@ static int hvmemul_write(
         }
         return rc;
     case HVMCOPY_gfn_paged_out:
-        return X86EMUL_RETRY;
     case HVMCOPY_gfn_shared:
         return X86EMUL_RETRY;
     default:
-        break;
+        return X86EMUL_UNHANDLEABLE;
     }
 
     return X86EMUL_OKAY;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6fcd95a2468918f9b8abbcae5ed24797aa6d76d3..5f3a94a043f29e5347fdde72451ddc72d073cca5 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2346,11 +2346,7 @@ void hvm_task_switch(
 
     rc = hvm_copy_to_guest_virt(
         prev_tr.base, &tss, sizeof(tss), PFEC_page_present);
-    if ( rc == HVMCOPY_bad_gva_to_gfn )
-        goto out;
-    if ( rc == HVMCOPY_gfn_paged_out )
-        goto out;
-    if ( rc == HVMCOPY_gfn_shared )
+    if ( rc != HVMCOPY_okay )
         goto out;
 
     rc = hvm_copy_from_guest_virt(
@@ -2397,9 +2393,7 @@ void hvm_task_switch(
         tr.base, &tss, sizeof(tss), PFEC_page_present);
     if ( rc == HVMCOPY_bad_gva_to_gfn )
         exn_raised = 1;
-    if ( rc == HVMCOPY_gfn_paged_out )
-        goto out;
-    if ( rc == HVMCOPY_gfn_shared )
+    else if ( rc != HVMCOPY_okay )
         goto out;
 
     if ( (tss.trace & 1) && !exn_raised )