bitkeeper revision 1.911.1.4 (40ac8fefyv7QTbpYikVy1Mlh5Thh3Q)
author iap10@labyrinth.cl.cam.ac.uk <iap10@labyrinth.cl.cam.ac.uk>
Thu, 20 May 2004 11:01:03 +0000 (11:01 +0000)
committer iap10@labyrinth.cl.cam.ac.uk <iap10@labyrinth.cl.cam.ac.uk>
Thu, 20 May 2004 11:01:03 +0000 (11:01 +0000)
When doing a live migrate, be more persistent in continuing to iterate --
even with a worst-case memory thrasher we seem to get over the knee after
a few iterations.

Also, free L1 shadows rather than zeroing them. I think this will be faster,
but I will add some stats to confirm.

tools/xc/lib/xc_linux_save.c
xen/common/shadow.c

index 8bcd207d7f7f8bbee427fc41c00c33abc5477c27..ddcdb1df3927edb59e6c58fcdbf4bb695e4288fe 100644 (file)
@@ -292,7 +292,7 @@ int xc_linux_save(int xc_handle,
 
        last_iter = 0;
        sent_last_iter = 1<<20; // 4GB's worth of pages
-       max_iters = 19; // limit us to 20 times round loop
+       max_iters = 29; // limit us to 30 times round loop
     }
     else
        last_iter = 1;
@@ -645,8 +645,10 @@ int xc_linux_save(int xc_handle,
 
        if ( live )
        {
-           if ( ( sent_this_iter > (sent_last_iter * 0.95) ) ||
-                (iter >= max_iters) || (sent_this_iter < 10) || 
+           if ( 
+                // ( sent_this_iter > (sent_last_iter * 0.95) ) ||              
+                (iter >= max_iters) || 
+                (sent_this_iter+skip_this_iter < 10) || 
                 (total_sent > nr_pfns*2) )
            {
                DPRINTF("Start last iteration\n");
@@ -657,7 +659,7 @@ int xc_linux_save(int xc_handle,
            } 
 
            if ( xc_shadow_control( xc_handle, domid, 
-                                   DOM0_SHADOW_CONTROL_OP_CLEAN,
+                                   DOM0_SHADOW_CONTROL_OP_CLEAN2,
                                    to_send, nr_pfns ) != nr_pfns ) 
            {
                ERROR("Error flushing shadow PT");
index 62081df926682e46361704dc40082299d13bc224..f222419b2506934b4042ca760cdb81a156e08e41 100644 (file)
@@ -109,7 +109,13 @@ static void __free_shadow_table( struct mm_struct *m )
     SH_LOG("Free shadow table. Freed= %d",free);
 }
 
-static inline int shadow_page_op( struct mm_struct *m, unsigned int op,
+
+#define TABLE_OP_ZERO_L2 1
+#define TABLE_OP_ZERO_L1 2
+#define TABLE_OP_FREE_L1 3
+
+static inline int shadow_page_op( struct mm_struct *m, unsigned int op, 
+                                                                 unsigned int gpfn,
                                   struct pfn_info *spfn_info, int *work )
 {
     unsigned int spfn = spfn_info-frame_table;
@@ -117,48 +123,45 @@ static inline int shadow_page_op( struct mm_struct *m, unsigned int op,
 
     switch( op )
     {
-    case DOM0_SHADOW_CONTROL_OP_CLEAN:
-    {
-        int i;
-        if ( (spfn_info->type_and_flags & PGT_type_mask) == 
-             PGT_l1_page_table )
-        {
-            unsigned long * spl1e = map_domain_mem( spfn<<PAGE_SHIFT );
-
-            for (i=0;i<ENTRIES_PER_L1_PAGETABLE;i++)
-            {                    
-                if ( (spl1e[i] & _PAGE_PRESENT ) && (spl1e[i] & _PAGE_RW) )
-                {
-                    *work++;
-                    spl1e[i] &= ~_PAGE_RW;
-                }
-            }
-            unmap_domain_mem( spl1e );
-        }
+       case TABLE_OP_ZERO_L2:
+       {
+               if ( (spfn_info->type_and_flags & PGT_type_mask) == 
+             PGT_l2_page_table )
+               {
+                       unsigned long * spl1e = map_domain_mem( spfn<<PAGE_SHIFT );
+                       memset( spl1e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*spl1e) );
+                       unmap_domain_mem( spl1e );
+               }
     }
        break;
-
-    case DOM0_SHADOW_CONTROL_OP_CLEAN2:
-    {
-        if ( (spfn_info->type_and_flags & PGT_type_mask) == 
+       
+       case TABLE_OP_ZERO_L1:
+       {
+               if ( (spfn_info->type_and_flags & PGT_type_mask) == 
              PGT_l1_page_table )
-        {
-                       delete_shadow_status( m, frame_table-spfn_info );
-                       restart = 1; // we need to go to start of list again
-               }
-               else if ( (spfn_info->type_and_flags & PGT_type_mask) == 
-             PGT_l2_page_table )
                {
                        unsigned long * spl1e = map_domain_mem( spfn<<PAGE_SHIFT );
-                       memset( spl1e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*spl1e) );
+                       memset( spl1e, 0, ENTRIES_PER_L1_PAGETABLE * sizeof(*spl1e) );
                        unmap_domain_mem( spl1e );
                }
-               else
-                       BUG();
     }
        break;
 
+       case TABLE_OP_FREE_L1:
+       {
+               if ( (spfn_info->type_and_flags & PGT_type_mask) == 
+             PGT_l1_page_table )
+               {
+                       // lock is already held
+                       delete_shadow_status( m, gpfn );
+                       restart = 1; // we need to go to start of list again
+               }
+    }
 
+       break;
+       
+       default:
+               BUG();
 
     }
     return restart;
@@ -183,18 +186,18 @@ static void __scan_shadow_table( struct mm_struct *m, unsigned int op )
                next = a->next;
         if (a->pfn)
         {
-            if ( shadow_page_op( m, op, 
-                                                       &frame_table[a->spfn_and_flags & PSH_pfn_mask], 
-                                                       &work ) )
+            if ( shadow_page_op( m, op, a->pfn,                                                                 
+                                                                &frame_table[a->spfn_and_flags & PSH_pfn_mask], 
+                                                                &work ) )
                                goto retry;
         }
         a=next;
         while(a)
         { 
                        next = a->next;
-            if ( shadow_page_op( m, op, 
-                                                       &frame_table[a->spfn_and_flags & PSH_pfn_mask],
-                                                       &work ) )
+            if ( shadow_page_op( m, op, a->pfn,
+                                                                &frame_table[a->spfn_and_flags & PSH_pfn_mask],
+                                                                &work ) )
                                goto retry;
             a=next;
         }
@@ -332,17 +335,29 @@ static int shadow_mode_table_op( struct task_struct *p,
     switch(op)
     {
     case DOM0_SHADOW_CONTROL_OP_FLUSH:
-        __free_shadow_table( m );
+        // XXX THIS IS VERY DANGEROUS : MUST ENSURE THE PTs ARE NOT IN USE ON
+               // OTHER CPU -- fix when we get sched sync pause.
+        __free_shadow_table( m );  
         break;
    
     case DOM0_SHADOW_CONTROL_OP_CLEAN:   // zero all-non hypervisor
+       {
+               __scan_shadow_table( m, TABLE_OP_ZERO_L2 );
+               __scan_shadow_table( m, TABLE_OP_ZERO_L1 );
+
+               goto send_bitmap;
+       }
+               
+
     case DOM0_SHADOW_CONTROL_OP_CLEAN2:  // zero all L2, free L1s
     {
                int i,j,zero=1;
                
-               __scan_shadow_table( m, op );
-               //    __free_shadow_table( m );
-       
+               __scan_shadow_table( m, TABLE_OP_ZERO_L2 );
+               __scan_shadow_table( m, TABLE_OP_FREE_L1 );
+               
+       send_bitmap:
+
                if( p->tot_pages > sc->pages || 
                        !sc->dirty_bitmap || !p->mm.shadow_dirty_bitmap )
                {