page scrub: Serialise softirq with a new lock.
author    Keir Fraser <keir.fraser@citrix.com>
Mon, 11 Aug 2008 11:20:11 +0000 (12:20 +0100)
committer Keir Fraser <keir.fraser@citrix.com>
Mon, 11 Aug 2008 11:20:11 +0000 (12:20 +0100)
Avoids holding up acquisition of page_scrub_lock in free_domheap_pages(): the
softirq now takes page_scrub_lock only while peeling a batch of pages off the
scrub list, and serialises the scrubbing work itself with a new private lock.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
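
The change below keeps page_scrub_lock held only while peeling a small batch of
pages off the scrub list; the long-running scrub work is serialised by a new,
private serialise_lock that is only ever taken with trylock. What follows is a
minimal user-space sketch of that locking pattern, assuming POSIX threads in
place of Xen's spinlock and timer primitives; the names work_list, list_lock,
serialise_lock, scrub_one and worker_pass are hypothetical and only illustrate
the idea, not the actual Xen code.

/*
 * Sketch: a private trylock-only lock serialises the worker itself, while
 * the shared list lock is held just long enough to peel a batch of items,
 * so producers (the analogue of free_domheap_pages()) are never blocked
 * for long.
 */
#include <pthread.h>
#include <stddef.h>

#define BATCH 16

struct item { struct item *next; int payload; };

static struct item *work_list;                 /* shared work queue */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;      /* plays page_scrub_lock */
static pthread_mutex_t serialise_lock = PTHREAD_MUTEX_INITIALIZER; /* plays serialise_lock  */

static void scrub_one(struct item *it) { it->payload = 0; }  /* stand-in for clear_page() */

/* One pass of the worker; returns 0 if another worker already holds the
 * serialise lock (the Xen code re-arms a 1ms timer in that case). */
static int worker_pass(void)
{
    struct item *batch[BATCH];
    int i, n;

    if (pthread_mutex_trylock(&serialise_lock) != 0)
        return 0;

    do {
        /* Hold the shared lock only while peeling up to BATCH items. */
        pthread_mutex_lock(&list_lock);
        for (n = 0; n < BATCH && work_list != NULL; n++) {
            batch[n] = work_list;
            work_list = work_list->next;
        }
        pthread_mutex_unlock(&list_lock);

        /* The slow part runs without the shared lock held. */
        for (i = 0; i < n; i++)
            scrub_one(batch[i]);
    } while (n == BATCH);

    pthread_mutex_unlock(&serialise_lock);
    return 1;
}
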
xen/common/page_alloc.c

index b202570a0fe732df2687730ae591098004766810..d9e0a2946ac738c4b9cc4179c03d43254cec89b5 100644
@@ -950,9 +950,10 @@ static void page_scrub_softirq(void)
     void             *p;
     int               i;
     s_time_t          start = NOW();
+    static spinlock_t serialise_lock = SPIN_LOCK_UNLOCKED;
 
     /* free_heap_pages() does not parallelise well. Serialise this function. */
-    if ( !spin_trylock(&page_scrub_lock) )
+    if ( !spin_trylock(&serialise_lock) )
     {
         set_timer(&this_cpu(page_scrub_timer), NOW() + MILLISECS(1));
         return;
@@ -960,10 +961,12 @@ static void page_scrub_softirq(void)
 
     /* Aim to do 1ms of work every 10ms. */
     do {
+        spin_lock(&page_scrub_lock);
+
         if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
         {
             spin_unlock(&page_scrub_lock);
-            return;
+            goto out;
         }
         
         /* Peel up to 16 pages from the list. */
@@ -979,6 +982,8 @@ static void page_scrub_softirq(void)
         page_scrub_list.next = ent->next;
         scrub_pages -= (i+1);
 
+        spin_unlock(&page_scrub_lock);
+
         /* Working backwards, scrub each page in turn. */
         while ( ent != &page_scrub_list )
         {
@@ -991,9 +996,10 @@ static void page_scrub_softirq(void)
         }
     } while ( (NOW() - start) < MILLISECS(1) );
 
-    spin_unlock(&page_scrub_lock);
-
     set_timer(&this_cpu(page_scrub_timer), NOW() + MILLISECS(10));
+
+ out:
+    spin_unlock(&serialise_lock);
 }
 
 static void page_scrub_timer_fn(void *unused)
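
A hypothetical driver for the user-space sketch shown before the diff (appended
to the same file): a producer thread stands in for free_domheap_pages(), pushing
items onto work_list while holding list_lock for only a few instructions, while
two worker threads race on worker_pass() and at most one of them scrubs at any
moment.

#include <stdlib.h>

static void *producer(void *arg)
{
    int i;
    (void)arg;
    for (i = 0; i < 1000; i++) {
        struct item *it = malloc(sizeof(*it));
        it->payload = i;
        pthread_mutex_lock(&list_lock);   /* held only for the list insert */
        it->next = work_list;
        work_list = it;
        pthread_mutex_unlock(&list_lock);
    }
    return NULL;
}

static void *worker(void *arg)
{
    int i;
    (void)arg;
    for (i = 0; i < 1000; i++)
        worker_pass();
    return NULL;
}

int main(void)
{
    pthread_t p, w1, w2;
    pthread_create(&p, NULL, producer, NULL);
    pthread_create(&w1, NULL, worker, NULL);
    pthread_create(&w2, NULL, worker, NULL);
    pthread_join(p, NULL);
    pthread_join(w1, NULL);
    pthread_join(w2, NULL);
    return 0;   /* scrubbed items are deliberately leaked; this is only a sketch */
}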