}
memset(frame_table, 0, nr_pages << PAGE_SHIFT);
+
+#if defined(__x86_64__)
+    for ( i = 0; i < max_page; i++ )
+        spin_lock_init(&frame_table[i].lock);
+#endif
}
void __init arch_init_memory(void)
#endif
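+/*
+ * Per-page locking: the x86_64 struct page_info carries a real spinlock
+ * (initialised in the frame-table setup above), while the i386 build,
+ * where no spinlock field is available, open-codes a bit spinlock on the
+ * _PGC_locked bit of count_info.
+ */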
+static void page_lock(struct page_info *page)
+{
+#if defined(__i386__)
+    while ( unlikely(test_and_set_bit(_PGC_locked, &page->count_info)) )
+        while ( test_bit(_PGC_locked, &page->count_info) )
+            cpu_relax();
+#else
+    spin_lock(&page->lock);
+#endif
+}
+
+static void page_unlock(struct page_info *page)
+{
+#if defined(__i386__)
+    clear_bit(_PGC_locked, &page->count_info);
+#else
+    spin_unlock(&page->lock);
+#endif
+}
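
The __i386__ branch is a test-and-test-and-set bit spinlock: the atomic
test_and_set_bit() attempts to take the lock, and on contention the inner
loop spins read-only (with cpu_relax() as a pause hint) until the bit
clears, keeping the cache line in a shared state while waiting. A minimal
standalone sketch of the same technique, using C11 atomics rather than
Xen's primitives (bit_lock, bit_unlock and PGC_LOCKED are illustrative
names, not Xen's):

    #include <stdatomic.h>

    #define PGC_LOCKED 1UL  /* stand-in for the real _PGC_locked bit */

    static void bit_lock(atomic_ulong *word)
    {
        /* Attempt the lock; on contention spin read-only until it clears. */
        while ( atomic_fetch_or_explicit(word, PGC_LOCKED,
                                         memory_order_acquire) & PGC_LOCKED )
            while ( atomic_load_explicit(word, memory_order_relaxed)
                    & PGC_LOCKED )
                ;  /* a pause/yield hint would go here */
    }

    static void bit_unlock(atomic_ulong *word)
    {
        atomic_fetch_and_explicit(word, ~PGC_LOCKED, memory_order_release);
    }
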
/* How to write an entry to the guest pagetables.
* Returns 0 for failure (pointer not valid), 1 for success. */
struct vcpu *curr = current;
struct domain *d = curr->domain;
unsigned long mfn;
+ struct page_info *l1pg = mfn_to_page(gl1mfn);
+ int rc = 1;
+
+ page_lock(l1pg);
if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
- return 0;
+ return page_unlock(l1pg), 0;
if ( unlikely(paging_mode_refcounts(d)) )
- return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr, preserve_ad);
+ {
+ rc = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr, preserve_ad);
+ page_unlock(l1pg);
+ return rc;
+ }
if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
{
/* Translate foreign guest addresses. */
mfn = gmfn_to_mfn(FOREIGNDOM, l1e_get_pfn(nl1e));
if ( unlikely(mfn == INVALID_MFN) )
- return 0;
+ return page_unlock(l1pg), 0;
ASSERT((mfn & ~(PADDR_MASK >> PAGE_SHIFT)) == 0);
nl1e = l1e_from_pfn(mfn, l1e_get_flags(nl1e));
if ( unlikely(l1e_get_flags(nl1e) & l1_disallow_mask(d)) )
{
+ page_unlock(l1pg);
MEM_LOG("Bad L1 flags %x",
l1e_get_flags(nl1e) & l1_disallow_mask(d));
            return 0;
        }
if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT) )
{
adjust_guest_l1e(nl1e, d);
- return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
- preserve_ad);
+ rc = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
+ preserve_ad);
+ page_unlock(l1pg);
+ return rc;
}
if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) )
- return 0;
+ return page_unlock(l1pg), 0;
adjust_guest_l1e(nl1e, d);
if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
preserve_ad)) )
{
- put_page_from_l1e(nl1e, d);
- return 0;
+ ol1e = nl1e;
+ rc = 0;
}
}
- else
+ else if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
+ preserve_ad)) )
{
- if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
- preserve_ad)) )
- return 0;
+ page_unlock(l1pg);
+ return 0;
}
+ page_unlock(l1pg);
put_page_from_l1e(ol1e, d);
- return 1;
+ return rc;
}
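
All four mod_lN_entry() paths follow this same discipline, and the L2/L3/L4
variants below repeat it: take the page lock, snapshot the old entry,
validate and take a reference on the new one, attempt the compare-and-exchange
via UPDATE_ENTRY(), then drop the lock before releasing the displaced
reference (presumably so that any freeing work triggered by the put runs
outside the lock). The `ol1e = nl1e; rc = 0;` failure path deliberately
funnels the reference just taken on the new entry into the common
put_page_from_l1e() at the tail. A condensed sketch of that shape, with
hypothetical stand-ins (pte_t, get_ref, put_ref, update_entry) for the Xen
types and helpers:

    typedef unsigned long pte_t;                   /* illustrative */
    struct page_info;                              /* as in the patch */
    void page_lock(struct page_info *page);        /* added above */
    void page_unlock(struct page_info *page);
    int  get_ref(pte_t e);                         /* hypothetical */
    void put_ref(pte_t e);                         /* hypothetical */
    int  update_entry(pte_t *p, pte_t o, pte_t n); /* hypothetical cmpxchg */

    static int locked_update(struct page_info *pg, pte_t *p, pte_t ne)
    {
        pte_t oe;
        int rc = 1;

        page_lock(pg);
        oe = *p;                        /* snapshot the old entry */
        if ( !get_ref(ne) )
            return page_unlock(pg), 0;  /* validation/refcount failure */
        if ( !update_entry(p, oe, ne) ) /* raced with another update */
        {
            oe = ne;                    /* drop the fresh reference instead */
            rc = 0;
        }
        page_unlock(pg);
        put_ref(oe);                    /* reference drop outside the lock */
        return rc;
    }
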
l2_pgentry_t ol2e;
struct vcpu *curr = current;
struct domain *d = curr->domain;
+ struct page_info *l2pg = mfn_to_page(pfn);
+ int rc = 1;
if ( unlikely(!is_guest_l2_slot(d, type, pgentry_ptr_to_slot(pl2e))) )
{
return 0;
}
+ page_lock(l2pg);
+
if ( unlikely(__copy_from_user(&ol2e, pl2e, sizeof(ol2e)) != 0) )
- return 0;
+ return page_unlock(l2pg), 0;
if ( l2e_get_flags(nl2e) & _PAGE_PRESENT )
{
if ( unlikely(l2e_get_flags(nl2e) & L2_DISALLOW_MASK) )
{
+ page_unlock(l2pg);
MEM_LOG("Bad L2 flags %x",
l2e_get_flags(nl2e) & L2_DISALLOW_MASK);
            return 0;
        }
if ( !l2e_has_changed(ol2e, nl2e, _PAGE_PRESENT) )
{
adjust_guest_l2e(nl2e, d);
- return UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr, preserve_ad);
+ rc = UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr, preserve_ad);
+ page_unlock(l2pg);
+ return rc;
}
if ( unlikely(!get_page_from_l2e(nl2e, pfn, d)) )
- return 0;
+ return page_unlock(l2pg), 0;
adjust_guest_l2e(nl2e, d);
if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr,
preserve_ad)) )
{
- put_page_from_l2e(nl2e, pfn);
- return 0;
+ ol2e = nl2e;
+ rc = 0;
}
}
else if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr,
preserve_ad)) )
{
+ page_unlock(l2pg);
return 0;
}
+ page_unlock(l2pg);
put_page_from_l2e(ol2e, pfn);
- return 1;
+ return rc;
}
#if CONFIG_PAGING_LEVELS >= 3
l3_pgentry_t ol3e;
struct vcpu *curr = current;
struct domain *d = curr->domain;
- int okay;
+ struct page_info *l3pg = mfn_to_page(pfn);
+ int okay, rc = 1;
    if ( unlikely(!is_guest_l3_slot(pgentry_ptr_to_slot(pl3e))) )
    {
        MEM_LOG("Illegal L3 update attempt in Xen-private area %p", pl3e);
        return 0;
    }

    /*
     * Disallow updates to final L3 slot. It contains Xen mappings, and it
     * would be a pain to ensure they remain continuously valid throughout.
     */
    if ( is_pv_32bit_domain(d) && (pgentry_ptr_to_slot(pl3e) >= 3) )
        return 0;
+ page_lock(l3pg);
+
if ( unlikely(__copy_from_user(&ol3e, pl3e, sizeof(ol3e)) != 0) )
- return 0;
+ return page_unlock(l3pg), 0;
if ( l3e_get_flags(nl3e) & _PAGE_PRESENT )
{
if ( unlikely(l3e_get_flags(nl3e) & l3_disallow_mask(d)) )
{
+ page_unlock(l3pg);
MEM_LOG("Bad L3 flags %x",
l3e_get_flags(nl3e) & l3_disallow_mask(d));
            return 0;
        }
if ( !l3e_has_changed(ol3e, nl3e, _PAGE_PRESENT) )
{
adjust_guest_l3e(nl3e, d);
- return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr, preserve_ad);
+ rc = UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr, preserve_ad);
+ page_unlock(l3pg);
+ return rc;
}
if ( unlikely(!get_page_from_l3e(nl3e, pfn, d)) )
- return 0;
+ return page_unlock(l3pg), 0;
adjust_guest_l3e(nl3e, d);
if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr,
preserve_ad)) )
{
- put_page_from_l3e(nl3e, pfn);
- return 0;
+ ol3e = nl3e;
+ rc = 0;
}
}
else if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr,
preserve_ad)) )
{
+ page_unlock(l3pg);
return 0;
}
pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e);
+ page_unlock(l3pg);
put_page_from_l3e(ol3e, pfn);
- return 1;
+ return rc;
}
#endif
struct vcpu *curr = current;
struct domain *d = curr->domain;
l4_pgentry_t ol4e;
+ struct page_info *l4pg = mfn_to_page(pfn);
+ int rc = 1;
if ( unlikely(!is_guest_l4_slot(d, pgentry_ptr_to_slot(pl4e))) )
{
return 0;
}
+ page_lock(l4pg);
+
if ( unlikely(__copy_from_user(&ol4e, pl4e, sizeof(ol4e)) != 0) )
- return 0;
+ return page_unlock(l4pg), 0;
if ( l4e_get_flags(nl4e) & _PAGE_PRESENT )
{
if ( unlikely(l4e_get_flags(nl4e) & L4_DISALLOW_MASK) )
{
+ page_unlock(l4pg);
MEM_LOG("Bad L4 flags %x",
l4e_get_flags(nl4e) & L4_DISALLOW_MASK);
            return 0;
        }
if ( !l4e_has_changed(ol4e, nl4e, _PAGE_PRESENT) )
{
adjust_guest_l4e(nl4e, d);
- return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr, preserve_ad);
+ rc = UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr, preserve_ad);
+ page_unlock(l4pg);
+ return rc;
}
if ( unlikely(!get_page_from_l4e(nl4e, pfn, d)) )
- return 0;
+ return page_unlock(l4pg), 0;
adjust_guest_l4e(nl4e, d);
if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr,
preserve_ad)) )
{
- put_page_from_l4e(nl4e, pfn);
- return 0;
+ ol4e = nl4e;
+ rc = 0;
}
}
else if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr,
preserve_ad)) )
{
+ page_unlock(l4pg);
return 0;
}
+ page_unlock(l4pg);
put_page_from_l4e(ol4e, pfn);
- return 1;
+ return rc;
}
#endif
goto out;
}
- domain_lock(d);
-
for ( i = 0; i < count; i++ )
{
if ( hypercall_preempt_check() )
process_deferred_ops();
- domain_unlock(d);
-
perfc_add(num_mmuext_ops, i);
out:
domain_mmap_cache_init(&mapcache);
- domain_lock(d);
-
for ( i = 0; i < count; i++ )
{
if ( hypercall_preempt_check() )
process_deferred_ops();
- domain_unlock(d);
-
domain_mmap_cache_destroy(&mapcache);
perfc_add(num_page_updates, i);
goto failed;
}
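+    /* Serialise the grant PTE write against other updates of this page
+     * table page, matching the locking now done in mod_l1_entry(). */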
+ page_lock(page);
+
ol1e = *(l1_pgentry_t *)va;
if ( !UPDATE_ENTRY(l1, (l1_pgentry_t *)va, ol1e, nl1e, mfn, v, 0) )
{
+ page_unlock(page);
put_page_type(page);
rc = GNTST_general_error;
goto failed;
}
+ page_unlock(page);
+
if ( !paging_mode_refcounts(d) )
put_page_from_l1e(ol1e, d);
goto failed;
}
- if ( __copy_from_user(&ol1e, (l1_pgentry_t *)va, sizeof(ol1e)) )
- {
- put_page_type(page);
- rc = GNTST_general_error;
- goto failed;
- }
+ page_lock(page);
+
+ ol1e = *(l1_pgentry_t *)va;
/* Check that the virtual address supplied is actually mapped to frame. */
if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame) )
{
+ page_unlock(page);
MEM_LOG("PTE entry %lx for address %"PRIx64" doesn't match frame %lx",
(unsigned long)l1e_get_intpte(ol1e), addr, frame);
put_page_type(page);
d->vcpu[0] /* Change if we go to per-vcpu shadows. */,
0)) )
{
+ page_unlock(page);
MEM_LOG("Cannot delete PTE entry at %p", va);
put_page_type(page);
rc = GNTST_general_error;
goto failed;
}
+ page_unlock(page);
put_page_type(page);
failed:
l1_pgentry_t *pl1e, ol1e;
struct domain *d = v->domain;
unsigned long gl1mfn;
+ struct page_info *l1pg;
int okay;
ASSERT(domain_is_locked(d));
MEM_LOG("Could not find L1 PTE for address %lx", va);
return GNTST_general_error;
}
+ l1pg = mfn_to_page(gl1mfn);
+ page_lock(l1pg);
ol1e = *pl1e;
okay = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, v, 0);
+ page_unlock(l1pg);
guest_unmap_l1e(v, pl1e);
pl1e = NULL;
{
l1_pgentry_t *pl1e, ol1e;
unsigned long gl1mfn;
+ struct page_info *l1pg;
int rc = 0;
pl1e = guest_map_l1e(v, addr, &gl1mfn);
MEM_LOG("Could not find L1 PTE for address %lx", addr);
return GNTST_general_error;
}
+
+ l1pg = mfn_to_page(gl1mfn);
+ page_lock(l1pg);
ol1e = *pl1e;
/* Check that the virtual address supplied is actually mapped to frame. */
if ( unlikely(l1e_get_pfn(ol1e) != frame) )
{
+ page_unlock(l1pg);
MEM_LOG("PTE entry %lx for address %lx doesn't match frame %lx",
l1e_get_pfn(ol1e), addr, frame);
rc = GNTST_general_error;
/* Delete pagetable entry. */
if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, v, 0)) )
{
+ page_unlock(l1pg);
MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e);
rc = GNTST_general_error;
goto out;
}
+ page_unlock(l1pg);
+
out:
guest_unmap_l1e(v, pl1e);
return rc;
struct vcpu *curr = current;
l1_pgentry_t *pl1e, ol1e;
unsigned long gl1mfn;
+ struct page_info *l1pg;
int rc;
if ( flags & GNTMAP_contains_pte )
(unsigned long)new_addr);
return GNTST_general_error;
}
+
+ l1pg = mfn_to_page(gl1mfn);
+ page_lock(l1pg);
ol1e = *pl1e;
if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, l1e_empty(),
gl1mfn, curr, 0)) )
{
+ page_unlock(l1pg);
MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e);
guest_unmap_l1e(curr, pl1e);
return GNTST_general_error;
}
+ page_unlock(l1pg);
guest_unmap_l1e(curr, pl1e);
rc = replace_grant_va_mapping(addr, frame, ol1e, curr);
if ( rc )
return rc;
- domain_lock(d);
-
pl1e = guest_map_l1e(v, va, &gl1mfn);
if ( unlikely(!pl1e || !mod_l1_entry(pl1e, val, gl1mfn, 0)) )
process_deferred_ops();
- domain_unlock(d);
-
switch ( flags & UVMF_FLUSHTYPE_MASK )
{
case UVMF_TLB_FLUSH:
struct ptwr_emulate_ctxt ptwr_ctxt;
int rc;
- domain_lock(d);
-
/* Attempt to read the PTE that maps the VA being accessed. */
guest_get_eff_l1e(v, addr, &pte);
page = l1e_get_page(pte);
ptwr_ctxt.cr2 = addr;
ptwr_ctxt.pte = pte;
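+    /* The emulated PTE write below is serialised by the page lock alone;
+     * the per-domain lock previously taken around this path is gone. */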
+ page_lock(page);
rc = x86_emulate(&ptwr_ctxt.ctxt, &ptwr_emulate_ops);
+ page_unlock(page);
if ( rc == X86EMUL_UNHANDLEABLE )
goto bail;
- domain_unlock(d);
perfc_incr(ptwr_emulations);
return EXCRET_fault_fixed;
bail:
- domain_unlock(d);
return 0;
}