    for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
    {
-        if ( is_guest_l2_slot(d, type, i) &&
-             unlikely(!get_page_from_l2e(pl2e[i], pfn, d)) )
+        if ( !is_guest_l2_slot(d, type, i) )
+            continue;
+
+        if ( unlikely(!get_page_from_l2e(pl2e[i], pfn, d)) )
            goto fail;
        adjust_guest_l2e(pl2e[i], d);
    }
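
For review convenience, the resulting alloc_l2_table() loop, reassembled from the '+' and context lines above (a readability aid, not a standalone compilable unit):

    for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
    {
        /* Slots not under guest control carry no reference and need no checks. */
        if ( !is_guest_l2_slot(d, type, i) )
            continue;

        /* Validate the entry and take a reference on the page it points at. */
        if ( unlikely(!get_page_from_l2e(pl2e[i], pfn, d)) )
            goto fail;

        adjust_guest_l2e(pl2e[i], d);
    }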
-        else if ( is_guest_l3_slot(i) &&
-                  unlikely(!get_page_from_l3e(pl3e[i], pfn, d)) )
+        else if ( !is_guest_l3_slot(i) )
+            continue;
+        else if ( unlikely(!get_page_from_l3e(pl3e[i], pfn, d)) )
            goto fail;
        adjust_guest_l3e(pl3e[i], d);
 fail:
    MEM_LOG("Failure in alloc_l3_table: entry %d", i);
    while ( i-- > 0 )
-        if ( is_guest_l3_slot(i) )
-            put_page_from_l3e(pl3e[i], pfn);
+    {
+        if ( !is_guest_l3_slot(i) )
+            continue;
+        unadjust_guest_l3e(pl3e[i], d);
+        put_page_from_l3e(pl3e[i], pfn);
+    }
    unmap_domain_page(pl3e);
    return 0;
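
Every slot that passes validation in the construction loop also goes through adjust_guest_l3e(), so the failure path now undoes that adjustment before dropping the reference. Reassembled cleanup loop (readability aid only):

    while ( i-- > 0 )
    {
        if ( !is_guest_l3_slot(i) )
            continue;
        /* Revert the earlier adjust_guest_l3e() before releasing the page. */
        unadjust_guest_l3e(pl3e[i], d);
        put_page_from_l3e(pl3e[i], pfn);
    }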
    for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
    {
-        if ( is_guest_l4_slot(d, i) &&
-             unlikely(!get_page_from_l4e(pl4e[i], pfn, d)) )
+        if ( !is_guest_l4_slot(d, i) )
+            continue;
+
+        if ( unlikely(!get_page_from_l4e(pl4e[i], pfn, d)) )
            goto fail;
        adjust_guest_l4e(pl4e[i], d);
    struct vcpu *curr = current;
    struct domain *d = curr->domain;
    struct page_info *l3pg = mfn_to_page(pfn);
-    int okay, rc = 1;
+    int rc = 1;
    if ( unlikely(!is_guest_l3_slot(pgentry_ptr_to_slot(pl3e))) )
    {
        return 0;
    }
-    okay = create_pae_xen_mappings(d, pl3e);
-    BUG_ON(!okay);
-    pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e);
+    if ( likely(rc) )
+    {
+        if ( !create_pae_xen_mappings(d, pl3e) )
+            BUG();
+        pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e);
+    }
    page_unlock(l3pg);
    put_page_from_l3e(ol3e, pfn);
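
For review convenience, the tail of mod_l3_entry() as it reads after the change (sketch assembled from the hunk; rc is assumed to record whether the earlier entry update succeeded, which is set in code not shown here):

    if ( likely(rc) )
    {
        /* Only rebuild the PAE Xen mappings and flush when the update went through. */
        if ( !create_pae_xen_mappings(d, pl3e) )
            BUG();

        pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e);
    }

    page_unlock(l3pg);
    put_page_from_l3e(ol3e, pfn);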