          * NOTE: In a fine-grained p2m locking scenario this operation
          * may need to promote its locking from gfn->1g superpage
          */
-        p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
-                      p2m_populate_on_demand, p2m->default_access);
-        return true;
+        return !p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
+                              p2m_populate_on_demand, p2m->default_access);
     }
 
     /* Only reclaim if we're in actual need of more cache. */
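
This first hunk changes the 1G-splitting path: previously the result of p2m_set_entry() was ignored and the function reported success unconditionally; now a failure to install the 2M PoD entry propagates to the caller. The inversion works because p2m_set_entry() returns 0 on success and a negative value on error. A minimal, self-contained sketch of the pattern, where set_entry() and demand_populate() are illustrative stand-ins rather than the Xen API:

    /*
     * Minimal sketch (not Xen code): set_entry() stands in for
     * p2m_set_entry(), which returns 0 on success and a negative value
     * on error, so "!" turns the result into a success boolean.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static int set_entry(int simulate_error)
    {
        return simulate_error ? -1 : 0;   /* 0 = success, negative = failure */
    }

    static bool demand_populate(int simulate_error)
    {
        /* Mirrors "return !p2m_set_entry(...)" in the hunk above. */
        return !set_entry(simulate_error);
    }

    int main(void)
    {
        printf("ok=%d fail=%d\n", demand_populate(0), demand_populate(1));
        return 0;
    }
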
     BUG_ON((mfn_x(mfn) & ((1UL << order) - 1)) != 0);
 
-    p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
-                  p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
+                       p2m->default_access) )
+    {
+        p2m_pod_cache_add(p2m, p, order);
+        goto out_fail;
+    }
 
     for( i = 0; i < (1UL << order); i++ )
     {
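
In this second hunk the populate path can now fail after a page has already been taken from the PoD cache, so on error the page is handed back via p2m_pod_cache_add() before jumping to out_fail; the allocation is neither leaked nor left half-installed. A self-contained sketch of that undo-on-failure idiom, using illustrative stand-ins (assumptions) rather than the real PoD API:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-ins (assumptions), not the real Xen PoD API. */
    struct page { int in_cache; };

    static struct page pool = { .in_cache = 1 };

    static struct page *cache_get(void)            /* ~ p2m_pod_cache_get() */
    {
        if ( !pool.in_cache )
            return NULL;
        pool.in_cache = 0;
        return &pool;
    }

    static void cache_add(struct page *pg)         /* ~ p2m_pod_cache_add() */
    {
        pg->in_cache = 1;
    }

    static int install(struct page *pg, int fail)  /* ~ p2m_set_entry() */
    {
        (void)pg;
        return fail ? -1 : 0;                      /* 0 = success */
    }

    static bool populate(int fail)
    {
        struct page *pg = cache_get();

        if ( !pg )
            return false;

        if ( install(pg, fail) )
        {
            cache_add(pg);      /* undo: hand the page back before failing */
            return false;
        }

        return true;
    }

    int main(void)
    {
        printf("fail: %d, back in cache: %d\n", populate(1), pool.in_cache);
        printf("retry: %d\n", populate(0));
        return 0;
    }

The hunk resumes in the remap_and_retry path near the end of the function:
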
     BUG_ON(order != PAGE_ORDER_2M);
     pod_unlock(p2m);
 
-    /* Remap this 2-meg region in singleton chunks */
     /*
+     * Remap this 2-meg region in singleton chunks. See the comment on the
+     * 1G page splitting path above for why a single call suffices.
+     *
      * NOTE: In a p2m fine-grained lock scenario this might
      * need promoting the gfn lock from gfn->2M superpage.
      */
-    for ( i = 0; i < (1UL << order); i++ )
-        p2m_set_entry(p2m, gfn_add(gfn_aligned, i), INVALID_MFN, PAGE_ORDER_4K,
-                      p2m_populate_on_demand, p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K,
+                       p2m_populate_on_demand, p2m->default_access) )
+        return false;
+
     if ( tb_init_done )
     {
         struct {
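
This last hunk collapses the 512-iteration remap loop into a single p2m_set_entry() call at PAGE_ORDER_4K whose result is now checked. Per the comment the hunk adds, one call suffices because p2m_set_entry() shatters the 2M superpage into 512 4K entries before writing the requested slot, and since the old entry was populate-on-demand the remaining 511 entries come out PoD as well. A toy model of that mechanism (an assumption for illustration, not Xen's page-table code):

    #include <stdio.h>

    #define ENTRIES 512        /* 4k entries covering one 2M superpage */

    enum type { POD, RAM };

    static enum type l1[ENTRIES];

    /*
     * Toy model (assumption, not Xen's implementation): installing one
     * small entry inside a superpage first shatters the superpage into
     * ENTRIES identical small entries, then overwrites the one slot.
     */
    static void set_4k_entry(unsigned int idx, enum type new, enum type old_2m)
    {
        for ( unsigned int i = 0; i < ENTRIES; i++ )
            l1[i] = old_2m;    /* shatter: each slot inherits the old type */
        l1[idx] = new;         /* then the single requested entry is set */
    }

    int main(void)
    {
        unsigned int pod = 0;

        /* The remap_and_retry case: old and new type are both PoD. */
        set_4k_entry(0, POD, POD);

        for ( unsigned int i = 0; i < ENTRIES; i++ )
            pod += (l1[i] == POD);

        printf("PoD entries after one call: %u/%u\n", pod, ENTRIES);
        return 0;
    }
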