 			num_mds++;
 			dom_mem -= dom_md->num_pages << EFI_PAGE_SHIFT;
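+			/* update convmem_end as conventional-memory ranges are
+			   added to the dom0 memory map */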
+			d->arch.convmem_end = end;
 			break;
 		case EFI_UNUSABLE_MEMORY:
 	/* Memory. */
 	MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
 	        FW_END_PADDR, maxmem);
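+	/* conventional memory in this map ends at maxmem */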
+	d->arch.convmem_end = maxmem;
 	/* Create an entry for IO ports. */
 	MAKE_MD(EFI_MEMORY_MAPPED_IO_PORT_SPACE, EFI_MEMORY_UC,
     atomic64_set(&d->arch.shadow_fault_count, 0);
     atomic64_set(&d->arch.shadow_dirty_count, 0);
-    d->arch.shadow_bitmap_size = (d->max_pages + BITS_PER_LONG-1) &
-                                 ~(BITS_PER_LONG-1);
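+    /* shadow_bitmap_size is in bits: one bit per page frame up to
+     * convmem_end, rounded up to a multiple of BITS_PER_LONG */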
+    d->arch.shadow_bitmap_size =
+        ((d->arch.convmem_end >> PAGE_SHIFT) +
+         BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1);
     d->arch.shadow_bitmap = xmalloc_array(unsigned long,
                                           d->arch.shadow_bitmap_size / BITS_PER_LONG);
     if (d->arch.shadow_bitmap == NULL) {
printk("%s: d 0x%p id %d current 0x%p id %d\n",
__func__, d, d->domain_id, current, current->vcpu_id);
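+    /* an mpaddr below convmem_end is conventional memory with no page
+     * allocated to it; anything at or above convmem_end is out of range */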
-    if ((mpaddr >> PAGE_SHIFT) < d->max_pages)
+    if (mpaddr < d->arch.convmem_end)
         printk("%s: non-allocated mpa 0x%lx (< 0x%lx)\n", __func__,
-               mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
+               mpaddr, d->arch.convmem_end);
     else
         printk("%s: bad mpa 0x%lx (=> 0x%lx)\n", __func__,
-               mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
+               mpaddr, d->arch.convmem_end);
     if (entry != NULL)
         p2m_entry_set(entry, NULL, __pte(0));
};
};
+    /* maximum metaphysical address of conventional memory */
+    u64 convmem_end;
+
     /* Allowed accesses to io ports. */
     struct rangeset *ioport_caps;