unsigned long lsize;
};
-unsigned int paddr_bits __read_mostly = 36;
-
/**
* mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 */
void __init mtrr_bp_init(void)
{
if (cpu_has_mtrr) {
mtrr_if = &generic_mtrr_ops;
- size_or_mask = 0xff000000; /* 36 bits */
- size_and_mask = 0x00f00000;
-
- /* This is an AMD specific MSR, but we assume(hope?) that
- Intel will implement it to when they extend the address
- bus of the Xeon. */
- if (cpuid_eax(0x80000000) >= 0x80000008) {
- paddr_bits = cpuid_eax(0x80000008) & 0xff;
- /* CPUID workaround for Intel 0F33/0F34 CPU */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 0xF &&
- boot_cpu_data.x86_model == 0x3 &&
- (boot_cpu_data.x86_mask == 0x3 ||
- boot_cpu_data.x86_mask == 0x4))
- paddr_bits = 36;
-
- size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1);
- size_and_mask = ~size_or_mask & 0xfffff00000ULL;
- } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
- boot_cpu_data.x86 == 6) {
- /* VIA C* family have Intel style MTRRs, but
- don't support PAE */
- size_or_mask = 0xfff00000; /* 32 bits */
- size_and_mask = 0;
- }
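+ /*
+  * Compute the masks from the runtime-detected physical address
+  * width. For paddr_bits == 36 and PAGE_SHIFT == 12 this yields
+  * the values previously hard-coded above: size_or_mask ends in
+  * ...ff000000 and size_and_mask is 0x00f00000.
+  */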
+ size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1);
+ size_and_mask = ~size_or_mask & 0xfffff00000ULL;
}
if (mtrr_if) {
unsigned long mfn = domctl->u.memory_mapping.first_mfn;
unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
int add = domctl->u.memory_mapping.add_mapping;
- int i;
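+ /* Use the same 'unsigned long' type as nr_mfns for the loop
+  * counter, so the index cannot truncate. */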
+ unsigned long i;
ret = -EINVAL;
- if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
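+ /*
+  * Refuse MFN/GFN ranges that wrap, and MFNs that would not fit
+  * within the CPU's physical address width: the OR of the first
+  * and last frame number has bits at or above
+  * (paddr_bits - PAGE_SHIFT) exactly when the range reaches
+  * beyond paddr_bits.
+  */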
+ if ( (mfn + nr_mfns - 1) < mfn || /* wrap? */
+ ((mfn | (mfn + nr_mfns - 1)) >> (paddr_bits - PAGE_SHIFT)) ||
+ (gfn + nr_mfns - 1) < gfn ) /* wrap? */
break;
ret = -EPERM;
d->domain_id, gfn, mfn, nr_mfns);
ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
- for ( i = 0; i < nr_mfns; i++ )
- set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
+ if ( !ret && paging_mode_translate(d) )
+ {
+ for ( i = 0; !ret && i < nr_mfns; i++ )
+ if ( !set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i)) )
+ ret = -EIO;
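+ /*
+  * On failure, unwind: remove the P2M entries installed so far
+  * and revoke the iomem access granted above.
+  */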
+ if ( ret )
+ {
+ printk(XENLOG_G_WARNING
+ "memory_map:fail: dom%d gfn=%lx mfn=%lx\n",
+ d->domain_id, gfn + i, mfn + i);
+ while ( i-- )
+ clear_mmio_p2m_entry(d, gfn + i);
+ if ( iomem_deny_access(d, mfn, mfn + nr_mfns - 1) &&
+ IS_PRIV(current->domain) )
+ printk(XENLOG_ERR
+ "memory_map: failed to deny dom%d access to [%lx,%lx]\n",
+ d->domain_id, mfn, mfn + nr_mfns - 1);
+ }
+ }
}
else
{
"memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
d->domain_id, gfn, mfn, nr_mfns);
- for ( i = 0; i < nr_mfns; i++ )
- clear_mmio_p2m_entry(d, gfn+i);
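+ /*
+  * On this removal path "add" (the add_mapping flag) is zero;
+  * reuse it to record any failure while tearing down the P2M
+  * entries, so it can be reported below.
+  */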
+ if ( paging_mode_translate(d) )
+ for ( i = 0; i < nr_mfns; i++ )
+ add |= !clear_mmio_p2m_entry(d, gfn + i);
ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
+ if ( !ret && add )
+ ret = -EIO;
+ if ( ret && IS_PRIV(current->domain) )
+ printk(XENLOG_ERR
+ "memory_map: error %ld %s dom%d access to [%lx,%lx]\n",
+ ret, add ? "removing" : "denying", d->domain_id,
+ mfn, mfn + nr_mfns - 1);
}
rcu_unlock_domain(d);
if ( !found )
{
g2m_ioport = xmalloc(struct g2m_ioport);
+ if ( !g2m_ioport )
+ ret = -ENOMEM;
+ }
+ if ( !found && !ret )
+ {
g2m_ioport->gport = fgp;
g2m_ioport->mport = fmp;
g2m_ioport->np = np;
list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
}
- ret = ioports_permit_access(d, fmp, fmp + np - 1);
+ if ( !ret )
+ ret = ioports_permit_access(d, fmp, fmp + np - 1);
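+ /* Don't leak the freshly added tracking entry on failure. */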
+ if ( ret && !found && g2m_ioport )
+ {
+ list_del(&g2m_ioport->list);
+ xfree(g2m_ioport);
+ }
}
else
{
break;
}
ret = ioports_deny_access(d, fmp, fmp + np - 1);
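+ /* Log the failure, but only when the caller is privileged. */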
+ if ( ret && IS_PRIV(current->domain) )
+ printk(XENLOG_ERR
+ "ioport_map: error %ld denying dom%d access to [%x,%x]\n",
+ ret, d->domain_id, fmp, fmp + np - 1);
}
rcu_unlock_domain(d);
}