The guest VHPT table of a VMX domain is now searched in order to
insert entries into the vTLB on the fly. However, the previous
guard on the memory attribute was applied only in the guest itc.d
emulation path. That broke VGA acceleration; this patch fixes it
by moving the check to the right place.
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
}
#endif //VTLB_DEBUG
gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
- if (VMX_DOMAIN(vcpu)) {
- if (__gpfn_is_io(vcpu->domain, gpfn))
- pte |= VTLB_PTE_IO;
- else{
- if ((pte & _PAGE_MA_MASK)!=_PAGE_MA_NAT)
- /* Ensure WB attribute if pte is related to a normal mem page,
- * which is required by vga acceleration since qemu maps shared
- * vram buffer with WB.
- */
- pte &= ~_PAGE_MA_MASK;
- }
- }
+ if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
+ pte |= VTLB_PTE_IO;
thash_purge_and_insert(vcpu, pte, itir, ifa);
return IA64_NO_FAULT;
ps = itir_ps(itir);
if(VMX_DOMAIN(v)){
+ /* Ensure WB attribute if pte is related to a normal mem page,
+ * which is required by vga acceleration since qemu maps shared
+ * vram buffer with WB.
+ */
+ if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT))
+ pte &= ~_PAGE_MA_MASK;
+
phy_pte = translate_phy_pte(v, &pte, itir, ifa);
if(ps==PAGE_SHIFT){
if(!(pte&VTLB_PTE_IO)){