& ~SHF_L1_ANY));
ASSERT(!sh_page_has_multiple_shadows(mfn_to_page(gmfn)));
- SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx\n",
- v->domain->domain_id, v->vcpu_id, mfn_x(gmfn));
+ SHADOW_PRINTK("%pv gmfn=%"PRI_mfn"\n", v, mfn_x(gmfn));
/* Need to pull write access so the page *stays* in sync. */
if ( oos_remove_write_access(v, gmfn, fixup) )
mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
struct oos_fixup *oos_fixup = v->arch.paging.shadow.oos_fixup;
- SHADOW_PRINTK("d=%d, v=%d\n", v->domain->domain_id, v->vcpu_id);
+ SHADOW_PRINTK("%pv\n", v);
ASSERT(paging_locked_by_me(v->domain));
ASSERT(paging_locked_by_me(v->domain));
- SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx\n",
- v->domain->domain_id, v->vcpu_id, mfn_x(gmfn));
+ SHADOW_PRINTK("%pv gmfn=%"PRI_mfn"\n", v, mfn_x(gmfn));
pg = mfn_to_page(gmfn);
* can be called via put_page_type when we clear a shadow l1e).*/
paging_lock_recursive(d);
- SHADOW_PRINTK("d=%d: gmfn=%lx\n", d->domain_id, mfn_x(gmfn));
+ SHADOW_PRINTK("d%d gmfn=%"PRI_mfn"\n", d->domain_id, mfn_x(gmfn));
/* Bail out now if the page is not shadowed */
if ( (pg->count_info & PGC_page_table) == 0 )
/* If that didn't catch the shadows, something is wrong */
if ( !fast && all && (pg->count_info & PGC_page_table) )
{
- SHADOW_ERROR("can't find all shadows of mfn %05lx "
+ SHADOW_ERROR("can't find all shadows of mfn %"PRI_mfn" "
"(shadow_flags=%08x)\n",
mfn_x(gmfn), pg->shadow_flags);
domain_crash(d);
if ( v->arch.paging.mode != old_mode )
{
- SHADOW_PRINTK("new paging mode: d=%u v=%u pe=%d gl=%u "
+ SHADOW_PRINTK("new paging mode: %pv pe=%d gl=%u "
"sl=%u (was g=%u s=%u)\n",
- d->domain_id, v->vcpu_id,
+ v,
is_hvm_domain(d) ? hvm_paging_enabled(v) : 1,
v->arch.paging.mode->guest_levels,
v->arch.paging.mode->shadow.shadow_levels,
if ( v != current && vcpu_runnable(v) )
{
- SHADOW_ERROR("Some third party (d=%u v=%u) is changing "
- "this HVM vcpu's (d=%u v=%u) paging mode "
+ SHADOW_ERROR("Some third party (%pv) is changing "
+ "this HVM vcpu's (%pv) paging mode "
"while it is running.\n",
- current->domain->domain_id, current->vcpu_id,
- v->domain->domain_id, v->vcpu_id);
+ current, v);
/* It's not safe to do that because we can't change
* the host CR3 for a running domain */
domain_crash(v->domain);
set_fl1_shadow_status(struct domain *d, gfn_t gfn, mfn_t smfn)
/* Put an FL1 shadow into the hash table */
{
- SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%05lx\n",
+ SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%"PRI_mfn"\n",
gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn));
ASSERT(mfn_to_page(smfn)->u.sh.head);
{
int res;
- SHADOW_PRINTK("d=%d: gmfn=%lx, type=%08x, smfn=%lx\n",
+ SHADOW_PRINTK("d%d gmfn=%lx, type=%08x, smfn=%lx\n",
d->domain_id, mfn_x(gmfn), shadow_type, mfn_x(smfn));
ASSERT(mfn_to_page(smfn)->u.sh.head);
delete_fl1_shadow_status(struct domain *d, gfn_t gfn, mfn_t smfn)
/* Remove a shadow from the hash table */
{
- SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%05lx\n",
+ SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%"PRI_mfn"\n",
gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn));
ASSERT(mfn_to_page(smfn)->u.sh.head);
shadow_hash_delete(d, gfn_x(gfn), SH_type_fl1_shadow, smfn);
delete_shadow_status(struct domain *d, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
/* Remove a shadow from the hash table */
{
- SHADOW_PRINTK("d=%d: gmfn=%lx, type=%08x, smfn=%lx\n",
+ SHADOW_PRINTK("d%d gmfn=%"PRI_mfn", type=%08x, smfn=%"PRI_mfn"\n",
d->domain_id, mfn_x(gmfn), shadow_type, mfn_x(smfn));
ASSERT(mfn_to_page(smfn)->u.sh.head);
shadow_hash_delete(d, mfn_x(gmfn), shadow_type, smfn);
res = xsm_priv_mapping(XSM_TARGET, d, owner);
if ( !res ) {
res = get_page_from_l1e(sl1e, d, owner);
- SHADOW_PRINTK("privileged domain %d installs map of mfn %05lx "
- "which is owned by domain %d: %s\n",
+ SHADOW_PRINTK("privileged domain %d installs map of mfn %"PRI_mfn" "
+ "which is owned by d%d: %s\n",
d->domain_id, mfn_x(mfn), owner->domain_id,
res >= 0 ? "success" : "failed");
}
{
struct domain *d = v->domain;
mfn_t smfn = shadow_alloc(d, shadow_type, mfn_x(gmfn));
- SHADOW_DEBUG(MAKE_SHADOW, "(%05lx, %u)=>%05lx\n",
+ SHADOW_DEBUG(MAKE_SHADOW, "(%"PRI_mfn", %u)=>%"PRI_mfn"\n",
mfn_x(gmfn), shadow_type, mfn_x(smfn));
if ( sh_type_has_up_pointer(d, shadow_type) )
u32 t = sp->u.sh.type;
mfn_t gmfn, sl4mfn;
- SHADOW_DEBUG(DESTROY_SHADOW,
- "%s(%05lx)\n", __func__, mfn_x(smfn));
+ SHADOW_DEBUG(DESTROY_SHADOW, "%"PRI_mfn"\n", mfn_x(smfn));
ASSERT(t == SH_type_l4_shadow);
ASSERT(sp->u.sh.head);
u32 t = sp->u.sh.type;
mfn_t gmfn, sl3mfn;
- SHADOW_DEBUG(DESTROY_SHADOW,
- "%s(%05lx)\n", __func__, mfn_x(smfn));
+ SHADOW_DEBUG(DESTROY_SHADOW, "%"PRI_mfn"\n", mfn_x(smfn));
ASSERT(t == SH_type_l3_shadow);
ASSERT(sp->u.sh.head);
u32 t = sp->u.sh.type;
mfn_t gmfn, sl2mfn;
- SHADOW_DEBUG(DESTROY_SHADOW,
- "%s(%05lx)\n", __func__, mfn_x(smfn));
+ SHADOW_DEBUG(DESTROY_SHADOW, "%"PRI_mfn"\n", mfn_x(smfn));
#if GUEST_PAGING_LEVELS >= 3
ASSERT(t == SH_type_l2_shadow || t == SH_type_l2h_shadow);
struct page_info *sp = mfn_to_page(smfn);
u32 t = sp->u.sh.type;
- SHADOW_DEBUG(DESTROY_SHADOW,
- "%s(%05lx)\n", __func__, mfn_x(smfn));
+ SHADOW_DEBUG(DESTROY_SHADOW, "%"PRI_mfn"\n", mfn_x(smfn));
ASSERT(t == SH_type_l1_shadow || t == SH_type_fl1_shadow);
ASSERT(sp->u.sh.head);
{
// attempt by the guest to write to a xen reserved slot
//
- SHADOW_PRINTK("%s out-of-range update "
- "sl4mfn=%05lx index=%#x val=%" SH_PRI_pte "\n",
- __func__, mfn_x(sl4mfn), shadow_index, new_sl4e.l4);
+ SHADOW_PRINTK("out-of-range update "
+ "sl4mfn=%"PRI_mfn" index=%#x val=%" SH_PRI_pte "\n",
+ mfn_x(sl4mfn), shadow_index, new_sl4e.l4);
if ( shadow_l4e_get_flags(new_sl4e) & _PAGE_PRESENT )
{
SHADOW_ERROR("out-of-range l4e update\n");
int fast_emul = 0;
#endif
- SHADOW_PRINTK("d:v=%u:%u va=%#lx err=%u, rip=%lx\n",
- v->domain->domain_id, v->vcpu_id, va, regs->error_code,
- regs->eip);
+ SHADOW_PRINTK("%pv va=%#lx err=%#x, rip=%lx\n",
+ v, va, regs->error_code, regs->eip);
perfc_incr(shadow_fault);
}
#endif
- SHADOW_PRINTK("d=%u v=%u guest_table=%05lx\n",
- d->domain_id, v->vcpu_id,
- (unsigned long)pagetable_get_pfn(v->arch.guest_table));
+ SHADOW_PRINTK("%pv guest_table=%"PRI_mfn"\n",
+ v, (unsigned long)pagetable_get_pfn(v->arch.guest_table));
#if GUEST_PAGING_LEVELS == 4
if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32bit_domain(d) )