    {
        memcpy(&p->arch.user_regs, stack_regs, CTXT_SWITCH_STACK_BYTES);
        vcpu_save_fpu(p);
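+        /* Revert this CPU to the default RMID (0) so cache occupancy
+         * is no longer attributed to the outgoing vCPU. */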
+        if ( psr_cmt_enabled() )
+            psr_assoc_rmid(0);
        p->arch.ctxt_switch_from(p);
    }
        }
        vcpu_restore_fpu_eager(n);
        n->arch.ctxt_switch_to(n);
+
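+        /* Associate this CPU with the incoming domain's RMID so its
+         * cache occupancy is tracked while the vCPU runs here. */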
+        if ( psr_cmt_enabled() && n->domain->arch.psr_rmid > 0 )
+            psr_assoc_rmid(n->domain->arch.psr_rmid);
    }

    gdt = !is_pv_32on64_vcpu(n) ? per_cpu(gdt_table, cpu) :
#define PSR_CMT (1<<0)
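+/*
+ * Per-CPU shadow of the last value written to MSR_IA32_PSR_ASSOC.
+ * Caching it lets psr_assoc_rmid() skip the WRMSR when the RMID is
+ * unchanged across a context switch.
+ */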
+struct psr_assoc {
+    uint64_t val;
+    bool_t initialized;
+};
+
struct psr_cmt *__read_mostly psr_cmt;
static bool_t __initdata opt_psr;
static unsigned int __initdata opt_rmid_max = 255;
static uint64_t rmid_mask;
+static DEFINE_PER_CPU(struct psr_assoc, psr_assoc);
static void __init parse_psr_param(char *s)
{
    d->arch.psr_rmid = 0;
}
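+
+/*
+ * Associate the current CPU with @rmid by updating MSR_IA32_PSR_ASSOC.
+ * Only the RMID field is modified; other bits of the MSR are preserved.
+ */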
+void psr_assoc_rmid(unsigned int rmid)
+{
+    uint64_t val, new_val;
+    struct psr_assoc *psra = &this_cpu(psr_assoc);
+
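+    /* First use on this CPU: seed the shadow copy from the hardware. */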
+    if ( !psra->initialized )
+    {
+        rdmsrl(MSR_IA32_PSR_ASSOC, psra->val);
+        psra->initialized = 1;
+    }
+    val = psra->val;
+
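+    /* Swap in the new RMID, leaving the MSR's non-RMID bits intact. */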
+    new_val = (val & ~rmid_mask) | (rmid & rmid_mask);
+    if ( val != new_val )
+    {
+        wrmsrl(MSR_IA32_PSR_ASSOC, new_val);
+        psra->val = new_val;
+    }
+}
+
/*
 * Local variables:
 * mode: C
#define MSR_IA32_TSC_DEADLINE 0x000006E0
#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0
+/* Platform Shared Resource MSRs (IA32_PQR_ASSOC in the Intel SDM) */
+#define MSR_IA32_PSR_ASSOC 0x00000c8f
+
/* Intel Model 6 */
#define MSR_P6_PERFCTR(n) (0x000000c1 + (n))
#define MSR_P6_EVNTSEL(n) (0x00000186 + (n))
int psr_alloc_rmid(struct domain *d);
void psr_free_rmid(struct domain *d);
+void psr_assoc_rmid(unsigned int rmid);
#endif /* __ASM_PSR_H__ */