if ( (regs[0] & 0x1f) != 0 )
continue;
}
+ /* Extended Topology leaves. */
+ else if ( input[0] == 0xb )
+ {
+ /* ECX[15:8] is the level type: 0 ends the list; 1 = SMT, 2 = Core. */
+ uint8_t level_type = regs[2] >> 8;
+
+ /* Advance to the next subleaf while valid level types remain. */
+ input[1]++;
+ if ( level_type >= 1 && level_type <= 2 )
+ continue;
+ }
input[0]++;
if ( !(input[0] & 0x80000000u) && (input[0] > base_max ) )
input[0] = 0x80000000u;
input[1] = XEN_CPUID_INPUT_UNUSED;
- if ( (input[0] == 4) || (input[0] == 7) )
+ if ( (input[0] == 4) || (input[0] == 7) || (input[0] == 0xb) )
input[1] = 0;
else if ( input[0] == 0xd )
input[1] = 1; /* Xen automatically calculates almost everything. */
p->basic.raw[0x6] = EMPTY_LEAF; /* Therm/Power not exposed to guests. */
p->basic.raw[0x8] = EMPTY_LEAF;
- p->basic.raw[0xb] = EMPTY_LEAF; /* TODO: Rework topology logic. */
+
+ /* TODO: Rework topology logic. */
+ /* Leaf 0xb now lives in the dedicated topo union, not basic.raw[0xb]. */
+ memset(p->topo.raw, 0, sizeof(p->topo.raw));
+
p->basic.raw[0xc] = EMPTY_LEAF;
p->extd.e1d &= ~CPUID_COMMON_1D_FEATURES;
{
switch ( i )
{
- case 0x4: case 0x7: case 0xd:
+ case 0x4: case 0x7: case 0xb: case 0xd:
/* Multi-invocation leaves. Deferred. */
continue;
}
cpuid_count_leaf(7, i, &p->feat.raw[i]);
}
+ if ( p->basic.max_leaf >= 0xb )
+ {
+ /*
+ * Union-pun one leaf's worth of raw registers with the decoded
+ * topology view, so the fields can be inspected as they are read.
+ */
+ union {
+ struct cpuid_leaf l;
+ struct cpuid_topo_leaf t;
+ } u;
+
+ for ( i = 0; i < ARRAY_SIZE(p->topo.raw); ++i )
+ {
+ cpuid_count_leaf(0xb, i, &u.l);
+
+ /* An invalid level type (0) terminates the subleaf list. */
+ if ( u.t.type == 0 )
+ break;
+
+ p->topo.subleaf[i] = u.t;
+ }
+
+ /*
+ * The choice of CPUID_GUEST_NR_TOPO is per the manual. It may need
+ * to grow for future hardware.
+ */
+ if ( i == ARRAY_SIZE(p->topo.raw) &&
+ (cpuid_count_leaf(0xb, i, &u.l), u.t.type != 0) )
+ printk(XENLOG_WARNING
+ "CPUID: Insufficient Leaf 0xb space for this hardware\n");
+ }
+
if ( p->basic.max_leaf >= XSTATE_CPUID )
{
uint64_t xstates;
*res = p->feat.raw[subleaf];
break;
+ case 0xb:
+ /* Out-of-range subleaves are not served; *res keeps its prior value. */
+ if ( subleaf >= ARRAY_SIZE(p->topo.raw) )
+ return;
+
+ *res = p->topo.raw[subleaf];
+ break;
+
case XSTATE_CPUID:
if ( !p->basic.xsave || subleaf >= ARRAY_SIZE(p->xstate.raw) )
return;
ctl->input[1] >= ARRAY_SIZE(p->feat.raw) )
return 0;
+ /* Leaf 0xb: bound the subleaf index, mirroring the leaf 7 check above. */
+ if ( ctl->input[0] == 0xb &&
+ ctl->input[1] >= ARRAY_SIZE(p->topo.raw) )
+ return 0;
+
BUILD_BUG_ON(ARRAY_SIZE(p->xstate.raw) < 2);
if ( ctl->input[0] == XSTATE_CPUID &&
ctl->input[1] != 1 ) /* Everything else automatically calculated. */
p->feat.raw[ctl->input[1]] = leaf;
break;
+ case 0xb:
+ p->topo.raw[ctl->input[1]] = leaf;
+ break;
+
case XSTATE_CPUID:
p->xstate.raw[ctl->input[1]] = leaf;
break;
#define CPUID_GUEST_NR_BASIC (0xdu + 1)
#define CPUID_GUEST_NR_FEAT (0u + 1)
#define CPUID_GUEST_NR_CACHE (5u + 1)
+/* Two topology subleaves: enough for the SMT and Core levels handled here. */
+#define CPUID_GUEST_NR_TOPO (1u + 1)
#define CPUID_GUEST_NR_XSTATE (62u + 1)
#define CPUID_GUEST_NR_EXTD_INTEL (0x8u + 1)
#define CPUID_GUEST_NR_EXTD_AMD (0x1cu + 1)
uint64_t :64, :64; /* Leaf 0x9 - DCA */
/* Leaf 0xa - Intel PMU. */
- uint8_t pmu_version;
+ /* Pad leaf 0xa out to a full 16-byte leaf so later leaves keep their slots. */
+ uint8_t pmu_version, _pmu[15];
+
+ uint64_t :64, :64; /* Leaf 0xb - Topology. */
+ uint64_t :64, :64; /* Leaf 0xc - rsvd */
+ uint64_t :64, :64; /* Leaf 0xd - XSTATE. */
};
} basic;
};
} feat;
+ /* Extended topology enumeration: 0x0000000B[xx] */
+ union {
+ struct cpuid_leaf raw[CPUID_GUEST_NR_TOPO];
+ /* Register layout per the Intel SDM definition of leaf 0xb. */
+ struct cpuid_topo_leaf {
+ /* EAX[4:0]: bits to right-shift the x2APIC ID for the next level. */
+ uint32_t id_shift:5, :27;
+ /* EBX[15:0]: number of logical processors at this level. */
+ uint16_t nr_logical, :16;
+ /* ECX[7:0]: input level number; ECX[15:8]: level type (1=SMT, 2=Core). */
+ uint8_t level, type, :8, :8;
+ /* EDX: x2APIC ID of the current logical processor. */
+ uint32_t x2apic_id;
+ } subleaf[CPUID_GUEST_NR_TOPO];
+ } topo;
+
/* Xstate feature leaf: 0x0000000D[xx] */
union {
struct cpuid_leaf raw[CPUID_GUEST_NR_XSTATE];