{ 0x00, 0, 0}
};
-
-enum _cache_type
-{
- CACHE_TYPE_NULL = 0,
- CACHE_TYPE_DATA = 1,
- CACHE_TYPE_INST = 2,
- CACHE_TYPE_UNIFIED = 3
-};
-
-union _cpuid4_leaf_eax {
- struct {
- enum _cache_type type:5;
- unsigned int level:3;
- unsigned int is_self_initializing:1;
- unsigned int is_fully_associative:1;
- unsigned int reserved:4;
- unsigned int num_threads_sharing:12;
- unsigned int num_cores_on_die:6;
- } split;
- u32 full;
-};
-
-union _cpuid4_leaf_ebx {
- struct {
- unsigned int coherency_line_size:12;
- unsigned int physical_line_partition:10;
- unsigned int ways_of_associativity:10;
- } split;
- u32 full;
-};
-
-union _cpuid4_leaf_ecx {
- struct {
- unsigned int number_of_sets:32;
- } split;
- u32 full;
-};
-
-struct _cpuid4_info {
- union _cpuid4_leaf_eax eax;
- union _cpuid4_leaf_ebx ebx;
- union _cpuid4_leaf_ecx ecx;
- unsigned long size;
-};
-
unsigned short num_cache_leaves;
-static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+int cpuid4_cache_lookup(int index, struct cpuid4_info *this_leaf)
{
union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_ebx ebx;
/*
 * Whenever possible use cpuid(4), deterministic cache
 * parameters cpuid leaf to find the cache details
 */
for (i = 0; i < num_cache_leaves; i++) {
- struct _cpuid4_info this_leaf;
+ struct cpuid4_info this_leaf;
int retval;
#include <xen/nodemask.h>
#include <xen/cpu.h>
#include <xsm/xsm.h>
+#include <asm/psr.h>
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
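+/* Result block filled by l3_cache_get(), possibly on a remote CPU. */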
+struct l3_cache_info {
+ int ret;
+ unsigned long size;
+};
+
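+/*
+ * Runs on the CPU whose cache is being queried (locally or via IPI).
+ * CPUID leaf 4, subleaf 3 describes the L3 cache on the CMT-capable
+ * processors this code targets.
+ */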
+static void l3_cache_get(void *arg)
+{
+ struct cpuid4_info info;
+ struct l3_cache_info *l3_info = arg;
+
+ l3_info->ret = cpuid4_cache_lookup(3, &info);
+ if ( !l3_info->ret )
+        l3_info->size = info.size / 1024; /* convert bytes to KB */
+}
+
long cpu_up_helper(void *data)
{
int cpu = (unsigned long)data;
}
break;
+ case XEN_SYSCTL_psr_cmt_op:
+ if ( !psr_cmt_enabled() )
+ return -ENODEV;
+
+ if ( sysctl->u.psr_cmt_op.flags != 0 )
+ return -EINVAL;
+
+ switch ( sysctl->u.psr_cmt_op.cmd )
+ {
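+        /* CMT is only usable if L3 occupancy monitoring is advertised. */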
+ case XEN_SYSCTL_PSR_CMT_enabled:
+ sysctl->u.psr_cmt_op.u.data =
+ (psr_cmt->features & PSR_RESOURCE_TYPE_L3) &&
+ (psr_cmt->l3.features & PSR_CMT_L3_OCCUPANCY);
+ break;
+ case XEN_SYSCTL_PSR_CMT_get_total_rmid:
+ sysctl->u.psr_cmt_op.u.data = psr_cmt->rmid_max;
+ break;
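+        /* Factor converting raw RMID occupancy counts to bytes (CPUID leaf 0xf). */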
+ case XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor:
+ sysctl->u.psr_cmt_op.u.data = psr_cmt->l3.upscaling_factor;
+ break;
+ case XEN_SYSCTL_PSR_CMT_get_l3_cache_size:
+ {
+ struct l3_cache_info info;
+ unsigned int cpu = sysctl->u.psr_cmt_op.u.l3_cache.cpu;
+
+ if ( (cpu >= nr_cpu_ids) || !cpu_online(cpu) )
+ {
+ ret = -ENODEV;
+ sysctl->u.psr_cmt_op.u.data = 0;
+ break;
+ }
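+            /* CPUID must execute on the target CPU: run locally or via IPI. */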
+ if ( cpu == smp_processor_id() )
+ l3_cache_get(&info);
+ else
+ on_selected_cpus(cpumask_of(cpu), l3_cache_get, &info, 1);
+
+ ret = info.ret;
+ sysctl->u.psr_cmt_op.u.data = (ret ? 0 : info.size);
+ break;
+ }
+ default:
+ sysctl->u.psr_cmt_op.u.data = 0;
+ ret = -ENOSYS;
+ break;
+ }
+
+ if ( __copy_to_guest(u_sysctl, sysctl, 1) )
+ ret = -EFAULT;
+
+ break;
+
default:
ret = -ENOSYS;
break;
#define cpu_has_vmx boot_cpu_has(X86_FEATURE_VMXE)
#define cpu_has_cpuid_faulting boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
+
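+/*
+ * Register layouts for CPUID leaf 4 (deterministic cache parameters),
+ * exported here so cpuid4_cache_lookup() can be called from sysctl code.
+ */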
+enum _cache_type {
+ CACHE_TYPE_NULL = 0,
+ CACHE_TYPE_DATA = 1,
+ CACHE_TYPE_INST = 2,
+ CACHE_TYPE_UNIFIED = 3
+};
+
+union _cpuid4_leaf_eax {
+ struct {
+ enum _cache_type type:5;
+ unsigned int level:3;
+ unsigned int is_self_initializing:1;
+ unsigned int is_fully_associative:1;
+ unsigned int reserved:4;
+ unsigned int num_threads_sharing:12;
+ unsigned int num_cores_on_die:6;
+ } split;
+ u32 full;
+};
+
+union _cpuid4_leaf_ebx {
+ struct {
+ unsigned int coherency_line_size:12;
+ unsigned int physical_line_partition:10;
+ unsigned int ways_of_associativity:10;
+ } split;
+ u32 full;
+};
+
+union _cpuid4_leaf_ecx {
+ struct {
+ unsigned int number_of_sets:32;
+ } split;
+ u32 full;
+};
+
+struct cpuid4_info {
+ union _cpuid4_leaf_eax eax;
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ unsigned long size;
+};
+
+int cpuid4_cache_lookup(int index, struct cpuid4_info *this_leaf);
#endif
#endif /* __ASM_I386_CPUFEATURE_H */
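For reference, the lookup routine (not visible in these hunks) derives the
size that l3_cache_get() later converts to KB from the fields above, using
the Intel SDM product for leaf 4. A minimal sketch, assuming the usual
cpuid_count() helper; leaf4_cache_bytes() is an illustrative name, not part
of the patch:

    /* Sketch: the value cpuid4_cache_lookup() stores in this_leaf->size. */
    static unsigned long leaf4_cache_bytes(unsigned int subleaf)
    {
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned int edx;

        cpuid_count(4, subleaf, &eax.full, &ebx.full, &ecx.full, &edx);
        if ( eax.split.type == CACHE_TYPE_NULL )
            return 0;

        /* SDM: each field encodes (value - 1). */
        return (unsigned long)(ebx.split.ways_of_associativity + 1) *
               (ebx.split.physical_line_partition + 1) *
               (ebx.split.coherency_line_size + 1) *
               (ecx.split.number_of_sets + 1);
    }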
typedef struct xen_sysctl_coverage_op xen_sysctl_coverage_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_coverage_op_t);
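+/* Cache Monitoring Technology (CMT) sub-commands for XEN_SYSCTL_psr_cmt_op. */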
+#define XEN_SYSCTL_PSR_CMT_get_total_rmid 0
+#define XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor 1
+/* The L3 cache size is returned in KB. */
+#define XEN_SYSCTL_PSR_CMT_get_l3_cache_size 2
+#define XEN_SYSCTL_PSR_CMT_enabled 3
+struct xen_sysctl_psr_cmt_op {
+ uint32_t cmd; /* IN: XEN_SYSCTL_PSR_CMT_* */
+    uint32_t flags;      /* IN: must be zero (reserved for future use) */
+ union {
+ uint64_t data; /* OUT */
+ struct {
+ uint32_t cpu; /* IN */
+            uint32_t rsvd;    /* padding */
+ } l3_cache;
+ } u;
+};
+typedef struct xen_sysctl_psr_cmt_op xen_sysctl_psr_cmt_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cmt_op_t);
struct xen_sysctl {
uint32_t cmd;
#define XEN_SYSCTL_cpupool_op 18
#define XEN_SYSCTL_scheduler_op 19
#define XEN_SYSCTL_coverage_op 20
+#define XEN_SYSCTL_psr_cmt_op 21
uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
union {
struct xen_sysctl_readconsole readconsole;
struct xen_sysctl_cpupool_op cpupool_op;
struct xen_sysctl_scheduler_op scheduler_op;
struct xen_sysctl_coverage_op coverage_op;
+ struct xen_sysctl_psr_cmt_op psr_cmt_op;
uint8_t pad[128];
} u;
};
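As a usage sketch (not part of the patch): a toolstack would drive the new
sub-op roughly as below. do_sysctl() stands in for whatever hypercall wrapper
the caller uses (libxc keeps an internal helper of this shape), so both it and
query_l3_size_kb() are assumptions for illustration:

    #include <string.h>
    #include <stdio.h>
    #include <inttypes.h>
    #include <xen/sysctl.h>

    extern int do_sysctl(struct xen_sysctl *sysctl); /* toolstack-provided (assumed) */

    static int query_l3_size_kb(unsigned int cpu)
    {
        struct xen_sysctl sysctl;

        memset(&sysctl, 0, sizeof(sysctl));
        sysctl.cmd = XEN_SYSCTL_psr_cmt_op;
        sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
        sysctl.u.psr_cmt_op.cmd = XEN_SYSCTL_PSR_CMT_get_l3_cache_size;
        sysctl.u.psr_cmt_op.flags = 0;            /* non-zero draws -EINVAL */
        sysctl.u.psr_cmt_op.u.l3_cache.cpu = cpu; /* L3 reachable from this CPU */

        if ( do_sysctl(&sysctl) )
            return -1;

        printf("L3 size: %" PRIu64 " KB\n", sysctl.u.psr_cmt_op.u.data);
        return 0;
    }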