P(cpu_has_pause_filter, "Pause-Intercept Filter");
P(cpu_has_pause_thresh, "Pause-Intercept Filter Threshold");
P(cpu_has_tsc_ratio, "TSC Rate MSR");
+ P(cpu_has_svm_sss, "NPT Supervisor Shadow Stack");
#undef P
if ( !printed )
vmcb->cstar, vmcb->sfmask);
printk("KernGSBase = 0x%016"PRIx64" PAT = 0x%016"PRIx64"\n",
vmcb->kerngsbase, vmcb_get_g_pat(vmcb));
+ printk("SSP = 0x%016"PRIx64" S_CET = 0x%016"PRIx64" ISST = 0x%016"PRIx64"\n",
+ vmcb->_ssp, vmcb->_msr_s_cet, vmcb->_msr_isst);
printk("H_CR3 = 0x%016"PRIx64" CleanBits = %#x\n",
vmcb_get_h_cr3(vmcb), vmcb->cleanbits.raw);
#define SVM_FEATURE_PAUSETHRESH 12 /* Pause intercept filter support */
#define SVM_FEATURE_VLOADSAVE 15 /* virtual vmload/vmsave */
#define SVM_FEATURE_VGIF 16 /* Virtual GIF */
+#define SVM_FEATURE_SSS 19 /* NPT Supervisor Shadow Stacks */
#define cpu_has_svm_feature(f) (svm_feature_flags & (1u << (f)))
#define cpu_has_svm_npt cpu_has_svm_feature(SVM_FEATURE_NPT)
#define cpu_has_pause_thresh cpu_has_svm_feature(SVM_FEATURE_PAUSETHRESH)
#define cpu_has_tsc_ratio cpu_has_svm_feature(SVM_FEATURE_TSCRATEMSR)
#define cpu_has_svm_vloadsave cpu_has_svm_feature(SVM_FEATURE_VLOADSAVE)
+#define cpu_has_svm_sss cpu_has_svm_feature(SVM_FEATURE_SSS)
#define SVM_PAUSEFILTER_INIT 4000
#define SVM_PAUSETHRESH_INIT 1000
VMEXIT_EXCEPTION_AC = 81, /* 0x51, alignment-check */
VMEXIT_EXCEPTION_MC = 82, /* 0x52, machine-check */
VMEXIT_EXCEPTION_XF = 83, /* 0x53, simd floating-point */
+/* VMEXIT_EXCEPTION_20 = 84, 0x54, #VE (Intel specific) */
+ VMEXIT_EXCEPTION_CP = 85, /* 0x55, control-flow protection */
/* exceptions 22-31 (exitcodes 86-95) are reserved */
bool seg:1; /* 8: cs, ds, es, ss, cpl */
bool cr2:1; /* 9: cr2 */
bool lbr:1; /* 10: debugctlmsr, last{branch,int}{to,from}ip */
+ bool :1;
+ bool cet:1; /* 12: msr_s_cet, ssp, msr_isst */
};
uint32_t raw;
} vmcbcleanbits_t;
bool _sev_enable :1;
bool _sev_es_enable :1;
bool _gmet :1;
- bool :1;
+ bool _np_sss :1;
bool _vte :1;
};
uint64_t _np_ctrl;
u64 rip;
u64 res14[11];
u64 rsp;
- u64 res15[3];
+ u64 _msr_s_cet; /* offset 0x400 + 0x1E0 - cleanbit 12 */
+ u64 _ssp; /* offset 0x400 + 0x1E8 | */
+ u64 _msr_isst; /* offset 0x400 + 0x1F0 v */
u64 rax;
u64 star;
u64 lstar;
VMCB_ACCESSORS(lastbranchtoip, lbr)
VMCB_ACCESSORS(lastintfromip, lbr)
VMCB_ACCESSORS(lastinttoip, lbr)
+VMCB_ACCESSORS(msr_s_cet, cet)
+VMCB_ACCESSORS(ssp, cet)
+VMCB_ACCESSORS(msr_isst, cet)
#undef VMCB_ACCESSORS