* Ordinary packed integers:
* - 64 bits without prefix 66 (MMX)
* - 128 bits with prefix 66 (SSEn)
- * - 128/256 bits depending on VEX.L (AVX)
+ * - 128/256/512 bits depending on VEX.L/EVEX.LR (AVX+)
*/
simd_packed_int,
/*
* Ordinary packed/scalar floating point:
* - 128 bits without prefix or with prefix 66 (SSEn)
- * - 128/256 bits depending on VEX.L (AVX)
+ * - 128/256/512 bits depending on VEX.L/EVEX.LR (AVX+)
* - 32 bits with prefix F3 (scalar single)
 * - 64 bits with prefix F2 (scalar double)
*/
/*
* Packed floating point:
* - 128 bits without prefix or with prefix 66 (SSEn)
- * - 128/256 bits depending on VEX.L (AVX)
+ * - 128/256/512 bits depending on VEX.L/EVEX.LR (AVX+)
*/
simd_packed_fp,
/*
* Single precision packed/scalar floating point:
* - 128 bits without prefix (SSEn)
- * - 128/256 bits depending on VEX.L, no prefix (AVX)
+ * - 128/256/512 bits depending on VEX.L/EVEX.LR (AVX+)
* - 32 bits with prefix F3 (scalar)
*/
simd_single_fp,
/*
* Scalar floating point:
- * - 32/64 bits depending on VEX.W
+ * - 32/64 bits depending on VEX.W/EVEX.W
*/
simd_scalar_vexw,
#define lock_prefix (state->lock_prefix)
#define vex (state->vex)
#define evex (state->evex)
+#define evex_encoded() (evex.mbs)
#define ea (state->ea)
static int
opcode |= b | MASK_INSR(vex.pfx, X86EMUL_OPC_PFX_MASK);
+ if ( !evex_encoded() )
+ evex.lr = vex.l;
+
if ( !(d & ModRM) )
break;
}
/* fall through */
case vex_66:
- op_bytes = 16 << vex.l;
+ op_bytes = 16 << evex.lr;
break;
default:
op_bytes = 0;
case simd_any_fp:
switch ( vex.pfx )
{
- default: op_bytes = 16 << vex.l; break;
- case vex_f3: op_bytes = 4; break;
- case vex_f2: op_bytes = 8; break;
+ default:
+ op_bytes = 16 << evex.lr;
+ break;
+ case vex_f3:
+ generate_exception_if(evex_encoded() && evex.w, EXC_UD);
+ op_bytes = 4;
+ break;
+ case vex_f2:
+ generate_exception_if(evex_encoded() && !evex.w, EXC_UD);
+ op_bytes = 8;
+ break;
}
break;