OS_S8, OS_S16, OS_S32, OS_S64, OS_S128,
{ single, double, extended, comp, float128 }
OS_F32, OS_F64, OS_F80, OS_C64, OS_F128,
- { multi-media sizes: split in byte, word, dword, ... }
- { entities, then the signed counterparts }
- OS_M8, OS_M16, OS_M32, OS_M64, OS_M128, OS_M256, OS_M512,
- OS_MS8, OS_MS16, OS_MS32, OS_MS64, OS_MS128, OS_MS256, OS_MS512,
- { multi-media sizes: single-precision floating-point }
- OS_MF32, OS_MF128, OS_MF256, OS_MF512,
- { multi-media sizes: double-precision floating-point }
- OS_MD64, OS_MD128, OS_MD256, OS_MD512);
+ { multi-media sizes: these describe only the register size, not how the
+   register is split; that information must be passed separately }
+ OS_M8, OS_M16, OS_M32, OS_M64, OS_M128, OS_M256, OS_M512);
{ Register types }
TRegisterType = (
{ floating point values }
4, 8, 10, 8, 16,
{ multimedia values }
- 1, 2, 4, 8, 16, 32, 64,
- 1, 2, 4, 8, 16, 32, 64,
- { single-precision multimedia values }
- 4, 16, 32, 64,
- { double-precision multimedia values }
- 8, 16, 32, 64);
+ 1, 2, 4, 8, 16, 32, 64);
tfloat2tcgsize: array[tfloattype] of tcgsize =
(OS_F32,OS_F64,OS_F80,OS_F80,OS_C64,OS_C64,OS_F128);
OS_8, OS_16, OS_32, OS_64, OS_128,
OS_F32, OS_F64, OS_F80, OS_C64, OS_F128,
- OS_M8, OS_M16, OS_M32, OS_M64, OS_M128, OS_M256, OS_M512,
- OS_M8, OS_M16, OS_M32, OS_M64, OS_M128, OS_M256, OS_M512,
- OS_MF32, OS_MF128,OS_MF256,OS_MF512,
- OS_MD64, OS_MD128,OS_MD256,OS_MD512);
+ OS_M8, OS_M16, OS_M32, OS_M64, OS_M128, OS_M256, OS_M512);
tcgsize2signed : array[tcgsize] of tcgsize = (OS_NO,
OS_S8, OS_S16, OS_S32, OS_S64, OS_S128,
OS_F32, OS_F64, OS_F80, OS_C64, OS_F128,
- OS_MS8, OS_MS16, OS_MS32, OS_MS64, OS_MS128,OS_MS256,OS_MS512,
- OS_MS8, OS_MS16, OS_MS32, OS_MS64, OS_MS128,OS_MS256,OS_MS512,
- OS_MF32, OS_MF128,OS_MF256,OS_MF512,
- OS_MD64, OS_MD128,OS_MD256,OS_MD512);
+ OS_M8, OS_M16, OS_M32, OS_M64, OS_M128, OS_M256,OS_M512);
tcgloc2str : array[TCGLoc] of string[12] = (
begin
case a of
4:
- result := OS_MF32;
+ result := OS_M32;
16:
- result := OS_MF128;
+ result := OS_M128;
32:
- result := OS_MF256;
+ result := OS_M256;
64:
- result := OS_MF512;
+ result := OS_M512;
else
result := int_cgsize(a);
end;
begin
case a of
8:
- result := OS_MD64;
+ result := OS_M64;
16:
- result := OS_MD128;
+ result := OS_M128;
32:
- result := OS_MD256;
+ result := OS_M256;
64:
- result := OS_MD512;
+ result := OS_M512;
else
result := int_cgsize(a);
end;
TCGSize2OpSize: Array[tcgsize] of topsize =
(S_NO,S_B,S_W,S_L,S_Q,S_XMM,S_B,S_W,S_L,S_Q,S_XMM,
S_FS,S_FL,S_FX,S_IQ,S_FXX,
- S_NO,S_NO,S_NO,S_MD,S_XMM,S_YMM,S_ZMM,
- S_NO,S_NO,S_NO,S_NO,S_XMM,S_YMM,S_ZMM,
- S_NO,S_XMM,S_YMM,S_ZMM,
- S_NO,S_XMM,S_YMM,S_ZMM);
+ S_NO,S_NO,S_NO,S_MD,S_XMM,S_YMM,S_ZMM);
{$elseif defined(i386)}
TCGSize2OpSize: Array[tcgsize] of topsize =
(S_NO,S_B,S_W,S_L,S_L,S_T,S_B,S_W,S_L,S_L,S_L,
S_FS,S_FL,S_FX,S_IQ,S_FXX,
- S_NO,S_NO,S_NO,S_MD,S_XMM,S_YMM,S_ZMM,
- S_NO,S_NO,S_NO,S_NO,S_XMM,S_YMM,S_ZMM,
- S_NO,S_XMM,S_YMM,S_ZMM,
- S_NO,S_XMM,S_YMM,S_ZMM);
+ S_NO,S_NO,S_NO,S_MD,S_XMM,S_YMM,S_ZMM);
{$elseif defined(i8086)}
TCGSize2OpSize: Array[tcgsize] of topsize =
(S_NO,S_B,S_W,S_W,S_W,S_T,S_B,S_W,S_W,S_W,S_W,
S_FS,S_FL,S_FX,S_IQ,S_FXX,
- S_NO,S_NO,S_NO,S_MD,S_XMM,S_YMM,S_ZMM,
- S_NO,S_NO,S_NO,S_NO,S_XMM,S_YMM,S_ZMM,
- S_NO,S_XMM,S_YMM,S_ZMM,
- S_NO,S_XMM,S_YMM,S_ZMM);
+ S_NO,S_NO,S_NO,S_MD,S_XMM,S_YMM,S_ZMM);
{$endif}
{$ifndef NOTARGETWIN}
OS_M64:
result:=rg[R_MMREGISTER].getregister(list,R_SUBQ);
OS_M128,
- OS_F128,
- OS_MF128,
- OS_MD128:
+ OS_F128:
result:=rg[R_MMREGISTER].getregister(list,R_SUBMMX); { R_SUBMMWHOLE seems a bit dangerous and ambiguous, so changed to R_SUBMMX. [Kit] }
- OS_M256,
- OS_MF256,
- OS_MD256:
+ OS_M256:
result:=rg[R_MMREGISTER].getregister(list,R_SUBMMY);
- OS_M512,
- OS_MF512,
- OS_MD512:
+ OS_M512:
result:=rg[R_MMREGISTER].getregister(list,R_SUBMMZ);
else
internalerror(200506041);
if fromsize=tosize then
{ needs correct size in case of spilling }
case fromsize of
- OS_F32,
- OS_MF128:
+ OS_F32:
if UseAVX then
instr:=taicpu.op_reg_reg(A_VMOVAPS,S_NO,reg1,reg2)
else
instr:=taicpu.op_reg_reg(A_MOVAPS,S_NO,reg1,reg2);
- OS_F64,
- OS_MD128:
+ OS_F64:
if UseAVX then
instr:=taicpu.op_reg_reg(A_VMOVAPD,S_NO,reg1,reg2)
else
instr:=taicpu.op_reg_reg(A_VMOVQ,S_NO,reg1,reg2)
else
instr:=taicpu.op_reg_reg(A_MOVQ,S_NO,reg1,reg2);
- OS_M128, OS_MS128:
+ OS_M128:
if UseAVX then
instr:=taicpu.op_reg_reg(A_VMOVDQA,S_NO,reg1,reg2)
else
instr:=taicpu.op_reg_reg(A_MOVDQA,S_NO,reg1,reg2);
- OS_MF256,
- OS_MF512:
- if UseAVX then
- instr:=taicpu.op_reg_reg(A_VMOVAPS,S_NO,reg1,reg2)
- else
- { SSE doesn't support 512-bit vectors }
- InternalError(2018012931);
- OS_MD256,
- OS_MD512:
- if UseAVX then
- instr:=taicpu.op_reg_reg(A_VMOVAPD,S_NO,reg1,reg2)
- else
- { SSE doesn't support 512-bit vectors }
- InternalError(2018012932);
- OS_M256, OS_MS256,
- OS_M512, OS_MS512:
+ OS_M256,
+ OS_M512:
if UseAVX then
instr:=taicpu.op_reg_reg(A_VMOVDQA,S_NO,reg1,reg2)
else
op := A_VMOVQ
else
op := A_MOVQ;
- OS_MF128:
- { Use XMM transfer of packed singles }
- if UseAVX then
- begin
- if GetRefAlignment(tmpref) = 16 then
- op := A_VMOVAPS
- else
- op := A_VMOVUPS
- end
- else
- begin
- if GetRefAlignment(tmpref) = 16 then
- op := A_MOVAPS
- else
- op := A_MOVUPS
- end;
- OS_MD128:
- { Use XMM transfer of packed doubles }
- if UseAVX then
- begin
- if GetRefAlignment(tmpref) = 16 then
- op := A_VMOVAPD
- else
- op := A_VMOVUPD
- end
- else
- begin
- if GetRefAlignment(tmpref) = 16 then
- op := A_MOVAPD
- else
- op := A_MOVUPD
- end;
- OS_M128, OS_MS128:
+ OS_M128:
{ Use XMM integer transfer }
if UseAVX then
begin
if GetRefAlignment(tmpref) = 16 then
op := A_MOVDQA
else
- op := A_MOVDQU
+ op := A_MOVDQU;
end;
- OS_MF256:
- { Use YMM transfer of packed singles }
- if UseAVX then
- begin
- if GetRefAlignment(tmpref) = 32 then
- op := A_VMOVAPS
- else
- op := A_VMOVUPS
- end
- else
- { SSE doesn't support 256-bit vectors }
- InternalError(2018012934);
- OS_MD256:
- { Use YMM transfer of packed doubles }
- if UseAVX then
- begin
- if GetRefAlignment(tmpref) = 32 then
- op := A_VMOVAPD
- else
- op := A_VMOVUPD
- end
- else
- { SSE doesn't support 256-bit vectors }
- InternalError(2018012935);
- OS_M256, OS_MS256:
+ OS_M256:
{ Use YMM integer transfer }
if UseAVX then
begin
end
else
{ SSE doesn't support 256-bit vectors }
- InternalError(2018012936);
- OS_MF512:
- { Use ZMM transfer of packed singles }
- if UseAVX then
- begin
- if GetRefAlignment(tmpref) = 64 then
- op := A_VMOVAPS
- else
- op := A_VMOVUPS
- end
- else
- { SSE doesn't support 512-bit vectors }
- InternalError(2018012937);
- OS_MD512:
- { Use ZMM transfer of packed doubles }
- if UseAVX then
- begin
- if GetRefAlignment(tmpref) = 64 then
- op := A_VMOVAPD
- else
- op := A_VMOVUPD
- end
- else
- { SSE doesn't support 512-bit vectors }
- InternalError(2018012938);
- OS_M512, OS_MS512:
+ Internalerror(2020010401);
+ OS_M512:
{ Use ZMM integer transfer }
if UseAVX then
begin
op := A_VMOVQ
else
op := A_MOVQ;
- OS_MF128:
- { Use XMM transfer of packed singles }
- if UseAVX then
- begin
- if GetRefAlignment(tmpref) = 16 then
- op := A_VMOVAPS
- else
- op := A_VMOVUPS
- end else
- begin
- if GetRefAlignment(tmpref) = 16 then
- op := A_MOVAPS
- else
- op := A_MOVUPS
- end;
- OS_MD128:
- { Use XMM transfer of packed doubles }
- if UseAVX then
- begin
- if GetRefAlignment(tmpref) = 16 then
- op := A_VMOVAPD
- else
- op := A_VMOVUPD
- end else
- begin
- if GetRefAlignment(tmpref) = 16 then
- op := A_MOVAPD
- else
- op := A_MOVUPD
- end;
- OS_M128, OS_MS128:
+ OS_M128:
{ Use XMM integer transfer }
if UseAVX then
begin
else
op := A_MOVDQU
end;
- OS_MF256:
- { Use XMM transfer of packed singles }
- if UseAVX then
- begin
- if GetRefAlignment(tmpref) = 32 then
- op := A_VMOVAPS
- else
- op := A_VMOVUPS
- end else
- { SSE doesn't support 256-bit vectors }
- InternalError(2018012940);
- OS_MD256:
- { Use XMM transfer of packed doubles }
- if UseAVX then
- begin
- if GetRefAlignment(tmpref) = 32 then
- op := A_VMOVAPD
- else
- op := A_VMOVUPD
- end else
- { SSE doesn't support 256-bit vectors }
- InternalError(2018012941);
- OS_M256, OS_MS256:
+ OS_M256:
{ Use XMM integer transfer }
if UseAVX then
begin
end else
{ SSE doesn't support 256-bit vectors }
InternalError(2018012942);
- OS_MF512:
- { Use XMM transfer of packed singles }
- if UseAVX then
- begin
- if GetRefAlignment(tmpref) = 64 then
- op := A_VMOVAPS
- else
- op := A_VMOVUPS
- end else
- { SSE doesn't support 512-bit vectors }
- InternalError(2018012943);
- OS_MD512:
- { Use XMM transfer of packed doubles }
- if UseAVX then
- begin
- if GetRefAlignment(tmpref) = 64 then
- op := A_VMOVAPD
- else
- op := A_VMOVUPD
- end else
- { SSE doesn't support 512-bit vectors }
- InternalError(2018012944);
- OS_M512, OS_MS512:
+ OS_M512:
{ Use XMM integer transfer }
if UseAVX then
begin