/* for move, move-object, long-to-int */
/* op vA, vB */
mov r1, rINST, lsr #12 @ r1<- B from 15:12
- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ lsl r0, rINST, #20 @ move field A (bits 11:8) into the top 4 bits
+ lsr r0, r0, #28 @ logical shift back down: r0 = (rINST >> 8) & 0xf, zero-extended, same as ubfx
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
GET_VREG r2, r1 @ r2<- fp[B]
GET_INST_OPCODE ip @ ip<- opcode from rINST
/* move-wide vA, vB */
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20 @ move field A (bits 11:8) into the top 4 bits (safe to clobber: B already copied to r3)
+ lsr rINST, rINST, #28 @ logical shift back: rINST = (old rINST >> 8) & 0xf, same result as ubfx
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- fp[B]
/* for move, move-object, long-to-int */
/* op vA, vB */
mov r1, rINST, lsr #12 @ r1<- B from 15:12
- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ lsl r0, rINST, #20
+ lsr r0, r0, #28
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
GET_VREG r2, r1 @ r2<- fp[B]
GET_INST_OPCODE ip @ ip<- opcode from rINST
.L_op_const_4: /* 0x12 */
/* File: arm/op_const_4.S */
/* const/4 vA, #+B */
- sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
+ lsl r1, rINST, #16 @ move field B (bits 15:12) into the top 4 bits
+ asr r1, r1, #28 @ ARITHMETIC shift back: replicates bit 15, yielding sign-extended B, same as sbfx
+ @ end replacement of sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ lsl r0, rINST, #20 @ move field A (bits 11:8) into the top 4 bits
+ lsr r0, r0, #28 @ LOGICAL shift back: r0 = (rINST >> 8) & 0xf, zero-extended, same as ubfx
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
GET_INST_OPCODE ip @ ip<- opcode from rINST
SET_VREG r1, r0 @ fp[A]<- r1
mov r3, rSELF @ r3<- self
bl MterpInstanceOf @ (index, &obj, method, self)
ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
PREFETCH_INST 2
cmp r1, #0 @ exception pending?
bne MterpException
* Return the length of an array.
*/
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
GET_VREG r0, r1 @ r0<- vB (object ref)
cmp r0, #0 @ is object null?
beq common_errNullObject @ yup, fail
* double to get a byte offset.
*/
/* goto +AA */
- sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
+ @ begin replacement of sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
+ lsl rINST, rINST, #16 @ move AA (bits 15:8) into the top byte
+ asr rINST, rINST, #24 @ arithmetic shift back: sign-extends the 8-bit branch offset, same as sbfx
+ @ end replacement of sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
b MterpCommonTakenBranchNoFlags
/* ------------------------------ */
*/
/* if-cmp vA, vB, +CCCC */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ lsl r0, rINST, #20
+ lsr r0, r0, #28
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
GET_VREG r3, r1 @ r3<- vB
GET_VREG r0, r0 @ r0<- vA
FETCH_S rINST, 1 @ rINST<- branch offset, in code units
*/
/* if-cmp vA, vB, +CCCC */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ lsl r0, rINST, #20
+ lsr r0, r0, #28
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
GET_VREG r3, r1 @ r3<- vB
GET_VREG r0, r0 @ r0<- vA
FETCH_S rINST, 1 @ rINST<- branch offset, in code units
*/
/* if-cmp vA, vB, +CCCC */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ lsl r0, rINST, #20
+ lsr r0, r0, #28
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
GET_VREG r3, r1 @ r3<- vB
GET_VREG r0, r0 @ r0<- vA
FETCH_S rINST, 1 @ rINST<- branch offset, in code units
*/
/* if-cmp vA, vB, +CCCC */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ lsl r0, rINST, #20
+ lsr r0, r0, #28
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
GET_VREG r3, r1 @ r3<- vB
GET_VREG r0, r0 @ r0<- vA
FETCH_S rINST, 1 @ rINST<- branch offset, in code units
*/
/* if-cmp vA, vB, +CCCC */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ lsl r0, rINST, #20
+ lsr r0, r0, #28
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
GET_VREG r3, r1 @ r3<- vB
GET_VREG r0, r0 @ r0<- vA
FETCH_S rINST, 1 @ rINST<- branch offset, in code units
*/
/* if-cmp vA, vB, +CCCC */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ lsl r0, rINST, #20
+ lsr r0, r0, #28
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
GET_VREG r3, r1 @ r3<- vB
GET_VREG r0, r0 @ r0<- vA
FETCH_S rINST, 1 @ rINST<- branch offset, in code units
mov r3, rSELF @ r3<- self
bl artGet32InstanceFromCode
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
cmp r3, #0
bne MterpPossibleException @ bail out
mov r3, rSELF @ r3<- self
bl artGet64InstanceFromCode
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
cmp r3, #0
bne MterpException @ bail out
mov r3, rSELF @ r3<- self
bl artGetObjInstanceFromCode
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
cmp r3, #0
bne MterpPossibleException @ bail out
mov r3, rSELF @ r3<- self
bl artGetBooleanInstanceFromCode
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
cmp r3, #0
bne MterpPossibleException @ bail out
mov r3, rSELF @ r3<- self
bl artGetByteInstanceFromCode
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
cmp r3, #0
bne MterpPossibleException @ bail out
mov r3, rSELF @ r3<- self
bl artGetCharInstanceFromCode
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
cmp r3, #0
bne MterpPossibleException @ bail out
mov r3, rSELF @ r3<- self
bl artGetShortInstanceFromCode
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
cmp r3, #0
bne MterpPossibleException @ bail out
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
GET_VREG r2, r2 @ r2<- fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
GET_VREG r2, r2 @ r2<- fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
GET_VREG r2, r2 @ r2<- fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
GET_VREG r2, r2 @ r2<- fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
GET_VREG r2, r2 @ r2<- fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r3 @ r0<- vB
@ optional op; may set condition codes
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r3 @ r0<- vB
@ optional op; may set condition codes
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20
+ lsr rINST, rINST, #28
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20
+ lsr rINST, rINST, #28
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r3 @ r0<- vB
@ optional op; may set condition codes
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20
+ lsr rINST, rINST, #28
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20
+ lsr rINST, rINST, #28
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
GET_VREG r0, r3 @ r0<- vB
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
@ optional op; may set condition codes
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
fsitos s1, s0 @ s1<- op
GET_INST_OPCODE ip @ extract opcode from rINST
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
fsitod d0, s0 @ d0<- op
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
/* for move, move-object, long-to-int */
/* op vA, vB */
mov r1, rINST, lsr #12 @ r1<- B from 15:12
- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ lsl r0, rINST, #20
+ lsr r0, r0, #28
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
GET_VREG r2, r1 @ r2<- fp[B]
GET_INST_OPCODE ip @ ip<- opcode from rINST
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
* For: long-to-double
*/
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
vldr d0, [r3] @ d0<- vAA
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
ftosizs s1, s0 @ s1<- op
GET_INST_OPCODE ip @ extract opcode from rINST
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20
+ lsr rINST, rINST, #28
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
GET_VREG r0, r3 @ r0<- vB
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
@ optional op; may set condition codes
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
vcvt.f64.f32 d0, s0 @ d0<- op
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
fldd d0, [r3] @ d0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
ftosizd s0, d0 @ s0<- op
GET_INST_OPCODE ip @ extract opcode from rINST
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20
+ lsr rINST, rINST, #28
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
fldd d0, [r3] @ d0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
vcvt.f32.f64 s0, d0 @ s0<- op
GET_INST_OPCODE ip @ extract opcode from rINST
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r3 @ r0<- vB
@ optional op; may set condition codes
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r3 @ r0<- vB
@ optional op; may set condition codes
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r3 @ r0<- vB
@ optional op; may set condition codes
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
.if 0
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
.if 0
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
.if 0
*
*/
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
cmp r1, #0 @ is second operand zero?
*
*/
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
cmp r1, #0 @ is second operand zero?
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
.if 0
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
.if 0
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
.if 0
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
.if 0
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
.if 0
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
.if 0
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20
+ lsr rINST, rINST, #28
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20
+ lsr rINST, rINST, #28
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
*/
/* mul-long/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20
+ lsr rINST, rINST, #28
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20
+ lsr rINST, rINST, #28
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20
+ lsr rINST, rINST, #28
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20
+ lsr rINST, rINST, #28
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20
+ lsr rINST, rINST, #28
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
*/
/* shl-long/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
*/
/* shr-long/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
*/
/* ushr-long/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
flds s1, [r3] @ s1<- vB
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
flds s1, [r3] @ s1<- vB
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
flds s1, [r3] @ s1<- vB
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
flds s1, [r3] @ s1<- vB
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
.if 0
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
fldd d1, [r3] @ d1<- vB
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
fldd d1, [r3] @ d1<- vB
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
fldd d1, [r3] @ d1<- vB
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
fldd d1, [r3] @ d1<- vB
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #20
+ lsr rINST, rINST, #28
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
/* binop/lit16 vA, vB, #+CCCC */
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20 @ move field A (bits 11:8) into the top 4 bits
+ lsr r9, r9, #28 @ logical shift back: r9 = (rINST >> 8) & 0xf, same as ubfx
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
.if 0
cmp r1, #0 @ is second operand zero?
/* binop/lit16 vA, vB, #+CCCC */
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
.if 0
cmp r1, #0 @ is second operand zero?
/* binop/lit16 vA, vB, #+CCCC */
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
.if 0
cmp r1, #0 @ is second operand zero?
*/
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
*/
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
/* binop/lit16 vA, vB, #+CCCC */
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
.if 0
cmp r1, #0 @ is second operand zero?
/* binop/lit16 vA, vB, #+CCCC */
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
.if 0
cmp r1, #0 @ is second operand zero?
/* binop/lit16 vA, vB, #+CCCC */
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #20
+ lsr r9, r9, #28
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
.if 0
cmp r1, #0 @ is second operand zero?
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
- ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
+ @ begin replacement of ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
+ lsl r1, r3, #19 @ move the 5-bit field at bits 12:8 into the top 5 bits
+ lsr r1, r1, #27 @ logical shift back: r1 = (r3 >> 8) & 0x1f, same as ubfx
+ @ NOTE(review): ubfx (and this lsl/lsr pair) is an UNSIGNED extract; the "(sign extended)" wording appears to be template boilerplate — here it extracts a 5-bit shift count. Confirm against the binop template.
+ @ end replacement of ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
.if 0
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
- ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
+ @ begin replacement of ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
+ lsl r1, r3, #19
+ lsr r1, r1, #27
+ @ end replacement of ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
.if 0
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
- ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
+ @ begin replacement of ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
+ lsl r1, r3, #19
+ lsr r1, r1, #27
+ @ end replacement of ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
.if 0
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
mov r2, rINST, lsr #12 @ r2<- B
FETCH r1, 1 @ r1<- field byte offset
GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
ldr r0, [r3, r1] @ r0<- obj.field
mov r2, rINST, lsr #12 @ r2<- B
FETCH ip, 1 @ ip<- field byte offset
GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
GET_VREG r0, r2 @ r0<- object we're operating on
bl artIGetObjectFromMterp @ (obj, offset)
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
cmp r3, #0
bne MterpPossibleException @ bail out
mov r2, rINST, lsr #12 @ r2<- B
FETCH r1, 1 @ r1<- field byte offset
GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
GET_VREG r0, r2 @ r0<- fp[A]
mov r2, rINST, lsr #12 @ r2<- B
FETCH r3, 1 @ r3<- field byte offset
GET_VREG r2, r2 @ r2<- fp[B], the object pointer
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ lsl r0, rINST, #20
+ lsr r0, r0, #28
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
cmp r2, #0 @ check object for null
beq common_errNullObject @ object was null
VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[A]
mov r2, rINST, lsr #12 @ r2<- B
FETCH r1, 1 @ r1<- field byte offset
GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
GET_VREG r0, r2 @ r0<- fp[A]
mov r2, rINST, lsr #12 @ r2<- B
FETCH r1, 1 @ r1<- field byte offset
GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
GET_VREG r0, r2 @ r0<- fp[A]
mov r2, rINST, lsr #12 @ r2<- B
FETCH r1, 1 @ r1<- field byte offset
GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
GET_VREG r0, r2 @ r0<- fp[A]
mov r2, rINST, lsr #12 @ r2<- B
FETCH r1, 1 @ r1<- field byte offset
GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
GET_VREG r0, r2 @ r0<- fp[A]
mov r2, rINST, lsr #12 @ r2<- B
FETCH r1, 1 @ r1<- field byte offset
GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
ldrb r0, [r3, r1] @ r0<- obj.field
mov r2, rINST, lsr #12 @ r2<- B
FETCH r1, 1 @ r1<- field byte offset
GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
ldrsb r0, [r3, r1] @ r0<- obj.field
mov r2, rINST, lsr #12 @ r2<- B
FETCH r1, 1 @ r1<- field byte offset
GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
ldrh r0, [r3, r1] @ r0<- obj.field
mov r2, rINST, lsr #12 @ r2<- B
FETCH r1, 1 @ r1<- field byte offset
GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #20
+ lsr r2, r2, #28
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
ldrsh r0, [r3, r1] @ r0<- obj.field
* to modest integer. The EABI convert function isn't doing this for us.
*/
f2l_doconv:
- ubfx r2, r0, #23, #8 @ grab the exponent
+ @ begin replacement of ubfx r2, r0, #23, #8 @ grab the exponent
+ lsl r2, r0, #1 @ discard the sign bit; biased exponent now in bits 31:24
+ lsr r2, r2, #24 @ r2<- biased exponent (IEEE-754 single, bits 30:23 of r0)
+ @ end replacement of ubfx r2, r0, #23, #8 @ grab the exponent
cmp r2, #0xbe @ MININT < x > MAXINT?
bhs f2l_special_cases
b __aeabi_f2lz @ tail call to convert float to long
* to modest integer. The EABI convert function isn't doing this for us.
*/
d2l_doconv:
- ubfx r2, r1, #20, #11 @ grab the exponent
+ @ begin replacement of ubfx r2, r1, #20, #11 @ grab the exponent
+ lsl r2, r1, #1
+ lsr r2, r2, #21
+ @ end replacement of ubfx r2, r1, #20, #11 @ grab the exponent
movw r3, #0x43e
cmp r2, r3 @ MINLONG < x > MAXLONG?
bhs d2l_special_cases