- ubfx r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE
+ @ begin replacement of ubfx r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE
+ lsl r2, r3, #(32-(LOCK_WORD_THIN_LOCK_COUNT_SIZE)-(LOCK_WORD_THIN_LOCK_COUNT_SHIFT))
-+ lsr r2, r2, #((32-(LOCK_WORD_THIN_LOCK_COUNT_SIZE))
++ lsr r2, r2, #((32-(LOCK_WORD_THIN_LOCK_COUNT_SIZE)))
+ @ end replacement of ubfx r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE
cbz r2, .Lslow_lock @ Zero as the new count indicates overflow, go slow path.
strex r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits.
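This hunk, and each one that follows, repairs an unbalanced immediate: the earlier rewrite emitted #((32-(WIDTH)) with three opening but only two closing parentheses, which the assembler rejects. The lsl/lsr pair itself is the standard ubfx substitute on cores without the bitfield-extract instructions (ubfx/sbfx arrived with ARMv6T2): shift the field's top bit up to bit 31, then logical-shift right so the vacated high bits fill with zeros. A minimal Python model of that equivalence (32-bit wraparound is emulated with a mask; an illustration, not ARM-exact semantics):

MASK32 = 0xFFFFFFFF

def ubfx(rn, lsb, width):
    # Semantics of: ubfx rd, rn, #lsb, #width
    return (rn >> lsb) & ((1 << width) - 1)

def lsl_lsr(rn, lsb, width):
    # The replacement pair: lsl #(32-width-lsb) then lsr #(32-width)
    tmp = (rn << (32 - width - lsb)) & MASK32   # lsl discards the high bits
    return tmp >> (32 - width)                  # lsr zero-fills from the top

assert all(ubfx(x, 8, 4) == lsl_lsr(x, 8, 4)
           for x in (0x1234, 0xFFFFFFFF, 0x80000F00))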
- ubfx r1, r12, #0, #METHOD_DEX_CACHE_HASH_BITS // Calculate DexCache method slot index.
+ @ begin replacement of ubfx r1, r12, #0, #METHOD_DEX_CACHE_HASH_BITS // Calculate DexCache method slot index.
+ lsl r1, r12, #(32-(METHOD_DEX_CACHE_HASH_BITS)-(0))
-+ lsr r1, r1, #((32-(METHOD_DEX_CACHE_HASH_BITS))
++ lsr r1, r1, #((32-(METHOD_DEX_CACHE_HASH_BITS)))
+ @ end replacement of ubfx r1, r12, #0, #METHOD_DEX_CACHE_HASH_BITS // Calculate DexCache method slot index.
ldr r4, [r4, #MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET] // Load the resolved methods.
add r4, r4, r1, lsl #(POINTER_SIZE_SHIFT + 1) // Load DexCache method slot address.
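For context on this hunk: the low METHOD_DEX_CACHE_HASH_BITS of r12 pick a slot in the resolved-methods array, and the extra +1 in the shift makes each slot two pointers wide, consistent with slots holding (method, index) pairs. A hypothetical sketch of the addressing, assuming 4-byte pointers (POINTER_SIZE_SHIFT = 2 is an assumption, as is the function name):

POINTER_SIZE_SHIFT = 2          # assumed: 32-bit ARM, 4-byte pointers

def method_slot_address(methods_base, raw, hash_bits):
    index = raw & ((1 << hash_bits) - 1)        # the ubfx above, lsb 0
    return methods_base + (index << (POINTER_SIZE_SHIFT + 1))  # pair-sized slots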
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
.if $chkzero
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #(32-(4)-(8))
-+ lsr rINST, rINST, #((32-(4))
++ lsr rINST, rINST, #((32-(4)))
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r3 @ r0<- vB
$preinstr @ optional op; may set condition codes
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vB/vB+1
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #(32-(4)-(8))
-+ lsr rINST, rINST, #((32-(4))
++ lsr rINST, rINST, #((32-(4)))
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #(32-(4)-(8))
-+ lsr rINST, rINST, #((32-(4))
++ lsr rINST, rINST, #((32-(4)))
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
GET_VREG r0, r3 @ r0<- vB
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
cmp r1, #0 @ is second operand zero?
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
cmp r1, #0 @ is second operand zero?
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #(32-(4)-(8))
-+ lsr r2, r2, #((32-(4))
++ lsr r2, r2, #((32-(4)))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
GET_VREG r0, r1 @ r0<- vB (object ref)
cmp r0, #0 @ is object null?
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ lsl r0, rINST, #(32-(4)-(8))
-+ lsr r0, r0, #((32-(4))
++ lsr r0, r0, #((32-(4)))
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
GET_VREG r3, r1 @ r3<- vB
GET_VREG r0, r0 @ r0<- vA
- sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
+ @ begin replacement of sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
+ lsl rINST, rINST, #(32-(8)-(8))
-+ asr rINST, rINST, #((32-(8))
++ asr rINST, rINST, #((32-(8)))
+ @ end replacement of sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
b MterpCommonTakenBranchNoFlags
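The sbfx hunks differ from the ubfx ones only in the second shift: asr replicates the sign bit on the way back down, which is exactly the sign extension sbfx performs. Here it recovers the signed 8-bit goto offset from bits 15:8 of rINST. A self-contained signed variant of the Python model (asr is emulated by shifting a signed value; Python's >> on a negative int is arithmetic):

def to_signed32(x):
    return x - (1 << 32) if x & 0x80000000 else x

def sbfx(rn, lsb, width):
    # Semantics of: sbfx rd, rn, #lsb, #width (result as a signed int)
    field = (rn >> lsb) & ((1 << width) - 1)
    return field - (1 << width) if field >> (width - 1) else field

def lsl_asr(rn, lsb, width):
    # The replacement pair: lsl #(32-width-lsb) then asr #(32-width)
    tmp = (rn << (32 - width - lsb)) & 0xFFFFFFFF
    return to_signed32(tmp) >> (32 - width)     # arithmetic shift sign-extends

assert sbfx(0xF500, 8, 8) == lsl_asr(0xF500, 8, 8) == -11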
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ s1<- op
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ s0<- op
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ d0<- op
- ubfx r2, r1, #20, #11 @ grab the exponent
+ @ begin replacement of ubfx r2, r1, #20, #11 @ grab the exponent
+ lsl r2, r1, #(32-(11)-(20))
-+ lsr r2, r2, #((32-(11))
++ lsr r2, r2, #((32-(11)))
+ @ end replacement of ubfx r2, r1, #20, #11 @ grab the exponent
movw r3, #0x43e
cmp r2, r3 @ MINLONG < x > MAXLONG?
- ubfx r2, r0, #23, #8 @ grab the exponent
+ @ begin replacement of ubfx r2, r0, #23, #8 @ grab the exponent
+ lsl r2, r0, #(32-(8)-(23))
-+ lsr r2, r2, #((32-(8))
++ lsr r2, r2, #((32-(8)))
+ @ end replacement of ubfx r2, r0, #23, #8 @ grab the exponent
cmp r2, #0xbe @ MININT < x > MAXINT?
bhs f2l_special_cases
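These two hunks pull out the biased IEEE-754 exponent: bits 30:20 of a double's upper word (11 bits) and bits 30:23 of a float (8 bits). The thresholds flag magnitudes of at least 2^63: 0x43e = 1023 + 63 for the double case and 0xbe = 127 + 63 for the float case (the quoted MININT/MAXINT comment notwithstanding, the f2l label and the 63 in the threshold indicate a conversion to long). A worked check of those thresholds, assuming standard IEEE-754 encodings:

import struct

def double_high_exponent(x):
    hi = struct.unpack('<II', struct.pack('<d', x))[1]  # upper 32 bits (r1)
    return (hi >> 20) & 0x7FF                           # the 11-bit ubfx above

def float_exponent(x):
    return (struct.unpack('<I', struct.pack('<f', x))[0] >> 23) & 0xFF

assert double_high_exponent(2.0 ** 63) == 0x43E   # first out-of-range exponent
assert double_high_exponent(2.0 ** 62) < 0x43E
assert float_exponent(2.0 ** 63) == 0xBE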
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index
+ @ begin replacement of ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index
+ lsl r1, rPC, #(32-(THREAD_INTERPRETER_CACHE_SIZE_LOG2)-(2))
-+ lsr r1, r1, #((32-(THREAD_INTERPRETER_CACHE_SIZE_LOG2))
++ lsr r1, r1, #((32-(THREAD_INTERPRETER_CACHE_SIZE_LOG2)))
+ @ end replacement of ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index
add r0, r0, r1, lsl #3 @ entry address within the cache
ldrd r0, r1, [r0] @ entry key (pc) and value (offset)
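This hunk indexes a per-thread interpreter cache: the ubfx skips the low two bits of the PC and keeps THREAD_INTERPRETER_CACHE_SIZE_LOG2 bits as the entry index, and the lsl #3 plus the paired ldrd show each entry to be eight bytes, a (pc, value) pair. A hypothetical model of the lookup (names are illustrative, not ART's):

def interpreter_cache_lookup(cache, pc, size_log2):
    index = (pc >> 2) & ((1 << size_log2) - 1)  # the ubfx above
    key, value = cache[index]                   # ldrd r0, r1, [r0]
    return value if key == pc else None         # a hit only if the pc matches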
- ubfx r2, rINST, #8, #4 @ A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ A
+ lsl r2, rINST, #(32-(4)-(8))
-+ lsr r2, r2, #((32-(4))
++ lsr r2, r2, #((32-(4)))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ A
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
% if is_object:
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #(32-(4)-(8))
-+ lsr r2, r2, #((32-(4))
++ lsr r2, r2, #((32-(4)))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
cmp r3, #0
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #(32-(4)-(8))
-+ lsr r2, r2, #((32-(4))
++ lsr r2, r2, #((32-(4)))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #(32-(4)-(8))
-+ lsr r2, r2, #((32-(4))
++ lsr r2, r2, #((32-(4)))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
-+ lsr r9, r9, #((32-(4))
++ lsr r9, r9, #((32-(4)))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
PREFETCH_INST 2
cmp r1, #0 @ exception pending?
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #(32-(4)-(8))
-+ lsr r2, r2, #((32-(4))
++ lsr r2, r2, #((32-(4)))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ lsl r0, rINST, #(32-(4)-(8))
-+ lsr r0, r0, #((32-(4))
++ lsr r0, r0, #((32-(4)))
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
cmp r2, #0 @ check object for null
beq common_errNullObject @ object was null
- sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
+ lsl r1, rINST, #(32-(4)-(12))
-+ asr r1, r1, #((32-(4))
++ asr r1, r1, #((32-(4)))
+ @ end replacement of sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ lsl r0, rINST, #(32-(4)-(8))
-+ lsr r0, r0, #((32-(4))
++ lsr r0, r0, #((32-(4)))
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
GET_INST_OPCODE ip @ ip<- opcode from rINST
- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ lsl r0, rINST, #(32-(4)-(8))
-+ lsr r0, r0, #((32-(4))
++ lsr r0, r0, #((32-(4)))
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
GET_VREG r2, r1 @ r2<- fp[B]
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #(32-(4)-(8))
-+ lsr rINST, rINST, #((32-(4))
++ lsr rINST, rINST, #((32-(4)))
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A]
+ rightshift = 'lsr'
+ else:
+ rightshift = 'asr'
-+ print(linestartwhitespace+rightshift+' '+destreg+', '+destreg+', #((32-('+width+'))')
++ print(linestartwhitespace+rightshift+' '+destreg+', '+destreg+', #((32-('+width+')))')
+ print(linestartwhitespace+'@ end replacement of '+linels)
+ else:
+ print(line)
mov r3, rINST, lsr #12 @ r3<- B
@ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
lsl r9, rINST, #(32-(4)-(8))
- lsr r9, r9, #((32-(4))
+ lsr r9, r9, #((32-(4)))
@ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
mov r2, rINST, lsr #12 @ r2<- B
@ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
lsl r9, rINST, #(32-(4)-(8))
- lsr r9, r9, #((32-(4))
+ lsr r9, r9, #((32-(4)))
@ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
.if $chkzero
mov r1, rINST, lsr #12 @ r1<- B
@ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
lsl rINST, rINST, #(32-(4)-(8))
- lsr rINST, rINST, #((32-(4))
+ lsr rINST, rINST, #((32-(4)))
@ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
mov r3, rINST, lsr #12 @ r3<- B
@ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
lsl r9, rINST, #(32-(4)-(8))
- lsr r9, r9, #((32-(4))
+ lsr r9, r9, #((32-(4)))
@ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r3 @ r0<- vB
$preinstr @ optional op; may set condition codes
mov r3, rINST, lsr #12 @ r3<- B
@ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
lsl r9, rINST, #(32-(4)-(8))
- lsr r9, r9, #((32-(4))
+ lsr r9, r9, #((32-(4)))
@ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vB/vB+1
mov r3, rINST, lsr #12 @ r3<- B
@ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
lsl rINST, rINST, #(32-(4)-(8))
- lsr rINST, rINST, #((32-(4))
+ lsr rINST, rINST, #((32-(4)))
@ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
mov r3, rINST, lsr #12 @ r3<- B
@ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
lsl rINST, rINST, #(32-(4)-(8))
- lsr rINST, rINST, #((32-(4))
+ lsr rINST, rINST, #((32-(4)))
@ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
GET_VREG r0, r3 @ r0<- vB
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
mov r3, rINST, lsr #12 @ r3<- B
@ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
lsl r9, rINST, #(32-(4)-(8))
- lsr r9, r9, #((32-(4))
+ lsr r9, r9, #((32-(4)))
@ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
mov r2, rINST, lsr #12 @ r2<- B
@ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
lsl r9, rINST, #(32-(4)-(8))
- lsr r9, r9, #((32-(4))
+ lsr r9, r9, #((32-(4)))
@ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
cmp r1, #0 @ is second operand zero?
mov r1, rINST, lsr #12 @ r1<- B
@ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
lsl r9, rINST, #(32-(4)-(8))
- lsr r9, r9, #((32-(4))
+ lsr r9, r9, #((32-(4)))
@ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
mov r3, rINST, lsr #12 @ r3<- B
@ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
lsl r9, rINST, #(32-(4)-(8))
- lsr r9, r9, #((32-(4))
+ lsr r9, r9, #((32-(4)))
@ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
mov r2, rINST, lsr #12 @ r2<- B
@ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
lsl r9, rINST, #(32-(4)-(8))
- lsr r9, r9, #((32-(4))
+ lsr r9, r9, #((32-(4)))
@ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
cmp r1, #0 @ is second operand zero?
mov r3, rINST, lsr #12 @ r3<- B
@ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
lsl r9, rINST, #(32-(4)-(8))
- lsr r9, r9, #((32-(4))
+ lsr r9, r9, #((32-(4)))
@ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
mov r3, rINST, lsr #12 @ r3<- B
@ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
lsl r9, rINST, #(32-(4)-(8))
- lsr r9, r9, #((32-(4))
+ lsr r9, r9, #((32-(4)))
@ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
mov r3, rINST, lsr #12 @ r3<- B
@ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
lsl r9, rINST, #(32-(4)-(8))
- lsr r9, r9, #((32-(4))
+ lsr r9, r9, #((32-(4)))
@ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
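To close, a standalone brute-force check that the balanced two-shift form agrees with ubfx and sbfx for every (lsb, width) pair appearing literally in this patch, plus (0, 16) and (2, 8) as stand-ins for the symbolic constants; a sanity test for the transformation, not part of any build:

import random

def ubfx(x, lsb, w):
    return (x >> lsb) & ((1 << w) - 1)

def sbfx(x, lsb, w):
    f = (x >> lsb) & ((1 << w) - 1)
    return f - (1 << w) if f >> (w - 1) else f

def two_shift(x, lsb, w, signed):
    t = (x << (32 - w - lsb)) & 0xFFFFFFFF      # lsl
    if signed and t & 0x80000000:
        t -= 1 << 32                            # make the final shift an asr
    return t >> (32 - w)                        # lsr or asr

random.seed(0)
for lsb, w in [(8, 4), (12, 4), (8, 8), (20, 11), (23, 8), (0, 16), (2, 8)]:
    for x in [0, 0xFFFFFFFF] + [random.getrandbits(32) for _ in range(10000)]:
        assert two_shift(x, lsb, w, False) == ubfx(x, lsb, w)
        assert two_shift(x, lsb, w, True) == sbfx(x, lsb, w)
print('lsl/lsr and lsl/asr match ubfx and sbfx on all tested fields')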