@ thread id did not match, go slow path.
add r3, r2, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ Increment the recursive lock count.
@ Extract the new thin lock count for overflow check.
- ubfx r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE
+ @ begin replacement of ubfx r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE
+ lsl r2, r3, #(32-(LOCK_WORD_THIN_LOCK_COUNT_SIZE)-(LOCK_WORD_THIN_LOCK_COUNT_SHIFT))
+ lsr r2, r2, #(32-(LOCK_WORD_THIN_LOCK_COUNT_SIZE))
+ @ end replacement of ubfx r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE
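+ @ (The lsl/lsr pair is equivalent to the ubfx it replaces: the lsl moves the
+ @ top bit of the field to bit 31, and the lsr by 32-width brings the field
+ @ back down to bit 0, zero-extended. The sbfx replacements below use asr
+ @ instead, so the extracted field is sign-extended.)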
cbz r2, .Lslow_lock @ Zero as the new count indicates overflow, go slow path.
strex r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits.
cbnz r2, .Llock_strex_fail @ If strex failed, retry.
bcs .Limt_conflict_trampoline_dex_cache_miss
ldr r4, [r0, #MIRROR_CLASS_DEX_CACHE_OFFSET] // Load the DexCache (without read barrier).
UNPOISON_HEAP_REF r4
- ubfx r1, r12, #0, #METHOD_DEX_CACHE_HASH_BITS // Calculate DexCache method slot index.
+ @ begin replacement of ubfx r1, r12, #0, #METHOD_DEX_CACHE_HASH_BITS // Calculate DexCache method slot index.
+ lsl r1, r12, #(32-(METHOD_DEX_CACHE_HASH_BITS)-(0))
+ lsr r1, r1, #(32-(METHOD_DEX_CACHE_HASH_BITS))
+ @ end replacement of ubfx r1, r12, #0, #METHOD_DEX_CACHE_HASH_BITS // Calculate DexCache method slot index.
ldr r4, [r4, #MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET] // Load the resolved methods.
add r4, r4, r1, lsl #(POINTER_SIZE_SHIFT + 1) // Load DexCache method slot address.
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
.if $chkzero
/* binop/lit16 vA, vB, #+CCCC */
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
.if $chkzero
cmp r1, #0 @ is second operand zero?
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #(32-(4)-(8))
+ lsr rINST, rINST, #(32-(4))
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
GET_VREG_WIDE_BY_ADDR r2, r3, r1 @ r2/r3<- vBB/vBB+1
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r3 @ r0<- vB
$preinstr @ optional op; may set condition codes
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vB/vB+1
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #(32-(4)-(8))
+ lsr rINST, rINST, #(32-(4))
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vAA
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #(32-(4)-(8))
+ lsr rINST, rINST, #(32-(4))
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
GET_VREG r0, r3 @ r0<- vB
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
$preinstr @ optional op; may set condition codes
*
*/
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
cmp r1, #0 @ is second operand zero?
*/
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
*/
/* mul-long/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
GET_VREG_WIDE_BY_ADDR r2, r3, r1 @ r2/r3<- vBB/vBB+1
*
*/
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
cmp r1, #0 @ is second operand zero?
*/
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
*/
/* shl-long/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
*/
/* shr-long/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
*/
/* ushr-long/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
* Return the length of an array.
*/
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #(32-(4)-(8))
+ lsr r2, r2, #(32-(4))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
GET_VREG r0, r1 @ r0<- vB (object ref)
cmp r0, #0 @ is object null?
beq common_errNullObject @ yup, fail
*/
/* if-cmp vA, vB, +CCCC */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ lsl r0, rINST, #(32-(4)-(8))
+ lsr r0, r0, #(32-(4))
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
GET_VREG r3, r1 @ r3<- vB
GET_VREG r0, r0 @ r0<- vA
FETCH_S rINST, 1 @ rINST<- branch offset, in code units
* double to get a byte offset.
*/
/* goto +AA */
- sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
+ @ begin replacement of sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
+ lsl rINST, rINST, #(32-(8)-(8))
+ asr rINST, rINST, #(32-(8))
+ @ end replacement of sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
b MterpCommonTakenBranchNoFlags
%def op_goto_16():
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vB
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vB
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
GET_VREG_FLOAT_BY_ADDR s0, r3 @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ s1<- op
GET_INST_OPCODE ip @ extract opcode from rINST
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
GET_VREG_DOUBLE_BY_ADDR d0, r3 @ d0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ s0<- op
GET_INST_OPCODE ip @ extract opcode from rINST
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
GET_VREG_FLOAT_BY_ADDR s0, r3 @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ d0<- op
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
* to modest integer. The EABI convert function isn't doing this for us.
*/
d2l_doconv:
- ubfx r2, r1, #20, #11 @ grab the exponent
+ @ begin replacement of ubfx r2, r1, #20, #11 @ grab the exponent
+ lsl r2, r1, #(32-(11)-(20))
+ lsr r2, r2, #(32-(11))
+ @ end replacement of ubfx r2, r1, #20, #11 @ grab the exponent
movw r3, #0x43e
cmp r2, r3 @ MINLONG < x > MAXLONG?
bhs d2l_special_cases
* to modest integer. The EABI convert function isn't doing this for us.
*/
f2l_doconv:
- ubfx r2, r0, #23, #8 @ grab the exponent
+ @ begin replacement of ubfx r2, r0, #23, #8 @ grab the exponent
+ lsl r2, r0, #(32-(8)-(23))
+ lsr r2, r2, #(32-(8))
+ @ end replacement of ubfx r2, r0, #23, #8 @ grab the exponent
cmp r2, #0xbe @ MININT < x > MAXINT?
bhs f2l_special_cases
b __aeabi_f2lz @ tail call to convert float to long
* For: long-to-double
*/
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
%def op_iget(is_object=False, is_wide=False, load="ldr", helper="MterpIGetU32"):
@ Fast-path which gets the field offset from thread-local cache.
add r0, rSELF, #THREAD_INTERPRETER_CACHE_OFFSET @ cache address
- ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index
+ @ begin replacement of ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index
+ lsl r1, rPC, #(32-(THREAD_INTERPRETER_CACHE_SIZE_LOG2)-(2))
+ lsr r1, r1, #(32-(THREAD_INTERPRETER_CACHE_SIZE_LOG2))
+ @ end replacement of ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index
add r0, r0, r1, lsl #3 @ entry address within the cache
ldrd r0, r1, [r0] @ entry key (pc) and value (offset)
mov r2, rINST, lsr #12 @ B
# endif
#endif
% #endif
- ubfx r2, rINST, #8, #4 @ A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ A
+ lsl r2, rINST, #(32-(4)-(8))
+ lsr r2, r2, #(32-(4))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ A
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
% if is_object:
SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
GET_VREG r0, r2 @ r0<- object we're operating on
bl artIGetObjectFromMterp @ (obj, offset)
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #(32-(4)-(8))
+ lsr r2, r2, #(32-(4))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
cmp r3, #0
bne MterpPossibleException @ bail out
mov r2, rINST, lsr #12 @ r2<- B
FETCH r1, 1 @ r1<- field byte offset
GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #(32-(4)-(8))
+ lsr r2, r2, #(32-(4))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
$load r0, [r3, r1] @ r0<- obj.field
mov r2, rINST, lsr #12 @ r2<- B
FETCH ip, 1 @ ip<- field byte offset
GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #(32-(4)-(8))
+ lsr r2, r2, #(32-(4))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
mov r3, rSELF @ r3<- self
bl MterpInstanceOf @ (index, &obj, method, self)
ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ lsl r9, rINST, #(32-(4)-(8))
+ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
PREFETCH_INST 2
cmp r1, #0 @ exception pending?
bne MterpException
mov r2, rINST, lsr #12 @ r2<- B
FETCH r1, 1 @ r1<- field byte offset
GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ lsl r2, rINST, #(32-(4)-(8))
+ lsr r2, r2, #(32-(4))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
GET_VREG r0, r2 @ r0<- fp[A]
mov r2, rINST, lsr #12 @ r2<- B
FETCH r3, 1 @ r3<- field byte offset
GET_VREG r2, r2 @ r2<- fp[B], the object pointer
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ lsl r0, rINST, #(32-(4)-(8))
+ lsr r0, r0, #(32-(4))
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
cmp r2, #0 @ check object for null
beq common_errNullObject @ object was null
VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[A]
%def op_const_4():
/* const/4 vA, #+B */
- sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
+ lsl r1, rINST, #(32-(4)-(12))
+ asr r1, r1, #(32-(4))
+ @ end replacement of sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ lsl r0, rINST, #(32-(4)-(8))
+ lsr r0, r0, #(32-(4))
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
GET_INST_OPCODE ip @ ip<- opcode from rINST
SET_VREG r1, r0 @ fp[A]<- r1
/* for move, move-object, long-to-int */
/* op vA, vB */
mov r1, rINST, lsr #12 @ r1<- B from 15:12
- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ lsl r0, rINST, #(32-(4)-(8))
+ lsr r0, r0, #(32-(4))
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
GET_VREG r2, r1 @ r2<- fp[B]
GET_INST_OPCODE ip @ ip<- opcode from rINST
/* move-wide vA, vB */
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ lsl rINST, rINST, #(32-(4)-(8))
+ lsr rINST, rINST, #(32-(4))
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A]
GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- fp[B]
linestartwhitespace = line[:(len(line)-len(linels))]
destreg = linesplit[1][:-1]
sourcereg = linesplit[2][:-1]
- lsb = int(linesplit[3][1:-1])
- width = int(linesplit[4][1:])
+ lsb = linesplit[3][1:-1]
+ width = linesplit[4][1:]
#print(linesplit)
#print((destreg,sourcereg,lsb,width))
print(linestartwhitespace+'@ begin replacement of '+linels)
- print(linestartwhitespace+'lsl '+destreg+', '+sourcereg+', #'+str(32-width-lsb))
+ print(linestartwhitespace+'lsl '+destreg+', '+sourcereg+', #(32-('+width+')-('+lsb+'))')
if linesplit[0] == 'ubfx':
rightshift = 'lsr'
else:
rightshift = 'asr'
- print(linestartwhitespace+rightshift+' '+destreg+', '+destreg+', #'+str(32-width))
+ print(linestartwhitespace+rightshift+' '+destreg+', '+destreg+', #(32-('+width+'))')
print(linestartwhitespace+'@ end replacement of '+linels)
else:
print(line)
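
For reference, a minimal standalone sketch of the corrected expansion (not part
of the patch itself; it assumes the same whitespace-separated
"op rd, rn, #lsb, #width" operand layout the script above parses, and keeps
lsb and width as strings so symbolic constants survive unevaluated):

def expand_bfx(line):
    """Rewrite one ubfx/sbfx line as an equivalent lsl + lsr/asr pair."""
    linels = line.lstrip()
    ws = line[:len(line) - len(linels)]             # preserve indentation
    parts = linels.split()
    dest = parts[1][:-1]                            # strip trailing comma
    src = parts[2][:-1]
    lsb = parts[3][1:-1]                            # strip '#' and comma
    width = parts[4][1:]                            # strip '#'
    right = 'lsr' if parts[0] == 'ubfx' else 'asr'  # asr sign-extends for sbfx
    return [ws + '@ begin replacement of ' + linels,
            ws + 'lsl ' + dest + ', ' + src + ', #(32-(' + width + ')-(' + lsb + '))',
            ws + right + ' ' + dest + ', ' + dest + ', #(32-(' + width + '))',
            ws + '@ end replacement of ' + linels]

for out in expand_bfx('    ubfx r9, rINST, #8, #4'):
    print(out)

# Output:
#     @ begin replacement of ubfx r9, rINST, #8, #4
#     lsl r9, rINST, #(32-(4)-(8))
#     lsr r9, r9, #(32-(4))
#     @ end replacement of ubfx r9, rINST, #8, #4

Both shift amounts are left as expressions rather than evaluated integers so
the assembler resolves constants such as METHOD_DEX_CACHE_HASH_BITS itself.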