-Description: hack out ubfx and sbfx which are not supported on armv6
- I wrote a script to replace ubfx and sbfx with shift operations,
- unfortunately these clobber the carry flag, so there is some risk,
- hopefully it's ok.....
-Author: Peter Michael Green <plugwash@raspbian.org>
-
----
-The information above should follow the Patch Tagging Guidelines, please
-checkout http://dep.debian.net/deps/dep3/ to learn about the format. Here
-are templates for supplementary fields that you might want to add:
-
-Origin: <vendor|upstream|other>, <url of original patch>
-Bug: <url in upstream bugtracker>
-Bug-Debian: https://bugs.debian.org/<bugnumber>
-Bug-Ubuntu: https://launchpad.net/bugs/<bugnumber>
-Forwarded: <no|not-needed|url proving that it has been forwarded>
-Reviewed-By: <name and email of someone who approved the patch>
-Last-Update: 2019-03-19
-
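The same mechanical rewrite is applied in every hunk of both the old and the new
version of this patch. As a rough sketch (not itself part of any hunk; rINST is
the register alias used by the surrounding mterp sources), a field of width bits
starting at bit lsb is extracted with a shift pair instead of ubfx/sbfx:

   @ ubfx r0, rINST, #8, #4        @ r0<- A from 11:8 (zero-extended), becomes:
   lsl r0, rINST, #(32-(4)-(8))    @ i.e. #20, move the field up to bits 31:28
   lsr r0, r0, #(32-(4))           @ i.e. #28, shift back down, zero-filling

   @ sbfx r1, rINST, #12, #4       @ r1<- sssssssB (sign-extended), becomes:
   lsl r1, rINST, #(32-(4)-(12))   @ i.e. #16
   asr r1, r1, #(32-(4))           @ i.e. #28, arithmetic shift sign-extends

The old version of the patch (the lines removed below) emitted the shift counts
as precomputed literals; the new version keeps the parenthesised expressions so
that assembler-time constants such as LOCK_WORD_THIN_LOCK_COUNT_SIZE or
THREAD_INTERPRETER_CACHE_SIZE_LOG2 can be used directly.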
-Index: android-platform-art-8.1.0+r23/runtime/interpreter/mterp/out/mterp_arm.S
-===================================================================
---- android-platform-art-8.1.0+r23.orig/runtime/interpreter/mterp/out/mterp_arm.S
-+++ android-platform-art-8.1.0+r23/runtime/interpreter/mterp/out/mterp_arm.S
-@@ -416,7 +416,10 @@ artMterpAsmInstructionStart = .L_op_nop
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B from 15:12
-- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
-+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
-+ lsl r0, rINST, #20
-+ lsr r0, r0, #28
-+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[B]
- GET_INST_OPCODE ip @ ip<- opcode from rINST
-@@ -470,7 +473,10 @@ artMterpAsmInstructionStart = .L_op_nop
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
-+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- fp[B]
-@@ -522,7 +528,10 @@ artMterpAsmInstructionStart = .L_op_nop
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B from 15:12
-- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
-+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
-+ lsl r0, rINST, #20
-+ lsr r0, r0, #28
-+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[B]
- GET_INST_OPCODE ip @ ip<- opcode from rINST
-@@ -723,8 +732,14 @@ artMterpAsmInstructionStart = .L_op_nop
- .L_op_const_4: /* 0x12 */
- /* File: arm/op_const_4.S */
- /* const/4 vA, #+B */
-- sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
-- ubfx r0, rINST, #8, #4 @ r0<- A
-+ @ begin replacement of sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
-+ lsl r1, rINST, #16
-+ asr r1, r1, #28
-+ @ end replacement of sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
-+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
-+ lsl r0, rINST, #20
-+ lsr r0, r0, #28
-+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- SET_VREG r1, r0 @ fp[A]<- r1
-@@ -975,7 +990,10 @@ artMterpAsmInstructionStart = .L_op_nop
- mov r3, rSELF @ r3<- self
- bl MterpInstanceOf @ (index, &obj, method, self)
- ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- PREFETCH_INST 2
- cmp r1, #0 @ exception pending?
- bne MterpException
-@@ -992,7 +1010,10 @@ artMterpAsmInstructionStart = .L_op_nop
- * Return the length of an array.
- */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r0, r1 @ r0<- vB (object ref)
- cmp r0, #0 @ is object null?
- beq common_errNullObject @ yup, fail
-@@ -1139,7 +1160,10 @@ artMterpAsmInstructionStart = .L_op_nop
- * double to get a byte offset.
- */
- /* goto +AA */
-- sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
-+ @ begin replacement of sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
-+ lsl rINST, rINST, #16
-+ asr rINST, rINST, #24
-+ @ end replacement of sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
+diff --git a/debian/patches/auto-10.0.0+r36-3+rpi1-48ad93a9499c82257780931f80c66f84ab10b3eb-1611398508 b/debian/patches/auto-10.0.0+r36-3+rpi1-48ad93a9499c82257780931f80c66f84ab10b3eb-1611398508
+new file mode 100644
+index 0000000..1b1f85d
+diff --git a/debian/patches/dont-tag-asm-as-armv7.patch b/debian/patches/dont-tag-asm-as-armv7.patch
+new file mode 100644
+index 0000000..b1002d4
+diff --git a/debian/patches/hack-out-ubfx-and-sbfx.patch b/debian/patches/hack-out-ubfx-and-sbfx.patch
+new file mode 100644
+index 0000000..e754d1c
+diff --git a/debian/patches/replace-movw.patch b/debian/patches/replace-movw.patch
+new file mode 100644
+index 0000000..d30924c
+diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
+index b57e119..7531f9e 100644
+--- a/runtime/arch/arm/quick_entrypoints_arm.S
++++ b/runtime/arch/arm/quick_entrypoints_arm.S
+@@ -714,7 +714,10 @@ ENTRY art_quick_lock_object
+ @ thread id did not match, go slow path.
+ add r3, r2, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ Increment the recursive lock count.
+ @ Extract the new thin lock count for overflow check.
+- ubfx r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE
++ @ begin replacement of ubfx r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE
++ lsl r2, r3, #(32-(LOCK_WORD_THIN_LOCK_COUNT_SIZE)-(LOCK_WORD_THIN_LOCK_COUNT_SHIFT))
++ lsr r2, r2, #(32-(LOCK_WORD_THIN_LOCK_COUNT_SIZE))
++ @ end replacement of ubfx r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE
+ cbz r2, .Lslow_lock @ Zero as the new count indicates overflow, go slow path.
+ strex r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits.
+ cbnz r2, .Llock_strex_fail @ If strex failed, retry.
+@@ -1621,7 +1624,10 @@ ENTRY art_quick_imt_conflict_trampoline
+ bcs .Limt_conflict_trampoline_dex_cache_miss
+ ldr r4, [r0, #MIRROR_CLASS_DEX_CACHE_OFFSET] // Load the DexCache (without read barrier).
+ UNPOISON_HEAP_REF r4
+- ubfx r1, r12, #0, #METHOD_DEX_CACHE_HASH_BITS // Calculate DexCache method slot index.
++ @ begin replacement of ubfx r1, r12, #0, #METHOD_DEX_CACHE_HASH_BITS // Calculate DexCache method slot index.
++ lsl r1, r12, #(32-(METHOD_DEX_CACHE_HASH_BITS)-(0))
++ lsr r1, r1, #(32-(METHOD_DEX_CACHE_HASH_BITS))
++ @ end replacement of ubfx r1, r12, #0, #METHOD_DEX_CACHE_HASH_BITS // Calculate DexCache method slot index.
+ ldr r4, [r4, #MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET] // Load the resolved methods.
+ add r4, r4, r1, lsl #(POINTER_SIZE_SHIFT + 1) // Load DexCache method slot address.
- /* ------------------------------ */
-@@ -1424,7 +1448,10 @@ artMterpAsmInstructionStart = .L_op_nop
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx r0, rINST, #8, #4 @ r0<- A
-+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
-+ lsl r0, rINST, #20
-+ lsr r0, r0, #28
-+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
-@@ -1450,7 +1477,10 @@ artMterpAsmInstructionStart = .L_op_nop
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx r0, rINST, #8, #4 @ r0<- A
-+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
-+ lsl r0, rINST, #20
-+ lsr r0, r0, #28
-+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
-@@ -1476,7 +1506,10 @@ artMterpAsmInstructionStart = .L_op_nop
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx r0, rINST, #8, #4 @ r0<- A
-+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
-+ lsl r0, rINST, #20
-+ lsr r0, r0, #28
-+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
-@@ -1502,7 +1535,10 @@ artMterpAsmInstructionStart = .L_op_nop
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx r0, rINST, #8, #4 @ r0<- A
-+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
-+ lsl r0, rINST, #20
-+ lsr r0, r0, #28
-+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
-@@ -1528,7 +1564,10 @@ artMterpAsmInstructionStart = .L_op_nop
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx r0, rINST, #8, #4 @ r0<- A
-+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
-+ lsl r0, rINST, #20
-+ lsr r0, r0, #28
-+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
-@@ -1554,7 +1593,10 @@ artMterpAsmInstructionStart = .L_op_nop
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx r0, rINST, #8, #4 @ r0<- A
-+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
-+ lsl r0, rINST, #20
-+ lsr r0, r0, #28
-+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
-@@ -2244,7 +2286,10 @@ artMterpAsmInstructionStart = .L_op_nop
- mov r3, rSELF @ r3<- self
- bl artGet32InstanceFromCode
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
-@@ -2274,7 +2319,10 @@ artMterpAsmInstructionStart = .L_op_nop
- mov r3, rSELF @ r3<- self
- bl artGet64InstanceFromCode
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpException @ bail out
-@@ -2303,7 +2351,10 @@ artMterpAsmInstructionStart = .L_op_nop
- mov r3, rSELF @ r3<- self
- bl artGetObjInstanceFromCode
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
-@@ -2335,7 +2386,10 @@ artMterpAsmInstructionStart = .L_op_nop
- mov r3, rSELF @ r3<- self
- bl artGetBooleanInstanceFromCode
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
-@@ -2367,7 +2421,10 @@ artMterpAsmInstructionStart = .L_op_nop
- mov r3, rSELF @ r3<- self
- bl artGetByteInstanceFromCode
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
-@@ -2399,7 +2456,10 @@ artMterpAsmInstructionStart = .L_op_nop
- mov r3, rSELF @ r3<- self
- bl artGetCharInstanceFromCode
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
-@@ -2431,7 +2491,10 @@ artMterpAsmInstructionStart = .L_op_nop
- mov r3, rSELF @ r3<- self
- bl artGetShortInstanceFromCode
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
-@@ -2460,7 +2523,10 @@ artMterpAsmInstructionStart = .L_op_nop
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r2, r2 @ r2<- fp[A]
- ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
- PREFETCH_INST 2
-@@ -2481,7 +2547,10 @@ artMterpAsmInstructionStart = .L_op_nop
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[A]
- ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
- PREFETCH_INST 2
-@@ -2524,7 +2593,10 @@ artMterpAsmInstructionStart = .L_op_nop
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r2, r2 @ r2<- fp[A]
- ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
- PREFETCH_INST 2
-@@ -2552,7 +2624,10 @@ artMterpAsmInstructionStart = .L_op_nop
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r2, r2 @ r2<- fp[A]
- ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
- PREFETCH_INST 2
-@@ -2580,7 +2655,10 @@ artMterpAsmInstructionStart = .L_op_nop
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r2, r2 @ r2<- fp[A]
- ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
- PREFETCH_INST 2
-@@ -2608,7 +2686,10 @@ artMterpAsmInstructionStart = .L_op_nop
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r2, r2 @ r2<- fp[A]
- ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
- PREFETCH_INST 2
-@@ -3362,7 +3443,10 @@ artMterpAsmInstructionStart = .L_op_nop
+diff --git a/runtime/interpreter/mterp/arm/arithmetic.S b/runtime/interpreter/mterp/arm/arithmetic.S
+index a6ba454..0e98485 100644
+--- a/runtime/interpreter/mterp/arm/arithmetic.S
++++ b/runtime/interpreter/mterp/arm/arithmetic.S
+@@ -51,7 +51,10 @@
*/
- /* unop vA, vB */
+ /* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-@@ -3388,7 +3472,10 @@ artMterpAsmInstructionStart = .L_op_nop
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if $chkzero
+@@ -83,7 +86,10 @@
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-@@ -3413,7 +3500,10 @@ artMterpAsmInstructionStart = .L_op_nop
+ GET_VREG r0, r2 @ r0<- vB
+ .if $chkzero
+ cmp r1, #0 @ is second operand zero?
+@@ -189,7 +195,10 @@
*/
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
++ lsl rINST, rINST, #(32-(4)-(8))
++ lsr rINST, rINST, #(32-(4))
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
-@@ -3441,7 +3531,10 @@ artMterpAsmInstructionStart = .L_op_nop
+ GET_VREG_WIDE_BY_ADDR r2, r3, r1 @ r2/r3<- vBB/vBB+1
+@@ -218,7 +227,10 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
-- ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
-+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
-@@ -3470,7 +3563,10 @@ artMterpAsmInstructionStart = .L_op_nop
+- ubfx r9, rINST, #8, #4 @ r9<- A
++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ $preinstr @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+@@ -241,7 +253,10 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vB/vB+1
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-@@ -3495,7 +3591,10 @@ artMterpAsmInstructionStart = .L_op_nop
+@@ -262,7 +277,10 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
++ lsl rINST, rINST, #(32-(4)-(8))
++ lsr rINST, rINST, #(32-(4))
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
-@@ -3523,7 +3622,10 @@ artMterpAsmInstructionStart = .L_op_nop
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vAA
+@@ -285,7 +303,10 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
++ lsl rINST, rINST, #(32-(4)-(8))
++ lsr rINST, rINST, #(32-(4))
+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
GET_VREG r0, r3 @ r0<- vB
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- @ optional op; may set condition codes
-@@ -3551,7 +3653,10 @@ artMterpAsmInstructionStart = .L_op_nop
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fsitos s1, s0 @ s1<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
-@@ -3575,7 +3680,10 @@ artMterpAsmInstructionStart = .L_op_nop
+ $preinstr @ optional op; may set condition codes
+@@ -401,7 +422,10 @@
+ *
+ */
mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fsitod d0, s0 @ d0<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
-@@ -3594,7 +3702,10 @@ artMterpAsmInstructionStart = .L_op_nop
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B from 15:12
-- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
-+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
-+ lsl r0, rINST, #20
-+ lsr r0, r0, #28
-+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[B]
- GET_INST_OPCODE ip @ ip<- opcode from rINST
-@@ -3623,7 +3734,10 @@ artMterpAsmInstructionStart = .L_op_nop
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ cmp r1, #0 @ is second operand zero?
+@@ -432,7 +456,10 @@
*/
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-@@ -3647,7 +3761,10 @@ artMterpAsmInstructionStart = .L_op_nop
- * For: long-to-double
+ GET_VREG r0, r2 @ r0<- vB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+@@ -566,7 +593,10 @@
*/
- mov r3, rINST, lsr #12 @ r3<- B
+ /* mul-long/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- vldr d0, [r3] @ d0<- vAA
-@@ -3681,7 +3798,10 @@ constvalop_long_to_double:
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
+ VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
+ GET_VREG_WIDE_BY_ADDR r2, r3, r1 @ r2/r3<- vBB/vBB+1
+@@ -659,7 +689,10 @@
+ *
+ */
mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ftosizs s1, s0 @ s1<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
-@@ -3704,7 +3824,10 @@ constvalop_long_to_double:
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ cmp r1, #0 @ is second operand zero?
+@@ -693,7 +726,10 @@
*/
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
-+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
- GET_VREG r0, r3 @ r0<- vB
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- @ optional op; may set condition codes
-@@ -3733,7 +3856,10 @@ constvalop_long_to_double:
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- vcvt.f64.f32 d0, s0 @ d0<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
-@@ -3758,7 +3884,10 @@ constvalop_long_to_double:
+ GET_VREG r0, r2 @ r0<- vB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+@@ -803,7 +839,10 @@
+ */
+ /* shl-long/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- fldd d0, [r3] @ d0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ftosizd s0, d0 @ s0<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
-@@ -3781,7 +3910,10 @@ constvalop_long_to_double:
+ GET_VREG r2, r3 @ r2<- vB
+ CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
+@@ -865,7 +904,10 @@
*/
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
-+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
-@@ -3811,7 +3943,10 @@ constvalop_long_to_double:
+ /* shr-long/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- fldd d0, [r3] @ d0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- vcvt.f32.f64 s0, d0 @ s0<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
-@@ -3835,7 +3970,10 @@ constvalop_long_to_double:
+ GET_VREG r2, r3 @ r2<- vB
+ CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
+@@ -939,7 +981,10 @@
*/
- /* unop vA, vB */
+ /* ushr-long/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-@@ -3861,7 +3999,10 @@ constvalop_long_to_double:
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-@@ -3887,7 +4028,10 @@ constvalop_long_to_double:
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-@@ -5133,7 +5277,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
-@@ -5171,7 +5318,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
-@@ -5210,7 +5360,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
-@@ -5242,7 +5395,10 @@ constvalop_long_to_double:
- *
- */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- cmp r1, #0 @ is second operand zero?
-@@ -5277,7 +5433,10 @@ constvalop_long_to_double:
- *
- */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- cmp r1, #0 @ is second operand zero?
-@@ -5317,7 +5476,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
-@@ -5355,7 +5517,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
-@@ -5393,7 +5558,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
-@@ -5431,7 +5599,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
-@@ -5469,7 +5640,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
-@@ -5507,7 +5681,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
-@@ -5545,7 +5722,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
-+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
-@@ -5585,7 +5765,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
-+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
-@@ -5618,7 +5801,10 @@ constvalop_long_to_double:
- */
- /* mul-long/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
-@@ -5654,7 +5840,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
-+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
-@@ -5695,7 +5884,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
-+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
-@@ -5735,7 +5927,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
-+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
-@@ -5775,7 +5970,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
-+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
-@@ -5815,7 +6013,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
-+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
-@@ -5844,7 +6045,10 @@ constvalop_long_to_double:
- */
- /* shl-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
-@@ -5871,7 +6075,10 @@ constvalop_long_to_double:
+diff --git a/runtime/interpreter/mterp/arm/array.S b/runtime/interpreter/mterp/arm/array.S
+index 7b3db61..0c12dbf 100644
+--- a/runtime/interpreter/mterp/arm/array.S
++++ b/runtime/interpreter/mterp/arm/array.S
+@@ -179,7 +179,10 @@
+ * Return the length of an array.
*/
- /* shr-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r2, r3 @ r2<- vB
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
-@@ -5898,7 +6105,10 @@ constvalop_long_to_double:
+ mov r1, rINST, lsr #12 @ r1<- B
+- ubfx r2, rINST, #8, #4 @ r2<- A
++ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
++ lsl r2, rINST, #(32-(4)-(8))
++ lsr r2, r2, #(32-(4))
++ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r0, r1 @ r0<- vB (object ref)
+ cmp r0, #0 @ is object null?
+ beq common_errNullObject @ yup, fail
+diff --git a/runtime/interpreter/mterp/arm/control_flow.S b/runtime/interpreter/mterp/arm/control_flow.S
+index 2299ef9..89c3b93 100644
+--- a/runtime/interpreter/mterp/arm/control_flow.S
++++ b/runtime/interpreter/mterp/arm/control_flow.S
+@@ -7,7 +7,10 @@
*/
- /* ushr-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r2, r3 @ r2<- vB
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
-@@ -5929,7 +6139,10 @@ constvalop_long_to_double:
+ /* if-cmp vA, vB, +CCCC */
+ mov r1, rINST, lsr #12 @ r1<- B
+- ubfx r0, rINST, #8, #4 @ r0<- A
++ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
++ lsl r0, rINST, #(32-(4)-(8))
++ lsr r0, r0, #(32-(4))
++ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r0, r0 @ r0<- vA
+ FETCH_S rINST, 1 @ rINST<- branch offset, in code units
+@@ -46,7 +49,10 @@
+ * double to get a byte offset.
*/
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
-@@ -5955,7 +6168,10 @@ constvalop_long_to_double:
+ /* goto +AA */
+- sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
++ @ begin replacement of sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
++ lsl rINST, rINST, #(32-(8)-(8))
++ asr rINST, rINST, #(32-(8))
++ @ end replacement of sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
+ b MterpCommonTakenBranchNoFlags
+
+ %def op_goto_16():
+diff --git a/runtime/interpreter/mterp/arm/floating_point.S b/runtime/interpreter/mterp/arm/floating_point.S
+index 035fc13..f2b7b3b 100644
+--- a/runtime/interpreter/mterp/arm/floating_point.S
++++ b/runtime/interpreter/mterp/arm/floating_point.S
+@@ -32,7 +32,10 @@
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
-@@ -5981,7 +6197,10 @@ constvalop_long_to_double:
+ GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vB
+@@ -79,7 +82,10 @@
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
-@@ -6007,7 +6226,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
+ CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
+ GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vB
+@@ -102,7 +108,10 @@
mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
-@@ -6041,7 +6263,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
+ GET_VREG_FLOAT_BY_ADDR s0, r3 @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
-@@ -6073,7 +6298,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $instr @ s1<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+@@ -120,7 +129,10 @@
mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
-@@ -6101,7 +6329,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
+ GET_VREG_DOUBLE_BY_ADDR d0, r3 @ d0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
-@@ -6129,7 +6360,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
-@@ -6157,7 +6391,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $instr @ s0<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+@@ -138,7 +150,10 @@
mov r3, rINST, lsr #12 @ r3<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
-@@ -6192,7 +6429,10 @@ constvalop_long_to_double:
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
-- ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
-+ lsl rINST, rINST, #20
-+ lsr rINST, rINST, #28
-+ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
-@@ -6231,7 +6471,10 @@ constvalop_long_to_double:
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
-@@ -6267,7 +6510,10 @@ constvalop_long_to_double:
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
-@@ -6303,7 +6549,10 @@ constvalop_long_to_double:
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG_FLOAT_BY_ADDR s0, r3 @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
-@@ -6334,7 +6583,10 @@ constvalop_long_to_double:
- */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
-@@ -6368,7 +6620,10 @@ constvalop_long_to_double:
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $instr @ d0<- op
+ CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
+@@ -334,7 +349,10 @@
+ * to modest integer. The EABI convert function isn't doing this for us.
+ */
+ d2l_doconv:
+- ubfx r2, r1, #20, #11 @ grab the exponent
++ @ begin replacement of ubfx r2, r1, #20, #11 @ grab the exponent
++ lsl r2, r1, #(32-(11)-(20))
++ lsr r2, r2, #(32-(11))
++ @ end replacement of ubfx r2, r1, #20, #11 @ grab the exponent
+ movw r3, #0x43e
+ cmp r2, r3 @ MINLONG < x > MAXLONG?
+ bhs d2l_special_cases
+@@ -376,7 +394,10 @@ d2l_maybeNaN:
+ * to modest integer. The EABI convert function isn't doing this for us.
+ */
+ f2l_doconv:
+- ubfx r2, r0, #23, #8 @ grab the exponent
++ @ begin replacement of ubfx r2, r0, #23, #8 @ grab the exponent
++ lsl r2, r0, #(32-(8)-(23))
++ lsr r2, r2, #(32-(8))
++ @ end replacement of ubfx r2, r0, #23, #8 @ grab the exponent
+ cmp r2, #0xbe @ MININT < x > MAXINT?
+ bhs f2l_special_cases
+ b __aeabi_f2lz @ tail call to convert float to long
+@@ -412,7 +433,10 @@ f2l_maybeNaN:
+ * For: long-to-double
*/
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
-@@ -6405,7 +6660,10 @@ constvalop_long_to_double:
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
-@@ -6440,7 +6698,10 @@ constvalop_long_to_double:
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
-- ubfx r9, rINST, #8, #4 @ r9<- A
-+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
-+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
-@@ -6475,7 +6736,10 @@ constvalop_long_to_double:
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
+ mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
-+ lsl r9, rINST, #20
-+ lsr r9, r9, #28
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
+ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
-@@ -6833,7 +7097,10 @@ constvalop_long_to_double:
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
-- ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
-+ @ begin replacement of ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
-+ lsl r1, r3, #19
-+ lsr r1, r1, #27
-+ @ end replacement of ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
-@@ -6874,7 +7141,10 @@ constvalop_long_to_double:
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
-- ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
-+ @ begin replacement of ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
-+ lsl r1, r3, #19
-+ lsr r1, r1, #27
-+ @ end replacement of ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
-@@ -6915,7 +7185,10 @@ constvalop_long_to_double:
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
-- ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
-+ @ begin replacement of ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
-+ lsl r1, r3, #19
-+ lsr r1, r1, #27
-+ @ end replacement of ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
-@@ -6938,7 +7211,10 @@ constvalop_long_to_double:
+ CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
+diff --git a/runtime/interpreter/mterp/arm/object.S b/runtime/interpreter/mterp/arm/object.S
+index a044d91..404e9ce 100644
+--- a/runtime/interpreter/mterp/arm/object.S
++++ b/runtime/interpreter/mterp/arm/object.S
+@@ -37,7 +37,10 @@
+ %def op_iget(is_object=False, is_wide=False, load="ldr", helper="MterpIGetU32"):
+ @ Fast-path which gets the field offset from thread-local cache.
+ add r0, rSELF, #THREAD_INTERPRETER_CACHE_OFFSET @ cache address
+- ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index
++ @ begin replacement of ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index
++ lsl r1, rPC, #(32-(THREAD_INTERPRETER_CACHE_SIZE_LOG2)-(2))
++ lsr r1, r1, #(32-(THREAD_INTERPRETER_CACHE_SIZE_LOG2))
++ @ end replacement of ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index
+ add r0, r0, r1, lsl #3 @ entry address within the cache
+ ldrd r0, r1, [r0] @ entry key (pc) and value (offset)
+ mov r2, rINST, lsr #12 @ B
+@@ -65,7 +68,10 @@
+ # endif
+ #endif
+ % #endif
+- ubfx r2, rINST, #8, #4 @ A
++ @ begin replacement of ubfx r2, rINST, #8, #4 @ A
++ lsl r2, rINST, #(32-(4)-(8))
++ lsr r2, r2, #(32-(4))
++ @ end replacement of ubfx r2, rINST, #8, #4 @ A
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ % if is_object:
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+@@ -114,7 +120,10 @@
+ GET_VREG r0, r2 @ r0<- object we're operating on
+ bl artIGetObjectFromMterp @ (obj, offset)
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+- ubfx r2, rINST, #8, #4 @ r2<- A
++ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
++ lsl r2, rINST, #(32-(4)-(8))
++ lsr r2, r2, #(32-(4))
++ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+@@ -129,7 +138,10 @@
mov r2, rINST, lsr #12 @ r2<- B
FETCH r1, 1 @ r1<- field byte offset
GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
++ lsl r2, rINST, #(32-(4)-(8))
++ lsr r2, r2, #(32-(4))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
- ldr r0, [r3, r1] @ r0<- obj.field
-@@ -6955,7 +7231,10 @@ constvalop_long_to_double:
+ $load r0, [r3, r1] @ r0<- obj.field
+@@ -152,7 +164,10 @@
mov r2, rINST, lsr #12 @ r2<- B
FETCH ip, 1 @ ip<- field byte offset
GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
++ lsl r2, rINST, #(32-(4)-(8))
++ lsr r2, r2, #(32-(4))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
-@@ -6978,7 +7257,10 @@ constvalop_long_to_double:
- GET_VREG r0, r2 @ r0<- object we're operating on
- bl artIGetObjectFromMterp @ (obj, offset)
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
+@@ -179,7 +194,10 @@
+ mov r3, rSELF @ r3<- self
+ bl MterpInstanceOf @ (index, &obj, method, self)
+ ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+- ubfx r9, rINST, #8, #4 @ r9<- A
++ @ begin replacement of ubfx r9, rINST, #8, #4 @ r9<- A
++ lsl r9, rINST, #(32-(4)-(8))
++ lsr r9, r9, #(32-(4))
++ @ end replacement of ubfx r9, rINST, #8, #4 @ r9<- A
PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
-@@ -6996,7 +7278,10 @@ constvalop_long_to_double:
+ cmp r1, #0 @ exception pending?
+ bne MterpException
+@@ -230,7 +248,10 @@
mov r2, rINST, lsr #12 @ r2<- B
FETCH r1, 1 @ r1<- field byte offset
GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
++ lsl r2, rINST, #(32-(4)-(8))
++ lsr r2, r2, #(32-(4))
+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
cmp r3, #0 @ check object for null
beq common_errNullObject @ object was null
GET_VREG r0, r2 @ r0<- fp[A]
-@@ -7013,7 +7298,10 @@ constvalop_long_to_double:
+@@ -253,7 +274,10 @@
mov r2, rINST, lsr #12 @ r2<- B
FETCH r3, 1 @ r3<- field byte offset
GET_VREG r2, r2 @ r2<- fp[B], the object pointer
- ubfx r0, rINST, #8, #4 @ r0<- A
+ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
-+ lsl r0, rINST, #20
-+ lsr r0, r0, #28
++ lsl r0, rINST, #(32-(4)-(8))
++ lsr r0, r0, #(32-(4))
+ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
cmp r2, #0 @ check object for null
beq common_errNullObject @ object was null
VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[A]
-@@ -7104,7 +7392,10 @@ constvalop_long_to_double:
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
-@@ -7124,7 +7415,10 @@ constvalop_long_to_double:
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
-@@ -7144,7 +7438,10 @@ constvalop_long_to_double:
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
-@@ -7164,7 +7461,10 @@ constvalop_long_to_double:
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
-@@ -7184,7 +7484,10 @@ constvalop_long_to_double:
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrb r0, [r3, r1] @ r0<- obj.field
-@@ -7204,7 +7507,10 @@ constvalop_long_to_double:
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrsb r0, [r3, r1] @ r0<- obj.field
-@@ -7224,7 +7530,10 @@ constvalop_long_to_double:
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrh r0, [r3, r1] @ r0<- obj.field
-@@ -7244,7 +7553,10 @@ constvalop_long_to_double:
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
-- ubfx r2, rINST, #8, #4 @ r2<- A
-+ @ begin replacement of ubfx r2, rINST, #8, #4 @ r2<- A
-+ lsl r2, rINST, #20
-+ lsr r2, r2, #28
-+ @ end replacement of ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrsh r0, [r3, r1] @ r0<- obj.field
-@@ -7406,7 +7718,10 @@ artMterpAsmSisterStart:
- * to modest integer. The EABI convert function isn't doing this for us.
- */
- f2l_doconv:
-- ubfx r2, r0, #23, #8 @ grab the exponent
-+ @ begin replacement of ubfx r2, r0, #23, #8 @ grab the exponent
-+ lsl r2, r0, #1
-+ lsr r2, r2, #24
-+ @ end replacement of ubfx r2, r0, #23, #8 @ grab the exponent
- cmp r2, #0xbe @ MININT < x > MAXINT?
- bhs f2l_special_cases
- b __aeabi_f2lz @ tail call to convert float to long
-@@ -7436,7 +7751,10 @@ f2l_maybeNaN:
- * to modest integer. The EABI convert function isn't doing this for us.
- */
- d2l_doconv:
-- ubfx r2, r1, #20, #11 @ grab the exponent
-+ @ begin replacement of ubfx r2, r1, #20, #11 @ grab the exponent
-+ lsl r2, r1, #1
-+ lsr r2, r2, #21
-+ @ end replacement of ubfx r2, r1, #20, #11 @ grab the exponent
- movw r3, #0x43e
- cmp r2, r3 @ MINLONG < x > MAXLONG?
- bhs d2l_special_cases
-Index: android-platform-art-8.1.0+r23/runtime/interpreter/mterp/replace-ubfx.py
-===================================================================
+diff --git a/runtime/interpreter/mterp/arm/other.S b/runtime/interpreter/mterp/arm/other.S
+index 31b9354..491219a 100644
+--- a/runtime/interpreter/mterp/arm/other.S
++++ b/runtime/interpreter/mterp/arm/other.S
+@@ -45,8 +45,14 @@
+
+ %def op_const_4():
+ /* const/4 vA, #+B */
+- sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
+- ubfx r0, rINST, #8, #4 @ r0<- A
++ @ begin replacement of sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
++ lsl r1, rINST, #(32-(4)-(12))
++ asr r1, r1, #(32-(4))
++ @ end replacement of sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
++ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A
++ lsl r0, rINST, #(32-(4)-(8))
++ lsr r0, r0, #(32-(4))
++ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ SET_VREG r1, r0 @ fp[A]<- r1
+@@ -192,7 +198,10 @@
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
++ @ begin replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
++ lsl r0, rINST, #(32-(4)-(8))
++ lsr r0, r0, #(32-(4))
++ @ end replacement of ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[B]
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+@@ -287,7 +296,10 @@
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ mov r3, rINST, lsr #12 @ r3<- B
+- ubfx rINST, rINST, #8, #4 @ rINST<- A
++ @ begin replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
++ lsl rINST, rINST, #(32-(4)-(8))
++ lsr rINST, rINST, #(32-(4))
++ @ end replacement of ubfx rINST, rINST, #8, #4 @ rINST<- A
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A]
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- fp[B]
+diff --git a/runtime/interpreter/mterp/replace-ubfx.py b/runtime/interpreter/mterp/replace-ubfx.py
+new file mode 100755
+index 0000000..8a79cc8
--- /dev/null
-+++ android-platform-art-8.1.0+r23/runtime/interpreter/mterp/replace-ubfx.py
++++ b/runtime/interpreter/mterp/replace-ubfx.py
@@ -0,0 +1,51 @@
+#!/usr/bin/python3
+#script to replace ubfx with equivalent code for older arm
+ linestartwhitespace = line[:(len(line)-len(linels))]
+ destreg = linesplit[1][:-1]
+ sourcereg = linesplit[2][:-1]
-+ lsb = int(linesplit[3][1:-1])
-+ width = int(linesplit[4][1:])
++ lsb = linesplit[3][1:-1]
++ width = linesplit[4][1:]
+ #print(linesplit)
+ #print((destreg,sourcereg,lsb,width))
+ print(linestartwhitespace+'@ begin replacement of '+linels)
-+ print(linestartwhitespace+'lsl '+destreg+', '+sourcereg+', #'+str(32-width-lsb))
++ print(linestartwhitespace+'lsl '+destreg+', '+sourcereg+', #(32-('+width+')-('+lsb+'))')
+ if linesplit[0] == 'ubfx':
+ rightshift = 'lsr'
+ else:
+ rightshift = 'asr'
-+ print(linestartwhitespace+rightshift+' '+destreg+', '+destreg+', #'+str(32-width))
++ print(linestartwhitespace+rightshift+' '+destreg+', '+destreg+', #(32-('+width+'))')
+ print(linestartwhitespace+'@ end replacement of '+linels)
+ else:
+ print(line)
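
For reference, the shift pairs generated above are equivalent to the bitfield
extract instructions they replace: lsl by (32-width-lsb) moves the field to the
top of the register, and a right shift by (32-width) brings it back down,
zero-filling for ubfx (lsr) or sign-extending for sbfx (asr). The standalone
Python sketch below is not part of the patch and its helper names are invented
for illustration; it simply checks that equivalence for the lsb/width pairs
used in this patch.

#!/usr/bin/python3
# Illustrative sanity check (not shipped): compare reference ubfx/sbfx
# semantics against the lsl/lsr and lsl/asr sequences emitted by
# replace-ubfx.py, on 32-bit register values.

MASK32 = 0xffffffff

def ubfx_ref(value, lsb, width):
    # Unsigned bitfield extract: take 'width' bits starting at 'lsb'.
    return (value >> lsb) & ((1 << width) - 1)

def sbfx_ref(value, lsb, width):
    # Signed bitfield extract: as above, then sign-extend to 32 bits.
    field = ubfx_ref(value, lsb, width)
    if field & (1 << (width - 1)):
        field -= 1 << width
    return field & MASK32

def ubfx_shift(value, lsb, width):
    # lsl rd, rn, #(32-width-lsb) ; lsr rd, rd, #(32-width)
    tmp = (value << (32 - width - lsb)) & MASK32
    return tmp >> (32 - width)

def sbfx_shift(value, lsb, width):
    # lsl rd, rn, #(32-width-lsb) ; asr rd, rd, #(32-width)
    tmp = (value << (32 - width - lsb)) & MASK32
    if tmp & 0x80000000:
        tmp -= 1 << 32   # model the sign bit so Python's >> acts like asr
    return (tmp >> (32 - width)) & MASK32

if __name__ == '__main__':
    # lsb/width pairs appearing in the patch: the vA and B nibbles,
    # the float exponent (23,8) and the double exponent (20,11).
    for value in (0x12345678, 0xfedcba98, 0xffffffff, 0):
        for lsb, width in ((8, 4), (12, 4), (23, 8), (20, 11)):
            assert ubfx_shift(value, lsb, width) == ubfx_ref(value, lsb, width)
            assert sbfx_shift(value, lsb, width) == sbfx_ref(value, lsb, width)
    print('shift replacements match ubfx/sbfx for all tested cases')

The updated script keeps lsb and width as strings and emits the shift amounts
as assembler expressions, presumably so the replacement also works when those
operands are symbolic constants rather than literal integers.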