--- /dev/null
- .fpu vfpv3-d16
+ #include "asm-arm.h"
+ .arch armv6
+ .eabi_attribute 28, 1
+ .eabi_attribute 20, 1
+ .eabi_attribute 21, 1
+ .eabi_attribute 23, 3
+ .eabi_attribute 24, 1
+ .eabi_attribute 25, 1
+ .eabi_attribute 26, 1
+ .eabi_attribute 30, 2
+ .eabi_attribute 34, 1
+ .eabi_attribute 18, 4
+ .text
+ .align 2
+ .global C(avcall_call)
+ .syntax unified
+ .arm
++ .fpu vfpv2
+ .type avcall_call, %function
+ FUNBEGIN(avcall_call)
+ // args = 0, pretend = 0, frame = 0
+ // frame_needed = 1, uses_anonymous_args = 0
+ push {r4, r5, fp, lr}
+ mov r4, r0
+ ldr r0, [r0, $20]
+ ldr ip, [r4, $24]
+ sub r3, sp, $1024
+ add fp, sp, $12
+ sub sp, r3, $8
+ sub r3, r0, ip
+ cmp r3, $19
+ movgt r2, sp
+ subgt r2, r2, $4
+ addgt r3, ip, $16
+ ble L(6)
+ L(5):
+ ldr r1, [r3], $4
+ cmp r0, r3
+ str r1, [r2, $4]!
+ bne L(5)
+ L(6):
+ ldr r3, [r4, $48]
+ tst r3, $1
+ beq L(4)
+ // 89 "avcall-armhf.c" 1
+ vldr.32 s0,[r4, $56]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(4):
+ tst r3, $2
+ beq L(7)
+ // 91 "avcall-armhf.c" 1
+ vldr.32 s1,[r4, $60]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(7):
+ tst r3, $4
+ beq L(8)
+ // 93 "avcall-armhf.c" 1
+ vldr.32 s2,[r4, $64]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(8):
+ tst r3, $8
+ beq L(9)
+ // 95 "avcall-armhf.c" 1
+ vldr.32 s3,[r4, $68]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(9):
+ tst r3, $16
+ beq L(10)
+ // 97 "avcall-armhf.c" 1
+ vldr.32 s4,[r4, $72]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(10):
+ tst r3, $32
+ beq L(11)
+ // 99 "avcall-armhf.c" 1
+ vldr.32 s5,[r4, $76]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(11):
+ tst r3, $64
+ beq L(12)
+ // 101 "avcall-armhf.c" 1
+ vldr.32 s6,[r4, $80]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(12):
+ tst r3, $128
+ beq L(13)
+ // 103 "avcall-armhf.c" 1
+ vldr.32 s7,[r4, $84]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(13):
+ tst r3, $256
+ beq L(14)
+ // 105 "avcall-armhf.c" 1
+ vldr.32 s8,[r4, $88]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(14):
+ tst r3, $512
+ beq L(15)
+ // 107 "avcall-armhf.c" 1
+ vldr.32 s9,[r4, $92]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(15):
+ tst r3, $1024
+ beq L(16)
+ // 109 "avcall-armhf.c" 1
+ vldr.32 s10,[r4, $96]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(16):
+ tst r3, $2048
+ beq L(17)
+ // 111 "avcall-armhf.c" 1
+ vldr.32 s11,[r4, $100]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(17):
+ tst r3, $4096
+ beq L(18)
+ // 113 "avcall-armhf.c" 1
+ vldr.32 s12,[r4, $104]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(18):
+ tst r3, $8192
+ beq L(19)
+ // 115 "avcall-armhf.c" 1
+ vldr.32 s13,[r4, $108]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(19):
+ tst r3, $16384
+ beq L(20)
+ // 117 "avcall-armhf.c" 1
+ vldr.32 s14,[r4, $112]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(20):
+ tst r3, $32768
+ beq L(21)
+ // 119 "avcall-armhf.c" 1
+ vldr.32 s15,[r4, $116]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(21):
+ ldr r3, [r4, $52]
+ tst r3, $1
+ beq L(22)
+ // 123 "avcall-armhf.c" 1
+ vldr.64 d0,[r4, $120]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(22):
+ tst r3, $2
+ beq L(23)
+ // 125 "avcall-armhf.c" 1
+ vldr.64 d1,[r4, $128]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(23):
+ tst r3, $4
+ beq L(24)
+ // 127 "avcall-armhf.c" 1
+ vldr.64 d2,[r4, $136]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(24):
+ tst r3, $8
+ beq L(25)
+ // 129 "avcall-armhf.c" 1
+ vldr.64 d3,[r4, $144]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(25):
+ tst r3, $16
+ beq L(26)
+ // 131 "avcall-armhf.c" 1
+ vldr.64 d4,[r4, $152]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(26):
+ tst r3, $32
+ beq L(27)
+ // 133 "avcall-armhf.c" 1
+ vldr.64 d5,[r4, $160]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(27):
+ tst r3, $64
+ beq L(28)
+ // 135 "avcall-armhf.c" 1
+ vldr.64 d6,[r4, $168]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(28):
+ tst r3, $128
+ beq L(29)
+ // 137 "avcall-armhf.c" 1
+ vldr.64 d7,[r4, $176]
+ // 0 "" 2
+ .arm
+ .syntax unified
+ L(29):
+ ldm ip, {r0, r1, r2, r3}
+ ldr r5, [r4, $4]
+ blx r5
+ ldrb r3, [r4, $12] // zero_extendqisi2
+ cmp r3, $1
+ beq L(30)
+ cmp r3, $2
+ beq L(126)
+ cmp r3, $3
+ beq L(126)
+ cmp r3, $4
+ beq L(126)
+ cmp r3, $5
+ beq L(128)
+ cmp r3, $6
+ beq L(128)
+ cmp r3, $7
+ beq L(129)
+ cmp r3, $8
+ beq L(129)
+ cmp r3, $9
+ beq L(129)
+ cmp r3, $10
+ beq L(129)
+ sub r2, r3, $11
+ cmp r2, $1
+ bls L(130)
+ cmp r3, $13
+ beq L(132)
+ cmp r3, $14
+ beq L(133)
+ cmp r3, $15
+ beq L(129)
+ cmp r3, $16
+ bne L(30)
+ ldr r3, [r4]
+ tst r3, $512
+ beq L(30)
+ ldr r3, [r4, $16]
+ cmp r3, $1
+ beq L(126)
+ cmp r3, $2
+ beq L(128)
+ cmp r3, $4
+ bls L(129)
+ cmp r3, $8
+ bne L(30)
+ L(130):
+ ldr r3, [r4, $8]
+ stm r3, {r0, r1}
+ L(30):
+ mov r0, $0
+ sub sp, fp, $12
+ // sp needed
+ pop {r4, r5, fp, pc}
+ L(126):
+ ldr r3, [r4, $8]
+ strb r0, [r3]
+ mov r0, $0
+ sub sp, fp, $12
+ // sp needed
+ pop {r4, r5, fp, pc}
+ L(129):
+ ldr r3, [r4, $8]
+ str r0, [r3]
+ mov r0, $0
+ sub sp, fp, $12
+ // sp needed
+ pop {r4, r5, fp, pc}
+ L(128):
+ ldr r3, [r4, $8]
+ strh r0, [r3] // movhi
+ mov r0, $0
+ sub sp, fp, $12
+ // sp needed
+ pop {r4, r5, fp, pc}
+ L(132):
+ ldr r3, [r4, $8]
+ vstr.32 s0, [r3]
+ b L(30)
+ L(133):
+ ldr r3, [r4, $8]
+ vstr.64 d0, [r3]
+ b L(30)
+ FUNEND(avcall_call)
+ #if defined __linux__ || defined __FreeBSD__ || defined __FreeBSD_kernel__ || defined __DragonFly__
+ .section .note.GNU-stack,"",%progbits
+ #endif
--- /dev/null
- .fpu vfpv3-d16
+ #include "asm-arm.h"
+ .arch armv6
+ .eabi_attribute 28, 1
+ .eabi_attribute 20, 1
+ .eabi_attribute 21, 1
+ .eabi_attribute 23, 3
+ .eabi_attribute 24, 1
+ .eabi_attribute 25, 1
+ .eabi_attribute 26, 1
+ .eabi_attribute 30, 2
+ .eabi_attribute 34, 1
+ .eabi_attribute 18, 4
+ .text
+ .align 2
+ .global C(callback_receiver)
+ .syntax unified
+ .arm
++ .fpu vfpv2
+ .type callback_receiver, %function
+ FUNBEGIN(callback_receiver)
+ // args = 28, pretend = 0, frame = 176
+ // frame_needed = 1, uses_anonymous_args = 0
+ push {fp, lr}
+ add fp, sp, $4
+ sub sp, sp, $176
+ ldr r2, [fp, $4]
+ mov r3, $0
+ add lr, fp, $28
+ add ip, fp, $44
+ vstr.32 s0, [fp, $-136]
+ vstr.32 s1, [fp, $-132]
+ vstr.32 s2, [fp, $-128]
+ vstr.32 s3, [fp, $-124]
+ vstr.32 s4, [fp, $-120]
+ vstr.32 s5, [fp, $-116]
+ vstr.32 s6, [fp, $-112]
+ vstr.32 s7, [fp, $-108]
+ vstr.32 s8, [fp, $-104]
+ vstr.32 s9, [fp, $-100]
+ vstr.32 s10, [fp, $-96]
+ vstr.32 s11, [fp, $-92]
+ vstr.32 s12, [fp, $-88]
+ vstr.32 s13, [fp, $-84]
+ vstr.32 s14, [fp, $-80]
+ vstr.32 s15, [fp, $-76]
+ vstr.64 d0, [fp, $-68]
+ vstr.64 d1, [fp, $-60]
+ vstr.64 d2, [fp, $-52]
+ vstr.64 d3, [fp, $-44]
+ vstr.64 d4, [fp, $-36]
+ vstr.64 d5, [fp, $-28]
+ vstr.64 d6, [fp, $-20]
+ vstr.64 d7, [fp, $-12]
+ str r3, [fp, $-180]
+ str r3, [fp, $-144]
+ str r3, [fp, $-140]
+ str r3, [fp, $-160]
+ strb r3, [fp, $-156]
+ sub r1, fp, $180
+ str lr, [fp, $-148]
+ ldr r3, [r2]
+ str ip, [fp, $-164]
+ ldr r0, [r2, $4]
+ blx r3
+ ldrb r3, [fp, $-156] // zero_extendqisi2
+ cmp r3, $0
+ beq L(1)
+ cmp r3, $1
+ beq L(25)
+ cmp r3, $2
+ ldrsbeq r0, [fp, $-172]
+ beq L(1)
+ cmp r3, $3
+ beq L(25)
+ cmp r3, $4
+ ldrsheq r0, [fp, $-172]
+ beq L(1)
+ cmp r3, $5
+ ldrheq r0, [fp, $-172]
+ beq L(1)
+ cmp r3, $6
+ beq L(27)
+ cmp r3, $7
+ beq L(27)
+ cmp r3, $8
+ beq L(27)
+ cmp r3, $9
+ beq L(27)
+ sub r2, r3, $10
+ cmp r2, $1
+ bls L(29)
+ cmp r3, $12
+ vldreq.32 s0, [fp, $-172]
+ beq L(1)
+ cmp r3, $13
+ beq L(30)
+ cmp r3, $14
+ beq L(27)
+ cmp r3, $15
+ bne L(1)
+ ldr r3, [fp, $-180]
+ tst r3, $1024
+ beq L(1)
+ ldr r3, [fp, $-152]
+ cmp r3, $1
+ beq L(31)
+ cmp r3, $2
+ ldr r3, [fp, $-160]
+ ldrheq r0, [r3]
+ ldrne r0, [r3]
+ L(1):
+ sub sp, fp, $4
+ // sp needed
+ pop {fp, pc}
+ L(25):
+ ldrb r0, [fp, $-172] // zero_extendqisi2
+ sub sp, fp, $4
+ // sp needed
+ pop {fp, pc}
+ L(27):
+ ldr r0, [fp, $-172]
+ sub sp, fp, $4
+ // sp needed
+ pop {fp, pc}
+ L(30):
+ vldr.64 d0, [fp, $-172]
+ b L(1)
+ L(29):
+ ldr r0, [fp, $-172]
+ ldr r1, [fp, $-168]
+ b L(1)
+ L(31):
+ ldr r3, [fp, $-160]
+ ldrb r0, [r3] // zero_extendqisi2
+ b L(1)
+ FUNEND(callback_receiver)
+ .align 2
+ .global C(callback_get_receiver)
+ .syntax unified
+ .arm
+ .fpu vfpv3-d16
+ .type callback_get_receiver, %function
+ FUNBEGIN(callback_get_receiver)
+ // args = 0, pretend = 0, frame = 0
+ // frame_needed = 1, uses_anonymous_args = 0
+ // link register save eliminated.
+ ldr r3, L(34)
+ ldr r2, L(34)+4
+ L(PIC0):
+ add r3, pc, r3
+ str fp, [sp, $-4]!
+ add fp, sp, $0
+ ldr r3, [r3, r2]
+ mov r0, r3
+ add sp, fp, $0
+ // sp needed
+ ldr fp, [sp], $4
+ bx lr
+ L(35):
+ .align 2
+ L(34):
+ .word _GLOBAL_OFFSET_TABLE_-(L(PIC0)+8)
+ .word callback_receiver(GOT)
+ FUNEND(callback_get_receiver)
+ #if defined __linux__ || defined __FreeBSD__ || defined __FreeBSD_kernel__ || defined __DragonFly__
+ .section .note.GNU-stack,"",%progbits
+ #endif
- ffcall (2.4-2.1+rpi1) trixie-staging; urgency=medium
++ffcall (2.5-2+rpi1) trixie-staging; urgency=medium
+
+ [changes brought forward from 2.0-2+rpi1 by Peter Michael Green <plugwash@raspbian.org> at Thu, 07 Dec 2017 01:23:49 +0000]
+ * Mark binaries as vfpv2 not vfpv3-d16
+ * Disable testsuite, it fails on some of our buildboxes.
+
- -- Raspbian forward porter <root@raspbian.org> Thu, 21 Sep 2023 00:01:35 +0000
++ -- Raspbian forward porter <root@raspbian.org> Tue, 10 Dec 2024 22:55:31 +0000
+
- ffcall (2.4-2.1) unstable; urgency=medium
+ ffcall (2.5-2) unstable; urgency=medium
- * Non-maintainer upload.
- * fix ftbfs on riscv64 and this is rdep of clisp which to bootstrap sbcl on
- riscv64. (Closes: #1038803)
+ * Worked fine in experimental, uploading to unstable.
- -- Bo YU <tsu.yubo@gmail.com> Wed, 21 Jun 2023 06:07:35 +0800
+ -- Peter Van Eynde <pvaneynd@debian.org> Fri, 22 Nov 2024 07:39:15 +0100
+
+ ffcall (2.5-1) experimental; urgency=medium
+
+ [ Sébastien Villemot ]
+ * Remove myself from Uploaders
+
+ [ Peter Van Eynde ]
+ * New upstream version 2.5. (Closes: 883044, 1058649)
+ * Removed integrated m4-dirs patch
+ * Added myself as uploader
+ * Updated Standards-Version, no changes
+
+ -- Peter Van Eynde <pvaneynd@debian.org> Wed, 20 Nov 2024 08:09:29 +0100
ffcall (2.4-2) unstable; urgency=medium
- m4-dirs.patch
- riscv64-pic.patch
+raspbian.patch
--- /dev/null
- .fpu vfpv3-d16
+ #include "asm-arm.h"
+ #ifdef __PIC__
+ .arch armv6
+ .eabi_attribute 28, 1
+ .eabi_attribute 20, 1
+ .eabi_attribute 21, 1
+ .eabi_attribute 23, 3
+ .eabi_attribute 24, 1
+ .eabi_attribute 25, 1
+ .eabi_attribute 26, 1
+ .eabi_attribute 30, 2
+ .eabi_attribute 34, 1
+ .eabi_attribute 18, 4
+ .text
+ .align 2
+ .global C(vacall_receiver)
+ .syntax unified
+ .arm
+ .fpu vfpv3-d16
+ .type vacall_receiver, %function
+ FUNBEGIN(vacall_receiver)
+ // args = 20, pretend = 16, frame = 176
+ // frame_needed = 1, uses_anonymous_args = 0
+ sub sp, sp, $16
+ mov ip, $0
+ push {r4, r5, r6, fp, lr}
+ add fp, sp, $16
+ ldr r4, L(32)
+ ldr r5, L(32)+4
+ add lr, fp, $4
+ L(PIC0):
+ add r4, pc, r4
+ add r6, fp, $20
+ sub sp, sp, $180
+ stm lr, {r0, r1, r2, r3}
+ vstr.32 s0, [fp, $-152]
+ vstr.32 s1, [fp, $-148]
+ vstr.32 s2, [fp, $-144]
+ vstr.32 s3, [fp, $-140]
+ vstr.32 s4, [fp, $-136]
+ vstr.32 s5, [fp, $-132]
+ vstr.32 s6, [fp, $-128]
+ vstr.32 s7, [fp, $-124]
+ vstr.32 s8, [fp, $-120]
+ vstr.32 s9, [fp, $-116]
+ vstr.32 s10, [fp, $-112]
+ vstr.32 s11, [fp, $-108]
+ vstr.32 s12, [fp, $-104]
+ vstr.32 s13, [fp, $-100]
+ vstr.32 s14, [fp, $-96]
+ vstr.32 s15, [fp, $-92]
+ vstr.64 d0, [fp, $-84]
+ vstr.64 d1, [fp, $-76]
+ vstr.64 d2, [fp, $-68]
+ vstr.64 d3, [fp, $-60]
+ vstr.64 d4, [fp, $-52]
+ vstr.64 d5, [fp, $-44]
+ vstr.64 d6, [fp, $-36]
+ vstr.64 d7, [fp, $-28]
+ str lr, [fp, $-164]
+ str ip, [fp, $-196]
+ str ip, [fp, $-160]
+ str r6, [fp, $-180]
+ str ip, [fp, $-156]
+ str ip, [fp, $-176]
+ strb ip, [fp, $-172]
+ ldr r2, [r4, r5]
+ mov r3, r4
+ sub r0, fp, $196
+ ldr r3, [r2]
+ blx r3
+ ldrb r3, [fp, $-172] // zero_extendqisi2
+ cmp r3, $0
+ beq L(1)
+ cmp r3, $1
+ beq L(25)
+ cmp r3, $2
+ ldrsbeq r0, [fp, $-188]
+ beq L(1)
+ cmp r3, $3
+ beq L(25)
+ cmp r3, $4
+ ldrsheq r0, [fp, $-188]
+ beq L(1)
+ cmp r3, $5
+ ldrheq r0, [fp, $-188]
+ beq L(1)
+ cmp r3, $6
+ beq L(27)
+ cmp r3, $7
+ beq L(27)
+ cmp r3, $8
+ beq L(27)
+ cmp r3, $9
+ beq L(27)
+ sub r2, r3, $10
+ cmp r2, $1
+ bls L(29)
+ cmp r3, $12
+ vldreq.32 s0, [fp, $-188]
+ beq L(1)
+ cmp r3, $13
+ beq L(30)
+ cmp r3, $14
+ beq L(27)
+ cmp r3, $15
+ bne L(1)
+ ldr r3, [fp, $-196]
+ tst r3, $1024
+ beq L(1)
+ ldr r3, [fp, $-168]
+ cmp r3, $1
+ beq L(31)
+ cmp r3, $2
+ ldr r3, [fp, $-176]
+ ldrheq r0, [r3]
+ ldrne r0, [r3]
+ L(1):
+ sub sp, fp, $16
+ // sp needed
+ pop {r4, r5, r6, fp, lr}
+ add sp, sp, $16
+ bx lr
+ L(25):
+ ldrb r0, [fp, $-188] // zero_extendqisi2
+ sub sp, fp, $16
+ // sp needed
+ pop {r4, r5, r6, fp, lr}
+ add sp, sp, $16
+ bx lr
+ L(27):
+ ldr r0, [fp, $-188]
+ sub sp, fp, $16
+ // sp needed
+ pop {r4, r5, r6, fp, lr}
+ add sp, sp, $16
+ bx lr
+ L(30):
+ vldr.64 d0, [fp, $-188]
+ b L(1)
+ L(29):
+ ldr r0, [fp, $-188]
+ ldr r1, [fp, $-184]
+ b L(1)
+ L(31):
+ ldr r3, [fp, $-176]
+ ldrb r0, [r3] // zero_extendqisi2
+ b L(1)
+ L(33):
+ .align 2
+ L(32):
+ .word _GLOBAL_OFFSET_TABLE_-(L(PIC0)+8)
+ .word C(vacall_function)(GOT)
+ FUNEND(vacall_receiver)
+ #else
+ .arch armv6
+ .eabi_attribute 28, 1
+ .eabi_attribute 20, 1
+ .eabi_attribute 21, 1
+ .eabi_attribute 23, 3
+ .eabi_attribute 24, 1
+ .eabi_attribute 25, 1
+ .eabi_attribute 26, 1
+ .eabi_attribute 30, 2
+ .eabi_attribute 34, 1
+ .eabi_attribute 18, 4
+ .text
+ .align 2
+ .global C(vacall_receiver)
+ .syntax unified
+ .arm
++ .fpu vfpv2
+ .type vacall_receiver, %function
+ FUNBEGIN(vacall_receiver)
+ // args = 20, pretend = 16, frame = 176
+ // frame_needed = 1, uses_anonymous_args = 0
+ sub sp, sp, $16
+ mov ip, $0
+ push {r4, r5, fp, lr}
+ add fp, sp, $12
+ ldr r4, L(32)
+ add lr, fp, $4
+ add r5, fp, $20
+ sub sp, sp, $176
+ stm lr, {r0, r1, r2, r3}
+ vstr.32 s0, [fp, $-144]
+ vstr.32 s1, [fp, $-140]
+ vstr.32 s2, [fp, $-136]
+ vstr.32 s3, [fp, $-132]
+ vstr.32 s4, [fp, $-128]
+ vstr.32 s5, [fp, $-124]
+ vstr.32 s6, [fp, $-120]
+ vstr.32 s7, [fp, $-116]
+ vstr.32 s8, [fp, $-112]
+ vstr.32 s9, [fp, $-108]
+ vstr.32 s10, [fp, $-104]
+ vstr.32 s11, [fp, $-100]
+ vstr.32 s12, [fp, $-96]
+ vstr.32 s13, [fp, $-92]
+ vstr.32 s14, [fp, $-88]
+ vstr.32 s15, [fp, $-84]
+ vstr.64 d0, [fp, $-76]
+ vstr.64 d1, [fp, $-68]
+ vstr.64 d2, [fp, $-60]
+ vstr.64 d3, [fp, $-52]
+ vstr.64 d4, [fp, $-44]
+ vstr.64 d5, [fp, $-36]
+ vstr.64 d6, [fp, $-28]
+ vstr.64 d7, [fp, $-20]
+ str lr, [fp, $-156]
+ str ip, [fp, $-188]
+ sub r0, fp, $188
+ str ip, [fp, $-152]
+ ldr r3, [r4]
+ str r5, [fp, $-172]
+ str ip, [fp, $-148]
+ str ip, [fp, $-168]
+ strb ip, [fp, $-164]
+ blx r3
+ ldrb r3, [fp, $-164] // zero_extendqisi2
+ cmp r3, $0
+ beq L(1)
+ cmp r3, $1
+ beq L(25)
+ cmp r3, $2
+ ldrsbeq r0, [fp, $-180]
+ beq L(1)
+ cmp r3, $3
+ beq L(25)
+ cmp r3, $4
+ ldrsheq r0, [fp, $-180]
+ beq L(1)
+ cmp r3, $5
+ ldrheq r0, [fp, $-180]
+ beq L(1)
+ cmp r3, $6
+ beq L(27)
+ cmp r3, $7
+ beq L(27)
+ cmp r3, $8
+ beq L(27)
+ cmp r3, $9
+ beq L(27)
+ sub r2, r3, $10
+ cmp r2, $1
+ bls L(29)
+ cmp r3, $12
+ vldreq.32 s0, [fp, $-180]
+ beq L(1)
+ cmp r3, $13
+ beq L(30)
+ cmp r3, $14
+ beq L(27)
+ cmp r3, $15
+ bne L(1)
+ ldr r3, [fp, $-188]
+ tst r3, $1024
+ beq L(1)
+ ldr r3, [fp, $-160]
+ cmp r3, $1
+ beq L(31)
+ cmp r3, $2
+ ldr r3, [fp, $-168]
+ ldrheq r0, [r3]
+ ldrne r0, [r3]
+ L(1):
+ sub sp, fp, $12
+ // sp needed
+ pop {r4, r5, fp, lr}
+ add sp, sp, $16
+ bx lr
+ L(25):
+ ldrb r0, [fp, $-180] // zero_extendqisi2
+ sub sp, fp, $12
+ // sp needed
+ pop {r4, r5, fp, lr}
+ add sp, sp, $16
+ bx lr
+ L(27):
+ ldr r0, [fp, $-180]
+ sub sp, fp, $12
+ // sp needed
+ pop {r4, r5, fp, lr}
+ add sp, sp, $16
+ bx lr
+ L(30):
+ vldr.64 d0, [fp, $-180]
+ b L(1)
+ L(29):
+ ldr r0, [fp, $-180]
+ ldr r1, [fp, $-176]
+ b L(1)
+ L(31):
+ ldr r3, [fp, $-168]
+ ldrb r0, [r3] // zero_extendqisi2
+ b L(1)
+ L(33):
+ .align 2
+ L(32):
+ .word C(vacall_function)
+ FUNEND(vacall_receiver)
+ #endif
+ #if defined __linux__ || defined __FreeBSD__ || defined __FreeBSD_kernel__ || defined __DragonFly__
+ .section .note.GNU-stack,"",%progbits
+ #endif