│ Disassembly of section .text:
│
│ 0000000000000040 <Function:compute_2d-2-turbofan>:
│ Function:compute_2d-2-turbofan():
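│ # perf-annotate style listing of the TurboFan-optimized code for compute_2d:
│ # the left-hand column is the share of profile samples attributed to each
│ # instruction, and the ↑/↓ arrows mark backward/forward jump targets.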
│ push %rbp
│ mov %rsp,%rbp
│ pushq $0xa
│ push %rsi
│ sub $0x1a0,%rsp
│ mov 0x23(%rsi),%rdi
│ mov %rdx,-0x38(%rbp)
│ mov %rcx,-0x18(%rbp)
│ mov %rax,-0x28(%rbp)
│ mov %rbx,-0x20(%rbp)
│ cmp (%rdi),%rsp
│ ↓ jbe d05
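│ # ^ %rsp is compared against a limit loaded via 0x23(%rsi) and control bails
│ # out to an out-of-line stub (d05) when it is at or below it; this pattern,
│ # repeated before every loop below, looks like V8's stack-limit / interrupt
│ # check (an interpretation of the pattern, not stated in the listing).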
│ 2b: mov %edx,%edi
│ vpcmpeqd %xmm0,%xmm0,%xmm0
│ mov 0xb(%rsi),%r8
│ mov %rdi,-0x30(%rbp)
│ vmovups %xmm0,-0x1b0(%rbp)
0.02 │ ↓ jmpq 58
│ nop
│ nop
│ 50: mov -0x30(%rbp),%rdi
│ mov -0x18(%rbp),%rcx
│ 58: mov 0x23(%rsi),%r12
│ mov %r8,-0xc8(%rbp)
│ cmp (%r12),%rsp
│ ↓ jbe d23
│ 6d: mov 0x64(%rdi,%r8,1),%r12d
│ mov 0x70(%rdi,%r8,1),%r14d
│ mov 0x38(%rdi,%r8,1),%r15d
│ mov 0x2c(%rdi,%r8,1),%ebx
│ mov 0x7c(%rdi,%r8,1),%eax
│ mov 0x10(%rdi,%r8,1),%edx
│ mov %r12,-0xf0(%rbp)
│ mov %r14,-0xb8(%rbp)
│ mov %r15,-0x118(%rbp)
│ mov %rbx,-0x120(%rbp)
│ mov %rax,-0x110(%rbp)
│ mov %rdx,-0xc0(%rbp)
│ movq $0x0,-0x48(%rbp)
│ movq $0x0,-0x50(%rbp)
│ movq $0x1,-0x40(%rbp)
│ vmovups -0x1b0(%rbp),%xmm1
│ ↓ jmpq f0
│ nop
│ e0: movq $0x0,-0x40(%rbp)
│ movq $0x1,-0x50(%rbp)
│ f0: mov 0x23(%rsi),%rdi
│ cmp (%rdi),%rsp
│ ↓ jbe d40
│ fd: cmp $0x0,%edx
│ ↓ je 972
│ mov %rdx,%rdi
│ imul -0x50(%rbp),%edi
│ mov %rdx,%r11
│ imul %edi,%r11d
│ mov %rdi,-0x88(%rbp)
│ lea (%r12,%r11,4),%edi
│ mov %rdx,%r12
│ shr $0x2,%r12d
│ mov %r11,-0x108(%rbp)
│ cmp $0x0,%r12d
│ sete %r11b
│ movzbl %r11b,%r11d
│ mov %r12,-0x148(%rbp)
│ cmpl $0x0,-0x50(%rbp)
│ sete %r12b
│ movzbl %r12b,%r12d
│ mov %r12,-0x128(%rbp)
│ mov %rdx,%r12
│ and $0xfffffffc,%r12d
│ mov %r11,-0x198(%rbp)
│ mov %r12,-0x100(%rbp)
│ mov -0x110(%rbp),%r10
│ mov %r10,-0x70(%rbp)
│ mov -0x118(%rbp),%r15
│ mov -0x120(%rbp),%rax
│ mov %rdi,%r12
│ mov -0xb8(%rbp),%rdi
│ xor %r14d,%r14d
│ mov -0x88(%rbp),%rdx
│ vpcmpeqd %xmm3,%xmm3,%xmm3
│ vpslld $0x19,%xmm3,%xmm3
│ vpsrld $0x2,%xmm3,%xmm3
│ mov $0x3089705f,%r10d
│ vmovd %r10d,%xmm2
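│ # ^ constant set-up for the scalar path: the vpcmpeqd/vpslld $0x19/vpsrld $0x2
│ # sequence materializes 0x3f800000 (1.0f) in every lane of %xmm3, and
│ # 0x3089705f is the bit pattern of 1e-9f, moved into %xmm2.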
│ ↓ jmpq 1fb
│ nop
│ nop
│ 1c0: mov -0x70(%rbp),%r8
0.03 │ lea (%r8,%rdx,4),%r8d
0.02 │ mov -0x68(%rbp),%r15
0.08 │ lea (%r15,%rdx,4),%r15d
│ mov -0x60(%rbp),%rax
│ lea (%rax,%rdx,4),%eax
│ mov -0x58(%rbp),%r9
│ lea (%r9,%rdx,4),%r12d
0.02 │ mov -0x50(%rbp),%r9
│ lea (%r9,%rdx,4),%r9d
│ mov %rdi,%rdx
0.02 │ mov %r8,-0x70(%rbp)
│ mov -0xc8(%rbp),%r8
0.02 │ mov %rbx,%r14
│ mov %r9,%rdi
│ 1fb: mov 0x23(%rsi),%rbx
│ mov %rdi,-0x50(%rbp)
0.06 │ mov %r12,-0x58(%rbp)
│ cmp (%rbx),%rsp
0.04 │ ↓ jbe d77
│ 210: cmpl $0x0,-0x198(%rbp)
│ ↓ jne 410
0.02 │ lea (%r14,%rdx,1),%ebx
│ lea (%rcx,%rbx,4),%ebx
│ lea (%rcx,%rdx,4),%r11d
│ vbroadcastss (%r8,%rbx,1),%xmm4
0.03 │ cmpl $0x0,-0x128(%rbp)
│ ↓ jne 398
│ movabs $0x3089705f3089705f,%r10
│ vmovq %r10,%xmm5
│ mov Function:compute_2d-2-turbofan+0x23d,%r10
0.02 │ vpinsrq $0x1,%r10,%xmm5,%xmm5
0.06 │ vpxor %xmm6,%xmm6,%xmm6
0.02 │ movabs $0x3f8000003f800000,%r10
│ vmovq %r10,%xmm7
│ mov Function:compute_2d-2-turbofan+0x25d,%r10
0.02 │ vpinsrq $0x1,%r10,%xmm7,%xmm7
0.02 │ movabs $0x5d5e0b6b5d5e0b6b,%r10
│ vmovq %r10,%xmm8
│ mov Function:compute_2d-2-turbofan+0x279,%r10
│ vpinsrq $0x1,%r10,%xmm8,%xmm8
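│ # ^ constants for the SIMD loop: 0x3089705f = 1e-9f, 0x3f800000 = 1.0f and
│ # 0x5d5e0b6b ~ 1e18f; each movabs fills the low two lanes and the vpinsrq
│ # loads the upper two lanes from data embedded near the code, which the
│ # disassembler resolves as "Function:...+0x23d" etc. (presumably the same
│ # values repeated).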
│ mov -0x70(%rbp),%rcx
│ xor %ebx,%ebx
│ mov -0x148(%rbp),%r10
│ mov %r10,-0x90(%rbp)
│ ↓ jmpq 2c7
│ nop
│ 2b0: add $0x10,%ebx
│ vmovapd %xmm11,%xmm1
0.02 │ mov -0x58(%rbp),%r12
0.14 │ mov %rdi,-0x90(%rbp)
0.27 │ mov -0x50(%rbp),%rdi
0.92 │ 2c7: mov 0x23(%rsi),%rdx
0.02 │ cmp (%rdx),%rsp
0.10 │ ↓ jbe de0
0.04 │ 2d4: lea (%r11,%rbx,1),%edx
│ vmovdqu (%r8,%rdx,1),%xmm9
0.02 │ lea (%rbx,%r12,1),%edx
0.05 │ vsubps %xmm9,%xmm4,%xmm9
0.23 │ lea (%rbx,%rdi,1),%r12d
0.14 │ vmovdqu %xmm9,(%r8,%rdx,1)
1.57 │ vmovdqu (%r8,%r12,1),%xmm10
0.08 │ vmulps %xmm9,%xmm9,%xmm9
0.06 │ vaddps %xmm10,%xmm9,%xmm9
0.08 │ vsqrtps %xmm9,%xmm9
1.74 │ vcmpltps %xmm9,%xmm5,%xmm11
0.07 │ vmovaps %xmm1,%xmm15
0.13 │ vxorps %xmm6,%xmm15,%xmm15
0.14 │ vandps %xmm15,%xmm11,%xmm11
1.46 │ vxorps %xmm6,%xmm11,%xmm11
0.68 │ lea (%rbx,%rax,1),%edx
0.02 │ lea (%rbx,%r15,1),%edi
│ vmovdqu (%r8,%rdx,1),%xmm1
0.07 │ vmovdqu (%r8,%rdi,1),%xmm12
0.19 │ vcmpltps %xmm12,%xmm7,%xmm12
0.13 │ vcmpltps %xmm9,%xmm1,%xmm1
0.26 │ lea (%rbx,%rcx,1),%edi
0.80 │ vpand %xmm12,%xmm1,%xmm1
0.87 │ vmovapd %xmm1,%xmm12
0.02 │ vandnps %xmm9,%xmm12,%xmm12
1.56 │ vcmpeqps %xmm8,%xmm10,%xmm9
0.02 │ vmovdqu %xmm1,(%r8,%rdi,1)
0.02 │ vmovaps %xmm10,%xmm15
0.10 │ vxorps %xmm12,%xmm15,%xmm15
1.75 │ vandps %xmm15,%xmm9,%xmm9
1.89 │ vxorps %xmm12,%xmm9,%xmm9
1.32 │ vmovdqu %xmm9,(%r8,%r12,1)
2.09 │ mov -0x90(%rbp),%rdi
│ add $0xffffffff,%edi
0.02 │ ↑ jne 2b0
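│ # ^ main 4-lane SIMD body: diff = splat(%xmm4) - a[j]; store diff;
│ # r = vsqrtps(diff*diff + b[j]). The compare of r against the 1e-9f splat
│ # (%xmm5) is AND-accumulated into %xmm11 (copied back to %xmm1 each
│ # iteration), while the compares against the 1.0f splat (%xmm7) feed the
│ # xorps/andps/xorps triples, i.e. branchless bit-selects for the conditional
│ # stores. Array roles are inferred from the address arithmetic, not named in
│ # the source.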
│ mov -0x88(%rbp),%rdx
0.04 │ mov -0x18(%rbp),%rcx
│ vmovapd %xmm11,%xmm1
│ ↓ jmpq 410
0.02 │ 398: mov %rdi,%rbx
│ mov %r12,%rdi
│ mov -0x148(%rbp),%r10
│ mov %r10,-0x98(%rbp)
│ ↓ jmpq 3d1
│ nop
│ nop
│ 3c0: add $0x10,%edi
0.04 │ add $0x10,%r11d
│ add $0x10,%ebx
│ mov %r12,-0x98(%rbp)
│ 3d1: mov 0x23(%rsi),%r12
1.90 │ cmp (%r12),%rsp
0.95 │ ↓ jbe ea8
│ 3df: vmovdqu (%r8,%r11,1),%xmm5
0.22 │ vsubps %xmm5,%xmm4,%xmm5
0.03 │ vmovdqu %xmm5,(%r8,%rdi,1)
0.60 │ vmovdqu (%r8,%rbx,1),%xmm6
0.33 │ vmulps %xmm5,%xmm5,%xmm5
0.51 │ vaddps %xmm6,%xmm5,%xmm5
0.51 │ vmovdqu %xmm5,(%r8,%rbx,1)
2.97 │ mov -0x98(%rbp),%r12
0.14 │ add $0xffffffff,%r12d
│ ↑ jne 3c0
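│ # ^ second, shorter SIMD loop: diff = splat(%xmm4) - src[j]; store diff;
│ # dst[j] += diff*diff (no sqrt or threshold compare in this variant).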
│ 410: lea 0x1(%r14),%ebx
0.02 │ mov %rax,-0x60(%rbp)
0.03 │ mov %r15,-0x68(%rbp)
│ mov %rbx,-0x78(%rbp)
0.02 │ cmpl $0x0,-0x40(%rbp)
0.03 │ ↓ jne 6ca
│ cmpl $0x0,-0x128(%rbp)
│ ↓ jne 5c2
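│ # ^ the flags kept at -0x40(%rbp) and -0x128(%rbp) pick one of four
│ # specialized copies of the scalar row update that follows (fall-through,
│ # 5c2, 6f0 and 850); the copies differ in which parts of the update they
│ # perform (inferred from the similar bodies, not stated in the listing).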
│ mov %rcx,%rax
│ mov -0x48(%rbp),%rcx
│ mov -0x100(%rbp),%r12
│ mov -0x108(%rbp),%rdi
│ mov %rdx,%r11
│ ↓ jmpq 550
│ nop
│ nop
│ 460: mov -0xc0(%rbp),%rcx
│ imul %r14d,%ecx
│ lea (%rcx,%rdi,1),%r15d
│ lea (%r14,%r11,1),%ebx
│ lea (%rax,%rbx,4),%ebx
│ lea (%r12,%r11,1),%esi
│ lea -0x4(%rax,%rsi,4),%esi
│ add %r12d,%r15d
│ add %edx,%esi
│ add %r12d,%ecx
│ vmovss (%r8,%rbx,1),%xmm4
│ mov -0xf0(%rbp),%rbx
│ lea -0x4(%rbx,%r15,4),%r12d
│ vmovss (%r8,%rsi,1),%xmm5
│ add %edx,%r12d
│ mov -0xb8(%rbp),%r15
│ lea -0x4(%r15,%rcx,4),%esi
│ vsubss %xmm5,%xmm4,%xmm4
│ add %edx,%esi
│ vmovss %xmm4,(%r8,%r12,1)
│ vmovss (%r8,%rsi,1),%xmm5
│ vmulss %xmm4,%xmm4,%xmm4
│ vaddss %xmm5,%xmm4,%xmm4
│ sqrtss %xmm4,%xmm4
│ vucomiss %xmm4,%xmm2
│ seta %r12b
│ movzbl %r12b,%r12d
│ or -0x48(%rbp),%r12d
│ mov -0x120(%rbp),%r15
│ lea -0x4(%r15,%rcx,4),%eax
│ add %edx,%eax
│ mov -0x118(%rbp),%r15
│ lea -0x4(%r15,%rcx,4),%r11d
│ vmovss %xmm4,(%r8,%rsi,1)
│ add %edx,%r11d
│ vmovss (%r8,%rax,1),%xmm5
│ vmovss (%r8,%r11,1),%xmm6
│ vucomiss %xmm3,%xmm6
│ seta %r11b
│ movzbl %r11b,%r11d
│ vucomiss %xmm5,%xmm4
│ seta %al
│ movzbl %al,%eax
│ mov -0x110(%rbp),%rsi
│ lea -0x4(%rsi,%rcx,4),%ecx
│ and %eax,%r11d
│ add %edx,%ecx
│ neg %r11d
│ mov %r11d,(%r8,%rcx,1)
│ mov %r12,%rcx
│ mov %r9,%r12
│ mov -0x18(%rbp),%rax
│ mov -0x10(%rbp),%rsi
│ mov -0x88(%rbp),%r11
│ mov -0x78(%rbp),%rbx
│ 550: mov %rcx,-0x48(%rbp)
│ mov 0x23(%rsi),%rcx
│ mov -0xc0(%rbp),%rdx
│ cmp (%rcx),%rsp
│ ↓ jbe f3d
│ 568: cmp %r12d,%edx
│ ↓ jb 579
│ mov %rdx,%rcx
│ ↓ jmpq 57c
│ 579: mov %r12,%rcx
│ 57c: xor %edx,%edx
│ mov %r12,%r9
│ nop
│ nop
│ 590: mov 0x23(%rsi),%r15
│ cmp (%r15),%rsp
│ ↓ jbe fae
│ 59d: cmp %r9d,%ecx
0.02 │ ↓ je 5b7
│ add $0x4,%edx
│ add $0x1,%r9d
│ cmp %r9d,%ebx
│ ↑ je 590
│ ↑ jmpq 460
│ 5b7: mov %r11,%rdi
│ mov %rax,%rcx
│ ↓ jmpq 955
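│ # ^ scalar tail of the same update: one vmovss/vsubss/sqrtss chain per
│ # element, with vucomiss against %xmm2 (1e-9f) and %xmm3 (1.0f) standing in
│ # for the packed compares; the 1e-9f result is OR-ed into the flag at
│ # -0x48(%rbp) and the remaining compares produce the 0/-1 word stored at the
│ # end of the block.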
│ 5c2: mov -0xc0(%rbp),%r9
│ imul %r14d,%r9d
│ mov -0x108(%rbp),%rdi
│ lea (%r9,%rdi,1),%r11d
│ mov %r11,-0xf8(%rbp)
│ mov -0x100(%rbp),%r12
│ mov %rbx,%r15
│ mov %rdx,%rbx
│ ↓ jmpq 63d
│ nop
│ nop
│ 600: lea (%r14,%rbx,1),%edi
│ lea (%rcx,%rdi,4),%edi
│ mov -0xc8(%rbp),%rax
│ vmovss (%rax,%rdi,1),%xmm4
│ vmovss (%rax,%rdx,1),%xmm5
│ vsubss %xmm5,%xmm4,%xmm4
│ vmovss %xmm4,(%rax,%r8,1)
│ vmovss (%rax,%r11,1),%xmm5
│ vmulss %xmm4,%xmm4,%xmm4
│ vaddss %xmm5,%xmm4,%xmm4
│ vmovss %xmm4,(%rax,%r11,1)
│ mov -0xf8(%rbp),%r11
│ 63d: mov 0x23(%rsi),%rdi
│ mov -0xc0(%rbp),%rdx
│ mov -0xb8(%rbp),%rax
│ cmp (%rdi),%rsp
│ ↓ jbe 1042
│ 658: cmp %r12d,%edx
│ ↓ jb 669
│ mov %rdx,%rdi
│ ↓ jmpq 66c
│ 669: mov %r12,%rdi
│ 66c: lea (%r12,%r11,1),%r8d
│ mov -0xf0(%rbp),%rdx
│ lea -0x4(%rdx,%r8,4),%r8d
│ lea (%r12,%rbx,1),%edx
│ lea -0x4(%rcx,%rdx,4),%edx
│ lea (%r12,%r9,1),%r11d
│ lea -0x4(%rax,%r11,4),%r11d
│ nop
│ 690: mov 0x23(%rsi),%rax
│ cmp (%rax),%rsp
│ ↓ jbe 10c1
│ 69d: cmp %r12d,%edi
│ ↓ je 6bf
│ add $0x4,%edx
│ add $0x4,%r8d
│ add $0x4,%r11d
│ add $0x1,%r12d
│ cmp %r12d,%r15d
│ ↑ je 690
│ ↑ jmpq 600
│ 6bf: mov %rbx,%rdi
│ mov %r15,%rbx
│ ↓ jmpq 955
│ 6ca: cmpl $0x0,-0x128(%rbp)
│ ↓ jne 850
│ mov -0x48(%rbp),%rdi
│ mov -0x100(%rbp),%r12
│ mov -0x108(%rbp),%r11
│ ↓ jmpq 7e7
│ xchg %ax,%ax
│ 6f0: mov -0xc0(%rbp),%rdi
│ imul %r14d,%edi
│ lea (%rdi,%r11,1),%r8d
│ lea (%r14,%rdx,1),%ebx
│ lea (%rcx,%rbx,4),%ebx
│ add %r12d,%edi
│ lea -0x4(%rax,%rdi,4),%esi
│ lea (%r12,%rdx,1),%eax
│ add %r15d,%esi
│ lea -0x4(%rcx,%rax,4),%eax
│ add %r12d,%r8d
│ mov -0xc8(%rbp),%r12
│ movl $0x0,(%r12,%rsi,1)
│ add %r15d,%eax
│ vmovss (%r12,%rbx,1),%xmm4
│ mov -0xf0(%rbp),%rbx
│ lea -0x4(%rbx,%r8,4),%r8d
│ vmovss (%r12,%rax,1),%xmm5
│ add %r15d,%r8d
│ vsubss %xmm5,%xmm4,%xmm4
│ vmovss %xmm4,(%r12,%r8,1)
│ vmovss (%r12,%rsi,1),%xmm5
│ vmulss %xmm4,%xmm4,%xmm4
│ vaddss %xmm5,%xmm4,%xmm4
│ sqrtss %xmm4,%xmm4
│ vucomiss %xmm4,%xmm2
│ seta %r8b
│ movzbl %r8b,%r8d
│ or -0x48(%rbp),%r8d
│ mov -0x120(%rbp),%rax
│ lea -0x4(%rax,%rdi,4),%ecx
│ add %r15d,%ecx
│ mov -0x118(%rbp),%rax
│ lea -0x4(%rax,%rdi,4),%edx
│ vmovss %xmm4,(%r12,%rsi,1)
│ add %r15d,%edx
│ vmovss (%r12,%rcx,1),%xmm5
│ vmovss (%r12,%rdx,1),%xmm6
│ vucomiss %xmm3,%xmm6
│ seta %cl
│ movzbl %cl,%ecx
│ vucomiss %xmm5,%xmm4
│ seta %sil
│ movzbl %sil,%esi
│ mov -0x110(%rbp),%rdx
│ lea -0x4(%rdx,%rdi,4),%edi
│ and %esi,%ecx
│ add %r15d,%edi
│ neg %ecx
│ mov %ecx,(%r12,%rdi,1)
│ mov %r8,%rdi
│ mov %r9,%r12
│ mov -0x18(%rbp),%rcx
│ mov -0x10(%rbp),%rsi
│ mov -0x88(%rbp),%rdx
│ mov -0x78(%rbp),%rbx
│ 7e7: mov %rdi,-0x48(%rbp)
│ mov 0x23(%rsi),%rdi
│ mov -0xc0(%rbp),%r9
│ mov -0xb8(%rbp),%rax
│ cmp (%rdi),%rsp
│ ↓ jbe 1163
│ 806: cmp %r12d,%r9d
│ ↓ jb 817
│ mov %r9,%rdi
│ ↓ jmpq 81a
│ 817: mov %r12,%rdi
│ 81a: xor %r15d,%r15d
│ mov %r12,%r9
│ 820: mov 0x23(%rsi),%r8
│ cmp (%r8),%rsp
│ ↓ jbe 11d4
│ 82d: cmp %r9d,%edi
│ ↓ je 848
│ add $0x4,%r15d
│ add $0x1,%r9d
│ cmp %r9d,%ebx
│ ↑ je 820
│ ↑ jmpq 6f0
│ 848: mov %rdx,%rdi
│ ↓ jmpq 955
│ 850: mov -0xc0(%rbp),%r9
0.02 │ imul %r14d,%r9d
0.03 │ mov -0x108(%rbp),%rdi
0.02 │ lea (%r9,%rdi,1),%r11d
│ mov %r11,-0xf8(%rbp)
│ mov -0x100(%rbp),%r12
0.06 │ mov %rbx,%rax
│ mov %rdx,%rbx
│ ↓ jmpq 8c7
│ nop
│ 880: lea (%r14,%rbx,1),%edi
│ lea (%rcx,%rdi,4),%edi
│ mov -0xc8(%rbp),%r15
│ movl $0x0,(%r15,%r11,1)
│ vmovss (%r15,%rdi,1),%xmm4
│ vmovss (%r15,%rdx,1),%xmm5
│ vsubss %xmm5,%xmm4,%xmm4
│ vmovss %xmm4,(%r15,%r8,1)
│ vmovss (%r15,%r11,1),%xmm5
│ vmulss %xmm4,%xmm4,%xmm4
│ vaddss %xmm5,%xmm4,%xmm4
│ vmovss %xmm4,(%r15,%r11,1)
│ mov -0xf8(%rbp),%r11
│ 8c7: mov 0x23(%rsi),%r15
0.04 │ mov -0xc0(%rbp),%rdx
│ mov -0xb8(%rbp),%rdi
0.02 │ cmp (%r15),%rsp
0.03 │ ↓ jbe 1268
│ 8e2: cmp %r12d,%edx
│ ↓ jb 8f3
│ mov %rdx,%r15
│ ↓ jmpq 8f6
│ 8f3: mov %r12,%r15
│ 8f6: lea (%r12,%r11,1),%r8d
│ mov -0xf0(%rbp),%rdx
│ lea -0x4(%rdx,%r8,4),%r8d
│ lea (%r12,%rbx,1),%edx
│ lea -0x4(%rcx,%rdx,4),%edx
│ lea (%r12,%r9,1),%r11d
│ lea -0x4(%rdi,%r11,4),%r11d
│ nop
│ 920: mov 0x23(%rsi),%rdi
│ cmp (%rdi),%rsp
0.02 │ ↓ jbe 12e7
│ 92d: cmp %r12d,%r15d
│ ↓ je 94f
│ add $0x1,%r12d
│ add $0x4,%r11d
│ add $0x4,%edx
│ add $0x4,%r8d
│ cmp %r12d,%eax
│ ↑ je 920
│ ↑ jmpq 880
│ 94f: mov %rbx,%rdi
│ mov %rax,%rbx
│ 955: mov -0xc0(%rbp),%rdx
│ cmp %ebx,%edx
│ ↑ jne 1c0
│ mov -0xf0(%rbp),%r12
│ mov -0xc8(%rbp),%r8
│ 972: cmpl $0x0,-0x40(%rbp)
│ ↑ jne e0
│ mov -0x48(%rbp),%rbx
│ not %ebx
│ xor %edi,%edi
│ vpxor %xmm2,%xmm2,%xmm2
│ vpcmpeqd %xmm1,%xmm2,%xmm2
│ vptest %xmm2,%xmm2
│ sete %dil
│ test %ebx,%edi
│ ↓ jne 9d5
│ mov %rcx,%rbx
│ mov -0x38(%rbp),%rax
│ mov %rcx,%rdx
│ mov -0x28(%rbp),%r8
│ mov %rsi,%rcx
│ mov %rax,%rdi
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeac77
│ mov -0x10(%rbp),%rsi
│ mov 0xb(%rsi),%r8
│ cmp $0x0,%eax
│ ↑ jne 50
│ mov -0x30(%rbp),%rbx
│ mov 0x10(%rbx,%r8,1),%edx
│ ↓ jmpq 9d9
│ 9d5: mov -0x30(%rbp),%rbx
│ 9d9: mov -0x38(%rbp),%rcx
│ add $0x1c,%ecx
│ mov (%r8,%rcx,1),%ecx
│ mov %rdx,-0xc0(%rbp)
│ cmp $0x0,%ecx
│ ↓ je a36
│ mov 0x14(%rbx,%r8,1),%eax
│ shl $0x2,%ecx
│ mov %rax,-0xb8(%rbp)
│ mov %rdx,%r8
│ mov %rcx,%rdx
│ mov %rsi,%rcx
│ mov %rax,%rdi
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeacc0
│ mov -0x10(%rbp),%rbx
│ mov 0xb(%rbx),%r8
│ mov -0xb8(%rbp),%rax
│ mov %rbx,%rsi
│ mov -0x30(%rbp),%rbx
│ mov -0xc0(%rbp),%rdx
│ ↓ jmpq a3b
│ a36: mov 0x14(%rbx,%r8,1),%eax
│ a3b: cmp $0x0,%edx
│ ↓ jne a4d
│ vxorps %xmm0,%xmm0,%xmm0
│ ↓ jmpq ce5
│ a4d: mov 0x64(%rbx,%r8,1),%ecx
│ mov 0x2c(%rbx,%r8,1),%edi
│ mov 0x70(%rbx,%r8,1),%r9d
│ mov 0x7c(%rbx,%r8,1),%r11d
│ mov %rdx,%r12
│ imul %edx,%r12d
│ mov 0x20(%rbx,%r8,1),%r14d
│ lea (%r14,%r12,4),%r15d
│ mov %rdi,-0xa0(%rbp)
│ mov %r9,-0xa8(%rbp)
│ mov %r11,-0xb0(%rbp)
│ mov %r12,-0x98(%rbp)
│ mov %r14,-0x50(%rbp)
│ vxorps %xmm0,%xmm0,%xmm0
│ xor %ebx,%ebx
│ mov %rdx,%r12
│ mov %r14,%rdx
│ mov %rbx,%r14
│ ↓ jmpq ac8
│ nop
│ xchg %ax,%ax
│ ab0: add %edi,%ebx
│ lea (%rdx,%r12,4),%edx
│ lea (%r15,%r12,4),%r15d
│ mov -0x10(%rbp),%rsi
0.05 │ mov %r11,%r14
│ mov -0xb0(%rbp),%r11
0.05 │ ac8: mov 0x23(%rsi),%rdi
│ mov %r15,-0x38(%rbp)
│ mov %rdx,-0x40(%rbp)
│ cmp (%rdi),%rsp
│ ↓ jbe 1389
│ add: lea (%r14,%r12,1),%edi
│ mov %rdi,-0x60(%rbp)
│ vxorps %xmm1,%xmm1,%xmm1
│ xor %r12d,%r12d
│ vmovapd %xmm1,%xmm2
│ mov %r12,-0x78(%rbp)
│ ↓ jmpq b13
│ nop
│ b00: add $0x4,%r12d
0.91 │ mov -0x10(%rbp),%rsi
0.65 │ mov -0xb0(%rbp),%r11
0.97 │ mov %rdi,-0x78(%rbp)
1.57 │ b13: mov 0x23(%rsi),%rdi
1.48 │ cmp (%rdi),%rsp
7.01 │ ↓ jbe 13f8
│ b20: cmp -0x78(%rbp),%r14d
2.51 │ ↓ je c6a
│ lea (%r11,%rbx,4),%edi
1.08 │ add %r12d,%edi
0.27 │ mov (%r8,%rdi,1),%edi
2.07 │ lea (%r12,%rdx,1),%r11d
0.46 │ lea (%r12,%r15,1),%esi
0.72 │ cmp $0x0,%edi
0.58 │ ↓ jne c5a
│ lea (%r9,%rbx,4),%edi
6.93 │ add %r12d,%edi
0.04 │ vmovss (%r8,%rdi,1),%xmm3
4.03 │ lea (%rax,%r14,4),%r9d
│ lea (%rcx,%rbx,4),%r15d
│ movl $0x0,(%r8,%rdi,1)
0.02 │ lea (%r12,%r15,1),%edi
0.02 │ mov -0xa0(%rbp),%r15
0.02 │ lea (%r15,%rbx,4),%edx
0.11 │ vmovss (%r8,%r9,1),%xmm4
0.75 │ add %r12d,%edx
0.02 │ vmovss (%r8,%rdi,1),%xmm5
0.56 │ vmovss (%r8,%rdx,1),%xmm6
2.57 │ vmulss %xmm6,%xmm6,%xmm7
3.32 │ vsubss %xmm6,%xmm3,%xmm8
0.07 │ vmulss %xmm7,%xmm3,%xmm9
1.52 │ vaddss %xmm8,%xmm8,%xmm8
0.04 │ vmulss %xmm3,%xmm3,%xmm10
│ vmulss %xmm5,%xmm5,%xmm11
0.14 │ vdivss %xmm9,%xmm8,%xmm8
7.77 │ vmovaps %xmm8,%xmm8
│ vsubss %xmm10,%xmm11,%xmm9
│ vmulss %xmm10,%xmm3,%xmm3
│ vmulss %xmm8,%xmm5,%xmm5
2.45 │ vmulss %xmm6,%xmm9,%xmm9
0.02 │ vaddss %xmm3,%xmm3,%xmm11
│ vmulss %xmm3,%xmm7,%xmm3
│ mov -0x98(%rbp),%rdi
0.07 │ lea (%rbx,%rdi,1),%edx
│ vaddss %xmm4,%xmm5,%xmm4
0.49 │ vaddss %xmm11,%xmm9,%xmm5
│ vpcmpeqd %xmm7,%xmm7,%xmm7
0.05 │ vpslld $0x1e,%xmm7,%xmm7
│ vdivss %xmm3,%xmm7,%xmm3
0.03 │ vmovaps %xmm3,%xmm3
│ vmovss %xmm4,(%r8,%r9,1)
2.36 │ vmulss %xmm3,%xmm5,%xmm4
0.03 │ mov -0x60(%rbp),%r9
0.02 │ lea (%rax,%r9,4),%edi
│ lea (%rcx,%rdx,4),%edx
0.01 │ vmovss %xmm4,(%r8,%r11,1)
0.02 │ lea (%r12,%rdx,1),%r11d
│ vmovss (%r8,%rdi,1),%xmm5
│ vmovss (%r8,%r11,1),%xmm7
0.55 │ vmulss %xmm7,%xmm7,%xmm9
0.04 │ vsubss %xmm10,%xmm9,%xmm9
│ vmulss %xmm6,%xmm9,%xmm6
0.04 │ vaddss %xmm11,%xmm6,%xmm6
0.04 │ vmulss %xmm3,%xmm6,%xmm3
0.25 │ vsubss %xmm3,%xmm1,%xmm1
0.05 │ vmulss %xmm7,%xmm8,%xmm6
0.02 │ vaddss %xmm5,%xmm6,%xmm5
1.03 │ vmovss %xmm5,(%r8,%rdi,1)
0.05 │ vmovss %xmm3,(%r8,%rsi,1)
0.27 │ vsubss %xmm4,%xmm2,%xmm2
0.31 │ mov -0xa8(%rbp),%r9
0.02 │ mov -0x40(%rbp),%rdx
│ mov -0x38(%rbp),%r15
0.02 │ ↓ jmpq c6a
│ c5a: movl $0x0,(%r8,%r11,1)
2.27 │ movl $0x0,(%r8,%rsi,1)
1.72 │ c6a: mov -0x78(%rbp),%rdi
7.09 │ add $0x1,%edi
0.37 │ cmp -0xc0(%rbp),%edi
3.41 │ ↑ jne b00
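│ # ^ inner loop of the second phase: skips the iteration where the counter at
│ # -0x78(%rbp) equals %r14d, then either zeroes two outputs (c5a) when the
│ # loaded flag word is non-zero, or runs the scalar vmulss/vdivss block and
│ # subtracts its two results from the accumulators in %xmm1 and %xmm2.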
│ vucomiss %xmm0,%xmm2
0.19 │ ↓ jbe c8b
│ vmovapd %xmm2,%xmm0
│ c8b: vucomiss %xmm0,%xmm1
0.01 │ ↓ jbe c99
│ vmovapd %xmm1,%xmm0
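│ # ^ running maximum: %xmm0 = max(%xmm0, %xmm2), then max(%xmm0, %xmm1);
│ # the final value is written out at ce5 once both loops finish.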
│ c99: lea 0x1(%r14),%r11d
│ mov %r14,%r12
│ imul -0xc0(%rbp),%r12d
│ add %r14d,%r12d
0.02 │ mov -0x60(%rbp),%rsi
│ imul -0xc0(%rbp),%esi
│ add %esi,%r14d
0.04 │ mov -0x50(%rbp),%rsi
│ lea (%rsi,%r12,4),%r12d
│ lea (%rsi,%r14,4),%r14d
0.03 │ vmovss %xmm2,(%r8,%r12,1)
0.04 │ vmovss %xmm1,(%r8,%r14,1)
0.09 │ mov -0xc0(%rbp),%r12
│ cmp %r11d,%r12d
│ ↑ jne ab0
│ mov -0x30(%rbp),%rbx
│ ce5: vmovss %xmm0,0x48(%rbx,%r8,1)
│ mov -0x28(%rbp),%ebx
│ mov -0x20(%rbp),%rcx
│ mov %ecx,0x4(%rbx,%r8,1)
0.02 │ mov -0x18(%rbp),%rcx
│ mov %ecx,(%r8,%rbx,1)
│ mov %rbp,%rsp
│ pop %rbp
│ ← retq
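│ # The blocks below (d05 onwards) are the out-of-line slow paths reached from
│ # the jbe stack-check branches above: each spills the live registers and XMM
│ # values into the frame, calls the same out-of-function helper, restores the
│ # spilled state (re-materializing the 1.0f and 1e-9f constants) and jumps
│ # back to the instruction after its check.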
│ d05: → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0x18(%rbp),%rcx
│ mov -0x20(%rbp),%rbx
│ mov -0x28(%rbp),%rax
│ mov -0x10(%rbp),%rsi
│ mov -0x38(%rbp),%rdx
│ ↑ jmpq 2b
│ d23: → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0x18(%rbp),%rcx
│ mov -0x30(%rbp),%rdi
│ mov -0x10(%rbp),%rsi
│ mov -0xc8(%rbp),%r8
│ ↑ jmpq 6d
│ d40: vmovups %xmm1,-0xe0(%rbp)
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0x18(%rbp),%rcx
│ mov -0x10(%rbp),%rsi
│ mov -0xc0(%rbp),%rdx
│ mov -0xc8(%rbp),%r8
│ vmovups -0xe0(%rbp),%xmm1
│ mov -0xf0(%rbp),%r12
│ ↑ jmpq fd
│ d77: mov %r14,-0x80(%rbp)
│ vmovups %xmm1,-0xe0(%rbp)
│ mov %rax,-0x60(%rbp)
│ mov %r15,-0x68(%rbp)
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0x18(%rbp),%rcx
│ mov -0x10(%rbp),%rsi
│ mov -0xc8(%rbp),%r8
│ mov -0x50(%rbp),%rdi
│ mov -0x58(%rbp),%r12
│ mov -0x60(%rbp),%rax
│ mov -0x68(%rbp),%r15
│ mov -0x80(%rbp),%r14
│ mov -0x88(%rbp),%rdx
│ vpcmpeqd %xmm3,%xmm3,%xmm3
│ vpslld $0x19,%xmm3,%xmm3
│ vpsrld $0x2,%xmm3,%xmm3
│ mov $0x3089705f,%r10d
│ vmovd %r10d,%xmm2
│ vmovups -0xe0(%rbp),%xmm1
│ ↑ jmpq 210
│ de0: mov %r14,-0x80(%rbp)
│ vmovups %xmm4,-0x140(%rbp)
│ mov %r11,-0xa0(%rbp)
│ mov %rbx,-0x78(%rbp)
│ vmovups %xmm8,-0x160(%rbp)
│ vmovups %xmm7,-0x170(%rbp)
│ vmovups %xmm1,-0xe0(%rbp)
│ vmovups %xmm6,-0x180(%rbp)
│ vmovups %xmm5,-0x190(%rbp)
│ mov %rax,-0x60(%rbp)
│ mov %r15,-0x68(%rbp)
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0x10(%rbp),%rsi
│ mov -0xc8(%rbp),%r8
│ mov -0x50(%rbp),%rdi
│ mov -0x58(%rbp),%r12
│ mov -0x60(%rbp),%rax
│ mov -0x68(%rbp),%r15
│ mov -0x70(%rbp),%rcx
│ mov -0x80(%rbp),%r14
│ vpcmpeqd %xmm3,%xmm3,%xmm3
│ vpslld $0x19,%xmm3,%xmm3
│ vpsrld $0x2,%xmm3,%xmm3
│ mov $0x3089705f,%r10d
│ vmovd %r10d,%xmm2
│ vmovups -0x140(%rbp),%xmm4
│ mov -0xa0(%rbp),%r11
│ mov -0x78(%rbp),%rbx
│ vmovups -0x160(%rbp),%xmm8
│ vmovups -0x170(%rbp),%xmm7
│ vmovups -0xe0(%rbp),%xmm1
│ vmovups -0x180(%rbp),%xmm6
│ vmovups -0x190(%rbp),%xmm5
│ ↑ jmpq 2d4
│ ea8: mov %r14,-0x80(%rbp)
│ vmovups %xmm1,-0xe0(%rbp)
│ mov %rbx,-0x78(%rbp)
│ mov %r11,-0xa0(%rbp)
│ mov %rdi,-0x90(%rbp)
│ vmovups %xmm4,-0x140(%rbp)
│ mov %rax,-0x60(%rbp)
│ mov %r15,-0x68(%rbp)
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0x18(%rbp),%rcx
│ mov -0x10(%rbp),%rsi
│ mov -0xc8(%rbp),%r8
│ mov -0x60(%rbp),%rax
│ mov -0x68(%rbp),%r15
│ mov -0x80(%rbp),%r14
│ mov -0x88(%rbp),%rdx
│ vpcmpeqd %xmm3,%xmm3,%xmm3
│ vpslld $0x19,%xmm3,%xmm3
│ vpsrld $0x2,%xmm3,%xmm3
│ mov $0x3089705f,%r10d
│ vmovd %r10d,%xmm2
│ vmovups -0xe0(%rbp),%xmm1
│ mov -0x78(%rbp),%rbx
│ mov -0xa0(%rbp),%r11
│ mov -0x90(%rbp),%rdi
│ vmovups -0x140(%rbp),%xmm4
│ ↑ jmpq 3df
│ f3d: vmovups %xmm1,-0xe0(%rbp)
│ mov %r14,-0x80(%rbp)
│ mov %r12,-0x90(%rbp)
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0x18(%rbp),%rax
│ mov -0x10(%rbp),%rsi
│ mov -0xc0(%rbp),%rdx
│ mov -0xc8(%rbp),%r8
│ vmovups -0xe0(%rbp),%xmm1
│ mov -0x78(%rbp),%rbx
│ mov -0x80(%rbp),%r14
│ mov -0x88(%rbp),%r11
│ mov -0x108(%rbp),%rdi
│ vpcmpeqd %xmm3,%xmm3,%xmm3
│ vpslld $0x19,%xmm3,%xmm3
│ vpsrld $0x2,%xmm3,%xmm3
│ mov $0x3089705f,%r10d
│ vmovd %r10d,%xmm2
│ mov -0x90(%rbp),%r12
│ ↑ jmpq 568
│ fae: vmovups %xmm1,-0xe0(%rbp)
│ mov %r14,-0x80(%rbp)
│ mov %r12,-0x90(%rbp)
│ mov %r9,-0x98(%rbp)
│ mov %rdx,-0xa0(%rbp)
│ mov %rcx,-0xa8(%rbp)
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0x18(%rbp),%rax
│ mov -0x10(%rbp),%rsi
│ mov -0xc8(%rbp),%r8
│ vmovups -0xe0(%rbp),%xmm1
│ mov -0x78(%rbp),%rbx
│ mov -0x80(%rbp),%r14
│ mov -0x88(%rbp),%r11
│ mov -0x108(%rbp),%rdi
│ vpcmpeqd %xmm3,%xmm3,%xmm3
│ vpslld $0x19,%xmm3,%xmm3
│ vpsrld $0x2,%xmm3,%xmm3
│ mov $0x3089705f,%r10d
│ vmovd %r10d,%xmm2
│ mov -0x90(%rbp),%r12
│ mov -0x98(%rbp),%r9
│ mov -0xa0(%rbp),%rdx
│ mov -0xa8(%rbp),%rcx
│ ↑ jmpq 59d
│1042: vmovups %xmm1,-0xe0(%rbp)
│ mov %r14,-0x80(%rbp)
│ mov %r12,-0xb0(%rbp)
│ mov %r9,-0xe8(%rbp)
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0x18(%rbp),%rcx
│ mov -0x10(%rbp),%rsi
│ mov -0xc0(%rbp),%rdx
│ vmovups -0xe0(%rbp),%xmm1
│ mov -0x78(%rbp),%r15
│ mov -0x80(%rbp),%r14
│ mov -0x88(%rbp),%rbx
│ mov -0xb8(%rbp),%rax
│ vpcmpeqd %xmm3,%xmm3,%xmm3
│ vpslld $0x19,%xmm3,%xmm3
│ vpsrld $0x2,%xmm3,%xmm3
│ mov $0x3089705f,%r10d
│ vmovd %r10d,%xmm2
│ mov -0xb0(%rbp),%r12
│ mov -0xe8(%rbp),%r9
│ mov -0xf8(%rbp),%r11
│ ↑ jmpq 658
│10c1: vmovups %xmm1,-0xe0(%rbp)
│ mov %r14,-0x80(%rbp)
│ mov %r12,-0xb0(%rbp)
│ mov %r11,-0x90(%rbp)
│ mov %r8,-0x98(%rbp)
│ mov %rdx,-0xa0(%rbp)
│ mov %rdi,-0xa8(%rbp)
│ mov %r9,-0xe8(%rbp)
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0x18(%rbp),%rcx
│ mov -0x10(%rbp),%rsi
│ vmovups -0xe0(%rbp),%xmm1
│ mov -0x78(%rbp),%r15
│ mov -0x80(%rbp),%r14
│ mov -0x88(%rbp),%rbx
│ vpcmpeqd %xmm3,%xmm3,%xmm3
│ vpslld $0x19,%xmm3,%xmm3
│ vpsrld $0x2,%xmm3,%xmm3
│ mov $0x3089705f,%r10d
│ vmovd %r10d,%xmm2
│ mov -0xb0(%rbp),%r12
│ mov -0x90(%rbp),%r11
│ mov -0x98(%rbp),%r8
│ mov -0xa0(%rbp),%rdx
│ mov -0xa8(%rbp),%rdi
│ mov -0xe8(%rbp),%r9
│ ↑ jmpq 69d
│1163: vmovups %xmm1,-0xe0(%rbp)
│ mov %r14,-0x80(%rbp)
│ mov %r12,-0x90(%rbp)
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0x18(%rbp),%rcx
│ mov -0x10(%rbp),%rsi
│ mov -0xc0(%rbp),%r9
│ vmovups -0xe0(%rbp),%xmm1
│ mov -0x78(%rbp),%rbx
│ mov -0x80(%rbp),%r14
│ mov -0x88(%rbp),%rdx
│ mov -0xb8(%rbp),%rax
│ mov -0x108(%rbp),%r11
│ vpcmpeqd %xmm3,%xmm3,%xmm3
│ vpslld $0x19,%xmm3,%xmm3
│ vpsrld $0x2,%xmm3,%xmm3
│ mov $0x3089705f,%r10d
│ vmovd %r10d,%xmm2
│ mov -0x90(%rbp),%r12
│ ↑ jmpq 806
│11d4: vmovups %xmm1,-0xe0(%rbp)
│ mov %r14,-0x80(%rbp)
│ mov %r12,-0x90(%rbp)
│ mov %r9,-0x98(%rbp)
│ mov %r15,-0xa0(%rbp)
│ mov %rdi,-0xa8(%rbp)
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0x18(%rbp),%rcx
│ mov -0x10(%rbp),%rsi
│ vmovups -0xe0(%rbp),%xmm1
│ mov -0x78(%rbp),%rbx
│ mov -0x80(%rbp),%r14
│ mov -0x88(%rbp),%rdx
│ mov -0xb8(%rbp),%rax
│ mov -0x108(%rbp),%r11
│ vpcmpeqd %xmm3,%xmm3,%xmm3
│ vpslld $0x19,%xmm3,%xmm3
│ vpsrld $0x2,%xmm3,%xmm3
│ mov $0x3089705f,%r10d
│ vmovd %r10d,%xmm2
│ mov -0x90(%rbp),%r12
│ mov -0x98(%rbp),%r9
│ mov -0xa0(%rbp),%r15
│ mov -0xa8(%rbp),%rdi
│ ↑ jmpq 82d
│1268: vmovups %xmm1,-0xe0(%rbp)
│ mov %r14,-0x80(%rbp)
│ mov %r12,-0xb0(%rbp)
│ mov %r9,-0xe8(%rbp)
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0x18(%rbp),%rcx
│ mov -0x10(%rbp),%rsi
│ mov -0xc0(%rbp),%rdx
│ vmovups -0xe0(%rbp),%xmm1
│ mov -0x78(%rbp),%rax
│ mov -0x80(%rbp),%r14
│ mov -0x88(%rbp),%rbx
│ mov -0xb0(%rbp),%r12
│ mov -0xb8(%rbp),%rdi
│ mov -0xe8(%rbp),%r9
│ mov -0xf8(%rbp),%r11
│ vpcmpeqd %xmm3,%xmm3,%xmm3
│ vpslld $0x19,%xmm3,%xmm3
│ vpsrld $0x2,%xmm3,%xmm3
│ mov $0x3089705f,%r10d
│ vmovd %r10d,%xmm2
│ ↑ jmpq 8e2
│12e7: vmovups %xmm1,-0xe0(%rbp)
│ mov %r14,-0x80(%rbp)
│ mov %r8,-0x90(%rbp)
│ mov %rdx,-0x98(%rbp)
│ mov %r11,-0xa0(%rbp)
│ mov %r12,-0xb0(%rbp)
│ mov %r15,-0xa8(%rbp)
│ mov %r9,-0xe8(%rbp)
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0x18(%rbp),%rcx
│ mov -0x10(%rbp),%rsi
│ vmovups -0xe0(%rbp),%xmm1
│ mov -0x78(%rbp),%rax
│ mov -0x80(%rbp),%r14
│ mov -0x88(%rbp),%rbx
│ mov -0x90(%rbp),%r8
│ mov -0x98(%rbp),%rdx
│ mov -0xa0(%rbp),%r11
│ mov -0xb0(%rbp),%r12
│ mov -0xa8(%rbp),%r15
│ mov -0xe8(%rbp),%r9
│ vpcmpeqd %xmm3,%xmm3,%xmm3
│ vpslld $0x19,%xmm3,%xmm3
│ vpsrld $0x2,%xmm3,%xmm3
│ mov $0x3089705f,%r10d
│ vmovd %r10d,%xmm2
│ ↑ jmpq 92d
│1389: mov %r8,-0xc8(%rbp)
│ mov %rbx,-0x48(%rbp)
│ mov %r14,-0x58(%rbp)
│ vmovsd %xmm0,-0x68(%rbp)
│ mov %rcx,-0x90(%rbp)
│ mov %rax,-0xb8(%rbp)
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0xc8(%rbp),%r8
│ mov -0x38(%rbp),%r15
│ mov -0xc0(%rbp),%r12
│ mov -0x40(%rbp),%rdx
│ mov -0x48(%rbp),%rbx
│ mov -0x58(%rbp),%r14
│ vmovsd -0x68(%rbp),%xmm0
│ mov -0x90(%rbp),%rcx
│ mov -0xb8(%rbp),%rax
│ mov -0xa8(%rbp),%r9
│ mov -0xb0(%rbp),%r11
│ mov -0x10(%rbp),%rsi
│ ↑ jmpq add
│13f8: mov %r8,-0xc8(%rbp)
│ mov %rbx,-0x48(%rbp)
│ mov %r14,-0x58(%rbp)
│ vmovsd %xmm0,-0x68(%rbp)
│ mov %r12,-0x70(%rbp)
│ vmovsd %xmm1,-0x80(%rbp)
│ vmovsd %xmm2,-0x88(%rbp)
│ mov %rcx,-0x90(%rbp)
│ mov %rax,-0xb8(%rbp)
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffeaf40
│ mov -0xc8(%rbp),%r8
│ mov -0x38(%rbp),%r15
│ mov -0x40(%rbp),%rdx
│ mov -0x48(%rbp),%rbx
│ mov -0x58(%rbp),%r14
│ vmovsd -0x68(%rbp),%xmm0
│ mov -0x70(%rbp),%r12
│ vmovsd -0x80(%rbp),%xmm1
│ vmovsd -0x88(%rbp),%xmm2
│ mov -0x90(%rbp),%rcx
│ mov -0xb8(%rbp),%rax
│ mov -0xa8(%rbp),%r9
│ mov -0xb0(%rbp),%r11
│ mov -0x10(%rbp),%rsi
│ ↑ jmpq b20
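│ # The long run of identical call instructions below all target the same
│ # address outside the function; this looks like the table of out-of-line
│ # deoptimization/bailout exits that TurboFan appends after the optimized
│ # code (an assumption; the listing itself does not label them).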
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ → callq Function:compute_2d-2-turbofan+0xfffffffffffead10
│ nop
│ nop