Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll | 72
-rw-r--r--  llvm/test/CodeGen/AMDGPU/add-max.ll | 295
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bf16-conversions.ll | 149
-rw-r--r--  llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll | 7
-rw-r--r--  llvm/test/CodeGen/AMDGPU/div_i128.ll | 64
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.bf16.ll | 44
-rw-r--r--  llvm/test/CodeGen/AMDGPU/finalizebundle.mir | 18
-rw-r--r--  llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll | 182
-rw-r--r--  llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll | 86
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll | 14
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll | 3
-rw-r--r--  llvm/test/CodeGen/AMDGPU/max3.ll | 59
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir | 46
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir | 60
-rw-r--r--  llvm/test/CodeGen/AMDGPU/min3.ll | 59
-rw-r--r--  llvm/test/CodeGen/AMDGPU/rem_i128.ll | 64
-rw-r--r--  llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll | 436
-rw-r--r--  llvm/test/CodeGen/AMDGPU/scale-offset-global.ll | 351
-rw-r--r--  llvm/test/CodeGen/AMDGPU/scale-offset-scratch.ll | 322
-rw-r--r--  llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll | 372
-rw-r--r--  llvm/test/CodeGen/AMDGPU/srem.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/wait-xcnt.mir | 42
-rw-r--r--  llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll | 36
-rw-r--r--  llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll | 34
-rw-r--r--  llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll | 102
-rw-r--r--  llvm/test/CodeGen/NVPTX/extractelement.ll | 71
-rw-r--r--  llvm/test/CodeGen/NVPTX/i1-select.ll | 30
-rw-r--r--  llvm/test/CodeGen/NVPTX/i128.ll | 582
-rw-r--r--  llvm/test/CodeGen/NVPTX/i8x4-instructions.ll | 133
-rw-r--r--  llvm/test/CodeGen/NVPTX/pr126337.ll | 2
-rw-r--r--  llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll | 417
-rw-r--r--  llvm/test/CodeGen/RISCV/fpclamptosat.ll | 88
-rw-r--r--  llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll | 36
-rw-r--r--  llvm/test/CodeGen/RISCV/pr148084.ll | 279
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll | 104
-rw-r--r--  llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll | 1901
-rw-r--r--  llvm/test/CodeGen/RISCV/xqcisls.ll | 47
-rw-r--r--  llvm/test/CodeGen/RISCV/xtheadfmemidx.ll | 128
-rw-r--r--  llvm/test/CodeGen/RISCV/xtheadmemidx.ll | 775
-rw-r--r--  llvm/test/CodeGen/SPARC/tls-sp.ll | 105
-rw-r--r--  llvm/test/CodeGen/SystemZ/pr60413.ll | 36
-rw-r--r--  llvm/test/CodeGen/WebAssembly/ref-test-func.ll | 120
-rw-r--r--  llvm/test/CodeGen/WebAssembly/removed-terminator.ll | 26
-rw-r--r--  llvm/test/CodeGen/WinEH/wineh-noret-cleanup.ll | 14
-rw-r--r--  llvm/test/CodeGen/X86/abds-neg.ll | 92
-rw-r--r--  llvm/test/CodeGen/X86/avg.ll | 156
-rw-r--r--  llvm/test/CodeGen/X86/catchret-empty-fallthrough.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/conditional-tailcall-pgso.ll | 1
-rw-r--r--  llvm/test/CodeGen/X86/conditional-tailcall.ll | 1
-rw-r--r--  llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll | 39
-rw-r--r--  llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll | 12
-rw-r--r--  llvm/test/CodeGen/X86/freeze-vector.ll | 24
-rw-r--r--  llvm/test/CodeGen/X86/noreturn-call-win64.ll | 12
-rw-r--r--  llvm/test/CodeGen/X86/seh-catch-all.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/seh-catchpad.ll | 10
-rw-r--r--  llvm/test/CodeGen/X86/seh-except-finally.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/seh-finally.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/seh-safe-div.ll | 5
-rw-r--r--  llvm/test/CodeGen/X86/seh-unwind-inline-asm-codegen.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/setcc-non-simple-type.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/stack-coloring-wineh.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/taildup-heapallocsite.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/vec_extract.ll | 66
-rw-r--r--  llvm/test/CodeGen/X86/win-catchpad-nested-cxx.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/win-catchpad.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/win-cleanuppad.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/win32-eh-states.ll | 14
-rw-r--r--  llvm/test/CodeGen/X86/win64-seh-epilogue-statepoint.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/wineh-coreclr.ll | 14
-rw-r--r--  llvm/test/CodeGen/XCore/exception.ll | 2
70 files changed, 5507 insertions, 2800 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
index 8a80afd..fa0e4b9 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
@@ -257,20 +257,16 @@ define amdgpu_kernel void @store_load_vindex_kernel(i32 %n) {
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: s_load_b32 s0, s[4:5], 0x0
; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GFX12-NEXT: v_mov_b32_e32 v2, 15
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX12-NEXT: v_sub_nc_u32_e32 v1, 0, v0
+; GFX12-NEXT: v_mov_b32_e32 v2, 15
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_lshl_b32 s0, s0, 7
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
-; GFX12-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_add_nc_u32_e32 v1, s0, v1
-; GFX12-NEXT: scratch_store_b32 v0, v2, off scope:SCOPE_SYS
+; GFX12-NEXT: scratch_store_b32 v0, v2, s0 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: scratch_load_b32 v0, v1, off offset:124 scope:SCOPE_SYS
+; GFX12-NEXT: scratch_load_b32 v0, v1, s0 offset:124 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: s_endpgm
;
@@ -357,20 +353,16 @@ define amdgpu_kernel void @store_load_vindex_kernel(i32 %n) {
; UNALIGNED_GFX12: ; %bb.0: ; %bb
; UNALIGNED_GFX12-NEXT: s_load_b32 s0, s[4:5], 0x0
; UNALIGNED_GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; UNALIGNED_GFX12-NEXT: v_mov_b32_e32 v2, 15
-; UNALIGNED_GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; UNALIGNED_GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; UNALIGNED_GFX12-NEXT: v_sub_nc_u32_e32 v1, 0, v0
+; UNALIGNED_GFX12-NEXT: v_mov_b32_e32 v2, 15
; UNALIGNED_GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; UNALIGNED_GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; UNALIGNED_GFX12-NEXT: s_wait_kmcnt 0x0
; UNALIGNED_GFX12-NEXT: s_lshl_b32 s0, s0, 7
-; UNALIGNED_GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
-; UNALIGNED_GFX12-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; UNALIGNED_GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; UNALIGNED_GFX12-NEXT: v_add_nc_u32_e32 v1, s0, v1
-; UNALIGNED_GFX12-NEXT: scratch_store_b32 v0, v2, off scope:SCOPE_SYS
+; UNALIGNED_GFX12-NEXT: scratch_store_b32 v0, v2, s0 scope:SCOPE_SYS
; UNALIGNED_GFX12-NEXT: s_wait_storecnt 0x0
-; UNALIGNED_GFX12-NEXT: scratch_load_b32 v0, v1, off offset:124 scope:SCOPE_SYS
+; UNALIGNED_GFX12-NEXT: scratch_load_b32 v0, v1, s0 offset:124 scope:SCOPE_SYS
; UNALIGNED_GFX12-NEXT: s_wait_loadcnt 0x0
; UNALIGNED_GFX12-NEXT: s_endpgm
bb:
@@ -937,19 +929,17 @@ define amdgpu_kernel void @store_load_vindex_small_offset_kernel(i32 %n) {
; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX12-NEXT: scratch_load_b32 v3, off, off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v2, 15
; GFX12-NEXT: v_sub_nc_u32_e32 v1, 0, v0
+; GFX12-NEXT: v_mov_b32_e32 v2, 15
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: scratch_store_b32 v0, v2, off offset:384 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_lshl_b32 s0, s0, 7
; GFX12-NEXT: s_add_co_u32 s0, 0x100, s0
-; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-NEXT: v_add_nc_u32_e32 v1, s0, v1
-; GFX12-NEXT: scratch_load_b32 v0, v1, off offset:124 scope:SCOPE_SYS
+; GFX12-NEXT: scratch_load_b32 v0, v1, s0 offset:124 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: s_endpgm
;
@@ -1048,19 +1038,17 @@ define amdgpu_kernel void @store_load_vindex_small_offset_kernel(i32 %n) {
; UNALIGNED_GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; UNALIGNED_GFX12-NEXT: scratch_load_b32 v3, off, off scope:SCOPE_SYS
; UNALIGNED_GFX12-NEXT: s_wait_loadcnt 0x0
-; UNALIGNED_GFX12-NEXT: v_mov_b32_e32 v2, 15
; UNALIGNED_GFX12-NEXT: v_sub_nc_u32_e32 v1, 0, v0
+; UNALIGNED_GFX12-NEXT: v_mov_b32_e32 v2, 15
; UNALIGNED_GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; UNALIGNED_GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; UNALIGNED_GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
; UNALIGNED_GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; UNALIGNED_GFX12-NEXT: s_wait_kmcnt 0x0
; UNALIGNED_GFX12-NEXT: scratch_store_b32 v0, v2, off offset:384 scope:SCOPE_SYS
; UNALIGNED_GFX12-NEXT: s_wait_storecnt 0x0
; UNALIGNED_GFX12-NEXT: s_lshl_b32 s0, s0, 7
; UNALIGNED_GFX12-NEXT: s_add_co_u32 s0, 0x100, s0
-; UNALIGNED_GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; UNALIGNED_GFX12-NEXT: v_add_nc_u32_e32 v1, s0, v1
-; UNALIGNED_GFX12-NEXT: scratch_load_b32 v0, v1, off offset:124 scope:SCOPE_SYS
+; UNALIGNED_GFX12-NEXT: scratch_load_b32 v0, v1, s0 offset:124 scope:SCOPE_SYS
; UNALIGNED_GFX12-NEXT: s_wait_loadcnt 0x0
; UNALIGNED_GFX12-NEXT: s_endpgm
bb:
@@ -1579,19 +1567,17 @@ define amdgpu_kernel void @store_load_vindex_large_offset_kernel(i32 %n) {
; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX12-NEXT: scratch_load_b32 v3, off, off scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v2, 15
; GFX12-NEXT: v_sub_nc_u32_e32 v1, 0, v0
+; GFX12-NEXT: v_mov_b32_e32 v2, 15
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: scratch_store_b32 v0, v2, off offset:16512 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_lshl_b32 s0, s0, 7
; GFX12-NEXT: s_add_co_u32 s0, 0x4000, s0
-; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-NEXT: v_add_nc_u32_e32 v1, s0, v1
-; GFX12-NEXT: scratch_load_b32 v0, v1, off offset:124 scope:SCOPE_SYS
+; GFX12-NEXT: scratch_load_b32 v0, v1, s0 offset:124 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: s_endpgm
;
@@ -1692,19 +1678,17 @@ define amdgpu_kernel void @store_load_vindex_large_offset_kernel(i32 %n) {
; UNALIGNED_GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; UNALIGNED_GFX12-NEXT: scratch_load_b32 v3, off, off scope:SCOPE_SYS
; UNALIGNED_GFX12-NEXT: s_wait_loadcnt 0x0
-; UNALIGNED_GFX12-NEXT: v_mov_b32_e32 v2, 15
; UNALIGNED_GFX12-NEXT: v_sub_nc_u32_e32 v1, 0, v0
+; UNALIGNED_GFX12-NEXT: v_mov_b32_e32 v2, 15
; UNALIGNED_GFX12-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; UNALIGNED_GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; UNALIGNED_GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
; UNALIGNED_GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; UNALIGNED_GFX12-NEXT: s_wait_kmcnt 0x0
; UNALIGNED_GFX12-NEXT: scratch_store_b32 v0, v2, off offset:16512 scope:SCOPE_SYS
; UNALIGNED_GFX12-NEXT: s_wait_storecnt 0x0
; UNALIGNED_GFX12-NEXT: s_lshl_b32 s0, s0, 7
; UNALIGNED_GFX12-NEXT: s_add_co_u32 s0, 0x4000, s0
-; UNALIGNED_GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; UNALIGNED_GFX12-NEXT: v_add_nc_u32_e32 v1, s0, v1
-; UNALIGNED_GFX12-NEXT: scratch_load_b32 v0, v1, off offset:124 scope:SCOPE_SYS
+; UNALIGNED_GFX12-NEXT: scratch_load_b32 v0, v1, s0 offset:124 scope:SCOPE_SYS
; UNALIGNED_GFX12-NEXT: s_wait_loadcnt 0x0
; UNALIGNED_GFX12-NEXT: s_endpgm
bb:
@@ -4060,9 +4044,7 @@ define amdgpu_gs void @sgpr_base_plus_sgpr_plus_vgpr_plus_large_imm_offset(ptr a
; GFX12-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_large_imm_offset:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_add_nc_u32 v0, s1, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GFX12-NEXT: scratch_store_b32 v0, v1, off offset:65512 scope:SCOPE_SYS
+; GFX12-NEXT: scratch_store_b32 v0, v1, s0 offset:65512 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_endpgm
;
@@ -4113,9 +4095,7 @@ define amdgpu_gs void @sgpr_base_plus_sgpr_plus_vgpr_plus_large_imm_offset(ptr a
; UNALIGNED_GFX12-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_large_imm_offset:
; UNALIGNED_GFX12: ; %bb.0: ; %bb
; UNALIGNED_GFX12-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_add_nc_u32 v0, s1, v0
-; UNALIGNED_GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; UNALIGNED_GFX12-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; UNALIGNED_GFX12-NEXT: scratch_store_b32 v0, v1, off offset:65512 scope:SCOPE_SYS
+; UNALIGNED_GFX12-NEXT: scratch_store_b32 v0, v1, s0 offset:65512 scope:SCOPE_SYS
; UNALIGNED_GFX12-NEXT: s_wait_storecnt 0x0
; UNALIGNED_GFX12-NEXT: s_endpgm
bb:
@@ -4172,9 +4152,7 @@ define amdgpu_gs void @sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset(pt
; GFX12-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_add_nc_u32 v0, s1, v0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GFX12-NEXT: scratch_store_b32 v0, v1, off offset:-16 scope:SCOPE_SYS
+; GFX12-NEXT: scratch_store_b32 v0, v1, s0 offset:-16 scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_endpgm
;
@@ -4223,9 +4201,7 @@ define amdgpu_gs void @sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset(pt
; UNALIGNED_GFX12-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
; UNALIGNED_GFX12: ; %bb.0: ; %bb
; UNALIGNED_GFX12-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_add_nc_u32 v0, s1, v0
-; UNALIGNED_GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; UNALIGNED_GFX12-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; UNALIGNED_GFX12-NEXT: scratch_store_b32 v0, v1, off offset:-16 scope:SCOPE_SYS
+; UNALIGNED_GFX12-NEXT: scratch_store_b32 v0, v1, s0 offset:-16 scope:SCOPE_SYS
; UNALIGNED_GFX12-NEXT: s_wait_storecnt 0x0
; UNALIGNED_GFX12-NEXT: s_endpgm
bb:
diff --git a/llvm/test/CodeGen/AMDGPU/add-max.ll b/llvm/test/CodeGen/AMDGPU/add-max.ll
new file mode 100644
index 0000000..b992506
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/add-max.ll
@@ -0,0 +1,295 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GISEL %s
+
+define amdgpu_ps float @add_max_u32_vvv(i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: add_max_u32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_max_u32_e32 v0, v0, v2
+; GCN-NEXT: ; return to shader part epilog
+ %add = add i32 %a, %b
+ %max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
+ %ret = bitcast i32 %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_u32_svv(i32 inreg %a, i32 %b, i32 %c) {
+; GCN-LABEL: add_max_u32_svv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_max_u32_e32 v0, v0, v1
+; GCN-NEXT: ; return to shader part epilog
+ %add = add i32 %a, %b
+ %max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
+ %ret = bitcast i32 %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_u32_ssv(i32 inreg %a, i32 inreg %b, i32 %c) {
+; GCN-LABEL: add_max_u32_ssv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_add_co_i32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_max_u32_e32 v0, s0, v0
+; GCN-NEXT: ; return to shader part epilog
+ %add = add i32 %a, %b
+ %max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
+ %ret = bitcast i32 %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_u32_sss(i32 inreg %a, i32 inreg %b, i32 inreg %c) {
+; GCN-LABEL: add_max_u32_sss:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_add_co_i32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_max_u32 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: ; return to shader part epilog
+ %add = add i32 %a, %b
+ %max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
+ %ret = bitcast i32 %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_u32_vsi(i32 %a, i32 inreg %b) {
+; GCN-LABEL: add_max_u32_vsi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_max_u32_e32 v0, 4, v0
+; GCN-NEXT: ; return to shader part epilog
+ %add = add i32 %a, %b
+ %max = call i32 @llvm.umax.i32(i32 %add, i32 4)
+ %ret = bitcast i32 %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_u32_svl(i32 inreg %a, i32 %b) {
+; GCN-LABEL: add_max_u32_svl:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_max_u32_e32 v0, 0x64, v0
+; GCN-NEXT: ; return to shader part epilog
+ %add = add i32 %a, %b
+ %max = call i32 @llvm.umax.i32(i32 %add, i32 100)
+ %ret = bitcast i32 %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_u32_slv(i32 inreg %a, i32 %b) {
+; GCN-LABEL: add_max_u32_slv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_addk_co_i32 s0, 0x64
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_max_u32_e32 v0, s0, v0
+; GCN-NEXT: ; return to shader part epilog
+ %add = add i32 %a, 100
+ %max = call i32 @llvm.umax.i32(i32 %add, i32 %b)
+ %ret = bitcast i32 %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_i32_vvv(i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: add_max_i32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_max_i32_e32 v0, v0, v2
+; GCN-NEXT: ; return to shader part epilog
+ %add = add i32 %a, %b
+ %max = call i32 @llvm.smax.i32(i32 %add, i32 %c)
+ %ret = bitcast i32 %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_min_u32_vvv(i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: add_min_u32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_min_u32_e32 v0, v0, v2
+; GCN-NEXT: ; return to shader part epilog
+ %add = add i32 %a, %b
+ %max = call i32 @llvm.umin.i32(i32 %add, i32 %c)
+ %ret = bitcast i32 %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_min_i32_vvv(i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: add_min_i32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_min_i32_e32 v0, v0, v2
+; GCN-NEXT: ; return to shader part epilog
+ %add = add i32 %a, %b
+ %max = call i32 @llvm.smin.i32(i32 %add, i32 %c)
+ %ret = bitcast i32 %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_v2u16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; GCN-LABEL: add_max_v2u16_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_max_u16 v0, v0, v1, v2
+; GCN-NEXT: ; return to shader part epilog
+ %add = add <2 x i16> %a, %b
+ %max = call <2 x i16> @llvm.umax.v216(<2 x i16> %add, <2 x i16> %c)
+ %ret = bitcast <2 x i16> %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_v2u16_svv(<2 x i16> inreg %a, <2 x i16> %b, <2 x i16> %c) {
+; GCN-LABEL: add_max_v2u16_svv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_max_u16 v0, s0, v0, v1
+; GCN-NEXT: ; return to shader part epilog
+ %add = add <2 x i16> %a, %b
+ %max = call <2 x i16> @llvm.umax.v216(<2 x i16> %add, <2 x i16> %c)
+ %ret = bitcast <2 x i16> %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_v2u16_ssv(<2 x i16> inreg %a, <2 x i16> inreg %b, <2 x i16> %c) {
+; SDAG-LABEL: add_max_v2u16_ssv:
+; SDAG: ; %bb.0:
+; SDAG-NEXT: v_pk_add_max_u16 v0, s0, s1, v0
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: add_max_v2u16_ssv:
+; GISEL: ; %bb.0:
+; GISEL-NEXT: s_lshr_b32 s2, s0, 16
+; GISEL-NEXT: s_lshr_b32 s3, s1, 16
+; GISEL-NEXT: s_add_co_i32 s0, s0, s1
+; GISEL-NEXT: s_add_co_i32 s2, s2, s3
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GISEL-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GISEL-NEXT: v_pk_max_u16 v0, s0, v0
+; GISEL-NEXT: ; return to shader part epilog
+ %add = add <2 x i16> %a, %b
+ %max = call <2 x i16> @llvm.umax.v216(<2 x i16> %add, <2 x i16> %c)
+ %ret = bitcast <2 x i16> %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_v2u16_sss(<2 x i16> inreg %a, <2 x i16> inreg %b, <2 x i16> inreg %c) {
+; SDAG-LABEL: add_max_v2u16_sss:
+; SDAG: ; %bb.0:
+; SDAG-NEXT: v_pk_add_u16 v0, s0, s1
+; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; SDAG-NEXT: v_pk_max_u16 v0, v0, s2
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: add_max_v2u16_sss:
+; GISEL: ; %bb.0:
+; GISEL-NEXT: s_lshr_b32 s3, s0, 16
+; GISEL-NEXT: s_lshr_b32 s4, s1, 16
+; GISEL-NEXT: s_add_co_i32 s0, s0, s1
+; GISEL-NEXT: s_add_co_i32 s3, s3, s4
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT: s_pack_ll_b32_b16 s0, s0, s3
+; GISEL-NEXT: s_and_b32 s3, s2, 0xffff
+; GISEL-NEXT: s_lshr_b32 s1, s0, 16
+; GISEL-NEXT: s_and_b32 s0, s0, 0xffff
+; GISEL-NEXT: s_lshr_b32 s2, s2, 16
+; GISEL-NEXT: s_max_u32 s0, s0, s3
+; GISEL-NEXT: s_max_u32 s1, s1, s2
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GISEL-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GISEL-NEXT: v_mov_b32_e32 v0, s0
+; GISEL-NEXT: ; return to shader part epilog
+ %add = add <2 x i16> %a, %b
+ %max = call <2 x i16> @llvm.umax.v216(<2 x i16> %add, <2 x i16> %c)
+ %ret = bitcast <2 x i16> %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_v2u16_vsi(<2 x i16> %a, <2 x i16> inreg %b) {
+; GCN-LABEL: add_max_v2u16_vsi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_max_u16 v0, v0, s0, 4
+; GCN-NEXT: ; return to shader part epilog
+ %add = add <2 x i16> %a, %b
+ %max = call <2 x i16> @llvm.umax.v216(<2 x i16> %add, <2 x i16> <i16 4, i16 0>)
+ %ret = bitcast <2 x i16> %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_v2u16_svl(<2 x i16> inreg %a, <2 x i16> %b) {
+; GCN-LABEL: add_max_v2u16_svl:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_max_u16 v0, s0, v0, 0x650064
+; GCN-NEXT: ; return to shader part epilog
+ %add = add <2 x i16> %a, %b
+ %max = call <2 x i16> @llvm.umax.v216(<2 x i16> %add, <2 x i16> <i16 100, i16 101>)
+ %ret = bitcast <2 x i16> %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_v2u16_slv(<2 x i16> inreg %a, <2 x i16> %b) {
+; SDAG-LABEL: add_max_v2u16_slv:
+; SDAG: ; %bb.0:
+; SDAG-NEXT: v_pk_add_max_u16 v0, 0x640064, s0, v0
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: add_max_v2u16_slv:
+; GISEL: ; %bb.0:
+; GISEL-NEXT: s_lshr_b32 s1, s0, 16
+; GISEL-NEXT: s_add_co_i32 s0, s0, 0x640064
+; GISEL-NEXT: s_addk_co_i32 s1, 0x64
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GISEL-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GISEL-NEXT: v_pk_max_u16 v0, s0, v0
+; GISEL-NEXT: ; return to shader part epilog
+ %add = add <2 x i16> %a, <i16 100, i16 100>
+ %max = call <2 x i16> @llvm.umax.v216(<2 x i16> %add, <2 x i16> %b)
+ %ret = bitcast <2 x i16> %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_max_v2s16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; GCN-LABEL: add_max_v2s16_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_max_i16 v0, v0, v1, v2
+; GCN-NEXT: ; return to shader part epilog
+ %add = add <2 x i16> %a, %b
+ %max = call <2 x i16> @llvm.smax.v216(<2 x i16> %add, <2 x i16> %c)
+ %ret = bitcast <2 x i16> %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_min_v2u16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; GCN-LABEL: add_min_v2u16_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_min_u16 v0, v0, v1, v2
+; GCN-NEXT: ; return to shader part epilog
+ %add = add <2 x i16> %a, %b
+ %max = call <2 x i16> @llvm.umin.v216(<2 x i16> %add, <2 x i16> %c)
+ %ret = bitcast <2 x i16> %max to float
+ ret float %ret
+}
+
+define amdgpu_ps float @add_min_v2s16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; GCN-LABEL: add_min_v2s16_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_pk_add_min_i16 v0, v0, v1, v2
+; GCN-NEXT: ; return to shader part epilog
+ %add = add <2 x i16> %a, %b
+ %max = call <2 x i16> @llvm.smin.v216(<2 x i16> %add, <2 x i16> %c)
+ %ret = bitcast <2 x i16> %max to float
+ ret float %ret
+}
+
+declare <2 x i16> @llvm.smin.v216(<2 x i16>, <2 x i16>)
+declare <2 x i16> @llvm.smax.v216(<2 x i16>, <2 x i16>)
+declare <2 x i16> @llvm.umin.v216(<2 x i16>, <2 x i16>)
+declare <2 x i16> @llvm.umax.v216(<2 x i16>, <2 x i16>)
+declare i32 @llvm.smin.i32(i32, i32)
+declare i32 @llvm.smax.i32(i32, i32)
+declare i32 @llvm.umin.i32(i32, i32)
+declare i32 @llvm.umax.i32(i32, i32)
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
index 5b4866c..6823a47 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=amdgcn -mcpu=gfx942 < %s | FileCheck --check-prefixes=GCN,GFX-942 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck --check-prefixes=GCN,GFX-950 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefix=GFX1250 %s
; TODO: Add global-isel when it can support bf16
@@ -9,6 +10,11 @@ define amdgpu_ps float @v_test_cvt_bf16_f32_v(bfloat %v) {
; GCN: ; %bb.0:
; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GCN-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: v_test_cvt_bf16_f32_v:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT: ; return to shader part epilog
%cvt = fpext bfloat %v to float
ret float %cvt
}
@@ -19,6 +25,13 @@ define amdgpu_ps float @v_test_cvt_bf16_f32_s(bfloat inreg %v) {
; GCN-NEXT: s_lshl_b32 s0, s0, 16
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: v_test_cvt_bf16_f32_s:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_lshl_b32 s0, s0, 16
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, s0
+; GFX1250-NEXT: ; return to shader part epilog
%cvt = fpext bfloat %v to float
ret float %cvt
}
@@ -47,6 +60,11 @@ define amdgpu_ps float @v_test_cvt_v2f32_v2bf16_v(<2 x float> %src) {
; GFX-950: ; %bb.0:
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1
; GFX-950-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: v_test_cvt_v2f32_v2bf16_v:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX1250-NEXT: ; return to shader part epilog
%res = fptrunc <2 x float> %src to <2 x bfloat>
%cast = bitcast <2 x bfloat> %res to float
ret float %cast
@@ -80,6 +98,11 @@ define amdgpu_ps float @v_test_cvt_v2f32_v2bf16_s(<2 x float> inreg %src) {
; GFX-950-NEXT: v_mov_b32_e32 v0, s1
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, s0, v0
; GFX-950-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: v_test_cvt_v2f32_v2bf16_s:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, s0, s1
+; GFX1250-NEXT: ; return to shader part epilog
%res = fptrunc <2 x float> %src to <2 x bfloat>
%cast = bitcast <2 x bfloat> %res to float
ret float %cast
@@ -103,6 +126,13 @@ define amdgpu_ps float @v_test_cvt_f32_bf16_v(float %src) {
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
; GFX-950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX-950-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: v_test_cvt_f32_bf16_v:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT: ; return to shader part epilog
%trunc = fptrunc float %src to bfloat
%ext = fpext bfloat %trunc to float
ret float %ext
@@ -172,6 +202,38 @@ define amdgpu_ps float @v_test_cvt_v2f64_v2bf16_v(<2 x double> %src) {
; GFX-950-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v4
; GFX-950-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: v_test_cvt_v2f64_v2bf16_v:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_cvt_f32_f64_e32 v8, v[2:3]
+; GFX1250-NEXT: v_cvt_f32_f64_e32 v9, v[0:1]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_cvt_f64_f32_e32 v[4:5], v8
+; GFX1250-NEXT: v_cvt_f64_f32_e32 v[6:7], v9
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_gt_f64_e64 s1, |v[2:3]|, |v[4:5]|
+; GFX1250-NEXT: v_cmp_nlg_f64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1250-NEXT: v_cmp_nlg_f64_e64 s0, v[0:1], v[6:7]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_cndmask_b32_e64 v2, -1, 1, s1
+; GFX1250-NEXT: v_cmp_gt_f64_e64 s1, |v[0:1]|, |v[6:7]|
+; GFX1250-NEXT: v_dual_add_nc_u32 v1, v8, v2 :: v_dual_bitop2_b32 v10, 1, v8 bitop3:0x40
+; GFX1250-NEXT: s_wait_alu 0xf1ff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cndmask_b32_e64 v0, -1, 1, s1
+; GFX1250-NEXT: v_and_b32_e32 v11, 1, v9
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, 1, v10
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_add_nc_u32_e32 v0, v9, v0
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s2, 1, v11
+; GFX1250-NEXT: s_or_b32 vcc_lo, s1, vcc_lo
+; GFX1250-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
+; GFX1250-NEXT: s_or_b32 vcc_lo, s2, s0
+; GFX1250-NEXT: s_wait_alu 0xfffe
+; GFX1250-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX1250-NEXT: ; return to shader part epilog
%res = fptrunc <2 x double> %src to <2 x bfloat>
%cast = bitcast <2 x bfloat> %res to float
ret float %cast
@@ -201,6 +263,11 @@ define amdgpu_ps float @fptrunc_f32_f32_to_v2bf16(float %a, float %b) {
; GFX-950: ; %bb.0: ; %entry
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1
; GFX-950-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: fptrunc_f32_f32_to_v2bf16:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX1250-NEXT: ; return to shader part epilog
entry:
%a.cvt = fptrunc float %a to bfloat
%b.cvt = fptrunc float %b to bfloat
@@ -236,6 +303,11 @@ define amdgpu_ps float @fptrunc_f32_f32_to_v2bf16_mods(float %a, float %b) {
; GFX-950: ; %bb.0: ; %entry
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, -v0, |v1|
; GFX-950-NEXT: ; return to shader part epilog
+;
+; GFX1250-LABEL: fptrunc_f32_f32_to_v2bf16_mods:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, -v0, |v1|
+; GFX1250-NEXT: ; return to shader part epilog
entry:
%a.neg = fneg float %a
%a.cvt = fptrunc float %a.neg to bfloat
@@ -269,6 +341,13 @@ define amdgpu_ps void @fptrunc_f32_to_bf16(float %a, ptr %out) {
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
+;
+; GFX1250-LABEL: fptrunc_f32_to_bf16:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT: flat_store_b16 v[2:3], v0
+; GFX1250-NEXT: s_endpgm
entry:
%a.cvt = fptrunc float %a to bfloat
store bfloat %a.cvt, ptr %out
@@ -298,6 +377,13 @@ define amdgpu_ps void @fptrunc_f32_to_bf16_abs(float %a, ptr %out) {
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, |v0|, s0
; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
+;
+; GFX1250-LABEL: fptrunc_f32_to_bf16_abs:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, |v0|, s0
+; GFX1250-NEXT: flat_store_b16 v[2:3], v0
+; GFX1250-NEXT: s_endpgm
entry:
%a.abs = call float @llvm.fabs.f32(float %a)
%a.cvt = fptrunc float %a.abs to bfloat
@@ -328,6 +414,13 @@ define amdgpu_ps void @fptrunc_f32_to_bf16_neg(float %a, ptr %out) {
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, -v0, s0
; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
+;
+; GFX1250-LABEL: fptrunc_f32_to_bf16_neg:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, -v0, s0
+; GFX1250-NEXT: flat_store_b16 v[2:3], v0
+; GFX1250-NEXT: s_endpgm
entry:
%a.neg = fneg float %a
%a.cvt = fptrunc float %a.neg to bfloat
@@ -373,6 +466,24 @@ define amdgpu_ps void @fptrunc_f64_to_bf16(double %a, ptr %out) {
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
+;
+; GFX1250-LABEL: fptrunc_f64_to_bf16:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_cvt_f32_f64_e32 v6, v[0:1]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
+; GFX1250-NEXT: v_cmp_gt_f64_e64 s0, |v[0:1]|, |v[4:5]|
+; GFX1250-NEXT: v_cmp_nlg_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e64 v0, -1, 1, s0
+; GFX1250-NEXT: v_dual_add_nc_u32 v0, v6, v0 :: v_dual_bitop2_b32 v7, 1, v6 bitop3:0x40
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s0, 1, v7
+; GFX1250-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
+; GFX1250-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT: flat_store_b16 v[2:3], v0
+; GFX1250-NEXT: s_endpgm
entry:
%a.cvt = fptrunc double %a to bfloat
store bfloat %a.cvt, ptr %out
@@ -417,6 +528,25 @@ define amdgpu_ps void @fptrunc_f64_to_bf16_neg(double %a, ptr %out) {
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
+;
+; GFX1250-LABEL: fptrunc_f64_to_bf16_neg:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_cvt_f32_f64_e64 v6, -v[0:1]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
+; GFX1250-NEXT: v_cmp_gt_f64_e64 s1, |v[0:1]|, |v[4:5]|
+; GFX1250-NEXT: v_cmp_nlg_f64_e64 s0, -v[0:1], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e64 v0, -1, 1, s1
+; GFX1250-NEXT: v_dual_add_nc_u32 v0, v6, v0 :: v_dual_bitop2_b32 v7, 1, v6 bitop3:0x40
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v7
+; GFX1250-NEXT: s_or_b32 vcc_lo, s0, vcc_lo
+; GFX1250-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT: flat_store_b16 v[2:3], v0
+; GFX1250-NEXT: s_endpgm
entry:
%a.neg = fneg double %a
%a.cvt = fptrunc double %a.neg to bfloat
@@ -462,6 +592,25 @@ define amdgpu_ps void @fptrunc_f64_to_bf16_abs(double %a, ptr %out) {
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
+;
+; GFX1250-LABEL: fptrunc_f64_to_bf16_abs:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
+; GFX1250-NEXT: v_cmp_gt_f64_e64 s1, |v[0:1]|, |v[4:5]|
+; GFX1250-NEXT: v_cmp_nlg_f64_e64 s0, |v[0:1]|, v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e64 v0, -1, 1, s1
+; GFX1250-NEXT: v_dual_add_nc_u32 v0, v6, v0 :: v_dual_bitop2_b32 v7, 1, v6 bitop3:0x40
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v7
+; GFX1250-NEXT: s_or_b32 vcc_lo, s0, vcc_lo
+; GFX1250-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT: flat_store_b16 v[2:3], v0
+; GFX1250-NEXT: s_endpgm
entry:
%a.abs = call double @llvm.fabs.f64(double %a)
%a.cvt = fptrunc double %a.abs to bfloat
diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
index d103423..9550405 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
@@ -145,12 +145,13 @@ define amdgpu_kernel void @min_long_forward_vbranch(ptr addrspace(1) %arg) #0 {
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_lshlrev_b32 v0, 2, v0
+; GCN-NEXT: v_mov_b32_e32 v1, 0
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_load_b32 v2, v0, s[0:1] scope:SCOPE_SYS
+; GCN-NEXT: global_load_b32 v2, v0, s[0:1] scale_offset scope:SCOPE_SYS
; GCN-NEXT: s_wait_loadcnt 0x0
; GCN-NEXT: s_wait_xcnt 0x0
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GCN-NEXT: v_lshl_add_u64 v[0:1], s[0:1], 0, v[0:1]
; GCN-NEXT: s_mov_b32 s0, exec_lo
; GCN-NEXT: v_cmpx_ne_u32_e32 0, v2
diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll
index 4cb0d2d..e6c38d2 100644
--- a/llvm/test/CodeGen/AMDGPU/div_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll
@@ -475,28 +475,21 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: ; implicit-def: $sgpr8
; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_nop 0
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; GFX9-O0-NEXT: ; implicit-def: $sgpr8
; GFX9-O0-NEXT: ; implicit-def: $sgpr8
; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_nop 0
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v4
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_nop 0
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[8:9]
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[6:7]
; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13]
-; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[14:15]
+; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[12:13]
; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[14:15]
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7]
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[14:15]
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[6:7]
; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[14:15]
; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[8:9]
; GFX9-O0-NEXT: v_and_b32_e64 v6, 1, v6
@@ -507,7 +500,6 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
; GFX9-O0-NEXT: s_mov_b32 s14, s13
; GFX9-O0-NEXT: v_xor_b32_e64 v6, v6, s14
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
; GFX9-O0-NEXT: v_xor_b32_e64 v4, v4, s12
; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
@@ -1046,10 +1038,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1
; GFX9-O0-NEXT: s_mov_b32 s5, s6
; GFX9-O0-NEXT: s_waitcnt vmcnt(1)
@@ -2667,28 +2659,21 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: ; implicit-def: $sgpr8
; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_nop 0
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; GFX9-O0-NEXT: ; implicit-def: $sgpr8
; GFX9-O0-NEXT: ; implicit-def: $sgpr8
; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_nop 0
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v4
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_nop 0
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[8:9]
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[6:7]
; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13]
-; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[14:15]
+; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[12:13]
; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[14:15]
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7]
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[14:15]
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[6:7]
; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[14:15]
; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[8:9]
; GFX9-O0-NEXT: v_and_b32_e64 v6, 1, v6
@@ -2699,7 +2684,6 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
; GFX9-O0-NEXT: s_mov_b32 s14, s13
; GFX9-O0-NEXT: v_xor_b32_e64 v6, v6, s14
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
; GFX9-O0-NEXT: v_xor_b32_e64 v4, v4, s12
; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
@@ -3238,10 +3222,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1
; GFX9-O0-NEXT: s_mov_b32 s5, s6
; GFX9-O0-NEXT: s_waitcnt vmcnt(1)
diff --git a/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.bf16.ll b/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.bf16.ll
new file mode 100644
index 0000000..85e7038
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.bf16.ll
@@ -0,0 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 %s -o - | FileCheck -check-prefix=GCN %s
+
+define float @test_canonicalize_amdgcn_tanh_f32(float %a) {
+; GCN-LABEL: test_canonicalize_amdgcn_tanh_f32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_tanh_f32_e32 v0, v0
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %tanh = call float @llvm.amdgcn.tanh.f32(float %a)
+ %canonicalized = call float @llvm.canonicalize.f32(float %tanh)
+ ret float %canonicalized
+}
+
+define bfloat @test_canonicalize_amdgcn_tanh_bf16(bfloat %a) {
+; GCN-LABEL: test_canonicalize_amdgcn_tanh_bf16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_tanh_bf16_e32 v0, v0
+; GCN-NEXT: v_nop
+; GCN-NEXT: s_delay_alu instid0(TRANS32_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GCN-NEXT: v_max_num_f32_e32 v0, v0, v0
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %tanh = call bfloat @llvm.amdgcn.tanh.bf16(bfloat %a)
+ %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat %tanh)
+ ret bfloat %canonicalized
+}
+
+define half @test_canonicalize_amdgcn_tanh_f16(half %a) {
+; GCN-LABEL: test_canonicalize_amdgcn_tanh_f16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_tanh_f16_e32 v0, v0
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %tanh = call half @llvm.amdgcn.tanh.f16(half %a)
+ %canonicalized = call half @llvm.canonicalize.f16(half %tanh)
+ ret half %canonicalized
+}
diff --git a/llvm/test/CodeGen/AMDGPU/finalizebundle.mir b/llvm/test/CodeGen/AMDGPU/finalizebundle.mir
new file mode 100644
index 0000000..ea1ae04
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/finalizebundle.mir
@@ -0,0 +1,18 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -passes=finalizebundle-test %s -o - | FileCheck %s
+
+---
+name: test_overlap
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1
+ ; CHECK-LABEL: name: test_overlap
+ ; CHECK: liveins: $vgpr0_vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: BUNDLE implicit-def $vgpr2_vgpr3, implicit-def $vgpr2, implicit-def $vgpr2_lo16, implicit-def $vgpr2_hi16, implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr3_vgpr4, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $vgpr1_vgpr2 {
+ ; CHECK-NEXT: $vgpr2_vgpr3 = V_LSHLREV_B64_pseudo_e32 1, $vgpr0_vgpr1, implicit $exec
+ ; CHECK-NEXT: $vgpr3_vgpr4 = V_LSHLREV_B64_pseudo_e32 1, $vgpr1_vgpr2, implicit $exec
+ ; CHECK-NEXT: }
+ $vgpr2_vgpr3 = V_LSHLREV_B64_pseudo_e32 1, $vgpr0_vgpr1, implicit $exec
+ $vgpr3_vgpr4 = V_LSHLREV_B64_pseudo_e32 1, $vgpr1_vgpr2, implicit $exec
+...
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll
index f54fbba..e6018e4 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll
@@ -95,12 +95,24 @@ define amdgpu_ps float @flat_load_saddr_i8_offset_neg8388609(ptr inreg %sbase) {
}
define amdgpu_ps float @flat_load_saddr_i8_offset_0xFFFFFFFF(ptr inreg %sbase) {
-; GFX1250-LABEL: flat_load_saddr_i8_offset_0xFFFFFFFF:
-; GFX1250: ; %bb.0:
-; GFX1250-NEXT: v_mov_b32_e32 v0, 0xff800000
-; GFX1250-NEXT: flat_load_u8 v0, v0, s[2:3] offset:8388607
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: ; return to shader part epilog
+; GFX1250-SDAG-LABEL: flat_load_saddr_i8_offset_0xFFFFFFFF:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_add_co_u32 v0, s0, 0xff800000, s2
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_add_co_ci_u32_e64 v1, null, 0, s3, s0
+; GFX1250-SDAG-NEXT: flat_load_u8 v0, v[0:1] offset:8388607
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: flat_load_saddr_i8_offset_0xFFFFFFFF:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_add_co_u32 s0, s2, -1
+; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s1, s3, 0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX1250-GISEL-NEXT: flat_load_u8 v0, v[0:1]
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 4294967295
%load = load i8, ptr %gep0
%zext = zext i8 %load to i32
@@ -551,12 +563,21 @@ define amdgpu_ps float @flat_load_saddr_uniform_ptr_in_vgprs_immoffset(i32 %voff
; Both 64-bit base and 32-bit offset are scalar
define amdgpu_ps float @flat_load_saddr_i8_zext_uniform_offset(ptr inreg %sbase, i32 inreg %soffset) {
-; GFX1250-LABEL: flat_load_saddr_i8_zext_uniform_offset:
-; GFX1250: ; %bb.0:
-; GFX1250-NEXT: v_mov_b32_e32 v0, s4
-; GFX1250-NEXT: flat_load_u8 v0, v0, s[2:3]
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: ; return to shader part epilog
+; GFX1250-SDAG-LABEL: flat_load_saddr_i8_zext_uniform_offset:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, s4
+; GFX1250-SDAG-NEXT: flat_load_u8 v0, v0, s[2:3]
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: flat_load_saddr_i8_zext_uniform_offset:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-GISEL-NEXT: s_add_co_u32 s0, s2, s4
+; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s1, s3, 0
+; GFX1250-GISEL-NEXT: flat_load_u8 v0, v0, s[0:1]
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %soffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
%load = load i8, ptr %gep0
@@ -567,12 +588,21 @@ define amdgpu_ps float @flat_load_saddr_i8_zext_uniform_offset(ptr inreg %sbase,
; Both 64-bit base and 32-bit offset are scalar, with immediate offset.
define amdgpu_ps float @flat_load_saddr_i8_zext_uniform_offset_immoffset(ptr inreg %sbase, i32 inreg %soffset) {
-; GFX1250-LABEL: flat_load_saddr_i8_zext_uniform_offset_immoffset:
-; GFX1250: ; %bb.0:
-; GFX1250-NEXT: v_mov_b32_e32 v0, s4
-; GFX1250-NEXT: flat_load_u8 v0, v0, s[2:3] offset:-24
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: ; return to shader part epilog
+; GFX1250-SDAG-LABEL: flat_load_saddr_i8_zext_uniform_offset_immoffset:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, s4
+; GFX1250-SDAG-NEXT: flat_load_u8 v0, v0, s[2:3] offset:-24
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: flat_load_saddr_i8_zext_uniform_offset_immoffset:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-GISEL-NEXT: s_add_co_u32 s0, s2, s4
+; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s1, s3, 0
+; GFX1250-GISEL-NEXT: flat_load_u8 v0, v0, s[0:1] offset:-24
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %soffset to i64
%gep0 = getelementptr inbounds i8, ptr %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr %gep0, i64 -24
@@ -584,12 +614,21 @@ define amdgpu_ps float @flat_load_saddr_i8_zext_uniform_offset_immoffset(ptr inr
; Both components uniform, zext forced to LHS of addressing expression
define amdgpu_ps float @flat_load_saddr_i8_zext_sgpr_ptrtoint_commute_add(ptr inreg %sbase, i32 inreg %soffset) {
-; GFX1250-LABEL: flat_load_saddr_i8_zext_sgpr_ptrtoint_commute_add:
-; GFX1250: ; %bb.0:
-; GFX1250-NEXT: v_mov_b32_e32 v0, s4
-; GFX1250-NEXT: flat_load_u8 v0, v0, s[2:3]
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: ; return to shader part epilog
+; GFX1250-SDAG-LABEL: flat_load_saddr_i8_zext_sgpr_ptrtoint_commute_add:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, s4
+; GFX1250-SDAG-NEXT: flat_load_u8 v0, v0, s[2:3]
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: flat_load_saddr_i8_zext_sgpr_ptrtoint_commute_add:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-GISEL-NEXT: s_add_co_u32 s0, s2, s4
+; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s1, s3, 0
+; GFX1250-GISEL-NEXT: flat_load_u8 v0, v0, s[0:1]
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %soffset to i64
%sbase.as.int = ptrtoint ptr %sbase to i64
%add = add i64 %zext.offset, %sbase.as.int
@@ -602,12 +641,21 @@ define amdgpu_ps float @flat_load_saddr_i8_zext_sgpr_ptrtoint_commute_add(ptr in
; Both components uniform, zext forced to LHS of addressing expression, with immediate offset
define amdgpu_ps float @flat_load_saddr_i8_zext_sgpr_ptrtoint_commute_add_imm_offset0(ptr inreg %sbase, i32 inreg %soffset) {
-; GFX1250-LABEL: flat_load_saddr_i8_zext_sgpr_ptrtoint_commute_add_imm_offset0:
-; GFX1250: ; %bb.0:
-; GFX1250-NEXT: v_mov_b32_e32 v0, s4
-; GFX1250-NEXT: flat_load_u8 v0, v0, s[2:3] offset:128
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: ; return to shader part epilog
+; GFX1250-SDAG-LABEL: flat_load_saddr_i8_zext_sgpr_ptrtoint_commute_add_imm_offset0:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, s4
+; GFX1250-SDAG-NEXT: flat_load_u8 v0, v0, s[2:3] offset:128
+; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX1250-GISEL-LABEL: flat_load_saddr_i8_zext_sgpr_ptrtoint_commute_add_imm_offset0:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-GISEL-NEXT: s_add_co_u32 s0, s2, s4
+; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s1, s3, 0
+; GFX1250-GISEL-NEXT: flat_load_u8 v0, v0, s[0:1] offset:128
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %soffset to i64
%sbase.as.int = ptrtoint ptr %sbase to i64
%add = add i64 %zext.offset, %sbase.as.int
@@ -686,33 +734,13 @@ define amdgpu_ps float @flat_load_i8_vgpr64_sgpr32_offset_8388607(ptr %vbase, i3
; Cannot push the shift into 32-bits, and cannot match.
define amdgpu_ps float @flat_load_saddr_f32_natural_addressing(ptr inreg %sbase, ptr %voffset.ptr) {
-; GFX1250-SDAG-LABEL: flat_load_saddr_f32_natural_addressing:
-; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: flat_load_b32 v0, v[0:1]
-; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
-; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 2, s[2:3]
-; GFX1250-SDAG-NEXT: flat_load_b32 v0, v[0:1]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-SDAG-NEXT: ; return to shader part epilog
-;
-; GFX1250-GISEL-LABEL: flat_load_saddr_f32_natural_addressing:
-; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: flat_load_b32 v0, v[0:1]
-; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
-; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v2, v0
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, v3, v1, vcc_lo
-; GFX1250-GISEL-NEXT: flat_load_b32 v0, v[0:1]
-; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; return to shader part epilog
+; GFX1250-LABEL: flat_load_saddr_f32_natural_addressing:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_load_b32 v0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: flat_load_b32 v0, v0, s[2:3] scale_offset
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
%voffset = load i32, ptr %voffset.ptr
%zext.offset = zext i32 %voffset to i64
%gep = getelementptr inbounds float, ptr %sbase, i64 %zext.offset
@@ -743,8 +771,7 @@ define amdgpu_ps float @flat_load_f32_saddr_zext_vgpr_range(ptr inreg %sbase, pt
; GFX1250: ; %bb.0:
; GFX1250-NEXT: flat_load_b32 v0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GFX1250-NEXT: flat_load_b32 v0, v0, s[2:3]
+; GFX1250-NEXT: flat_load_b32 v0, v0, s[2:3] scale_offset
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: ; return to shader part epilog
%voffset = load i32, ptr %voffset.ptr, !range !0, !noundef !{}
@@ -760,8 +787,7 @@ define amdgpu_ps float @flat_load_f32_saddr_zext_vgpr_range_imm_offset(ptr inreg
; GFX1250: ; %bb.0:
; GFX1250-NEXT: flat_load_b32 v0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GFX1250-NEXT: flat_load_b32 v0, v0, s[2:3] offset:400
+; GFX1250-NEXT: flat_load_b32 v0, v0, s[2:3] offset:400 scale_offset
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: ; return to shader part epilog
%voffset = load i32, ptr %voffset.ptr, !range !0, !noundef !{}
@@ -774,33 +800,13 @@ define amdgpu_ps float @flat_load_f32_saddr_zext_vgpr_range_imm_offset(ptr inreg
; Range is 1 beyond the limit where we can move the shift into 32-bits.
define amdgpu_ps float @flat_load_f32_saddr_zext_vgpr_range_too_large(ptr inreg %sbase, ptr %voffset.ptr) {
-; GFX1250-SDAG-LABEL: flat_load_f32_saddr_zext_vgpr_range_too_large:
-; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: flat_load_b32 v0, v[0:1]
-; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
-; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 2, s[2:3]
-; GFX1250-SDAG-NEXT: flat_load_b32 v0, v[0:1]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-SDAG-NEXT: ; return to shader part epilog
-;
-; GFX1250-GISEL-LABEL: flat_load_f32_saddr_zext_vgpr_range_too_large:
-; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: flat_load_b32 v0, v[0:1]
-; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
-; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v2, v0
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, v3, v1, vcc_lo
-; GFX1250-GISEL-NEXT: flat_load_b32 v0, v[0:1]
-; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; return to shader part epilog
+; GFX1250-LABEL: flat_load_f32_saddr_zext_vgpr_range_too_large:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: flat_load_b32 v0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: flat_load_b32 v0, v0, s[2:3] scale_offset
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
%voffset = load i32, ptr %voffset.ptr, !range !1, !noundef !{}
%zext.offset = zext i32 %voffset to i64
%gep = getelementptr inbounds float, ptr %sbase, i64 %zext.offset
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll
index a98df5c..b0e6752 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll
@@ -150,13 +150,11 @@ define amdgpu_kernel void @soff1_voff1(i32 %soff) {
; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, off offset:1 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, off offset:2 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, off offset:4 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
; GFX12-GISEL-NEXT: s_endpgm
bb:
@@ -321,15 +319,14 @@ define amdgpu_kernel void @soff1_voff2(i32 %soff) {
; GFX12-GISEL-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX12-GISEL-NEXT: v_mul_u32_u24_e32 v0, 2, v0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, off offset:1 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, off offset:2 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, off offset:4 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
; GFX12-GISEL-NEXT: s_endpgm
bb:
@@ -494,15 +491,14 @@ define amdgpu_kernel void @soff1_voff4(i32 %soff) {
; GFX12-GISEL-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX12-GISEL-NEXT: v_mul_u32_u24_e32 v0, 4, v0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, off offset:1 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, off offset:2 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, off offset:4 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
; GFX12-GISEL-NEXT: s_endpgm
bb:
@@ -664,17 +660,15 @@ define amdgpu_kernel void @soff2_voff1(i32 %soff) {
; GFX12-GISEL-LABEL: soff2_voff1:
; GFX12-GISEL: ; %bb.0: ; %bb
; GFX12-GISEL-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 2
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v3, 4 :: v_dual_and_b32 v0, 0x3ff, v0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX12-GISEL-NEXT: s_lshl_b32 s0, s0, 1
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
-; GFX12-GISEL-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, off offset:1 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, off offset:2 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, off offset:4 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
; GFX12-GISEL-NEXT: s_endpgm
bb:
@@ -850,13 +844,11 @@ define amdgpu_kernel void @soff2_voff2(i32 %soff) {
; GFX12-GISEL-NEXT: v_mul_u32_u24_e32 v0, 2, v0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX12-GISEL-NEXT: s_lshl_b32 s0, s0, 1
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX12-GISEL-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, off offset:1 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, off offset:2 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, off offset:4 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
; GFX12-GISEL-NEXT: s_endpgm
bb:
@@ -1032,13 +1024,11 @@ define amdgpu_kernel void @soff2_voff4(i32 %soff) {
; GFX12-GISEL-NEXT: v_mul_u32_u24_e32 v0, 4, v0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX12-GISEL-NEXT: s_lshl_b32 s0, s0, 1
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX12-GISEL-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, off offset:1 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, off offset:2 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, off offset:4 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
; GFX12-GISEL-NEXT: s_endpgm
bb:
@@ -1200,17 +1190,15 @@ define amdgpu_kernel void @soff4_voff1(i32 %soff) {
; GFX12-GISEL-LABEL: soff4_voff1:
; GFX12-GISEL: ; %bb.0: ; %bb
; GFX12-GISEL-NEXT: s_load_b32 s0, s[4:5], 0x24
-; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
-; GFX12-GISEL-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v3, 4
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 2
+; GFX12-GISEL-NEXT: v_dual_mov_b32 v3, 4 :: v_dual_and_b32 v0, 0x3ff, v0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX12-GISEL-NEXT: s_lshl_b32 s0, s0, 2
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
-; GFX12-GISEL-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, off offset:1 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, off offset:2 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, off offset:4 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
; GFX12-GISEL-NEXT: s_endpgm
bb:
@@ -1386,13 +1374,11 @@ define amdgpu_kernel void @soff4_voff2(i32 %soff) {
; GFX12-GISEL-NEXT: v_mul_u32_u24_e32 v0, 2, v0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX12-GISEL-NEXT: s_lshl_b32 s0, s0, 2
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX12-GISEL-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, off offset:1 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, off offset:2 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, off offset:4 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
; GFX12-GISEL-NEXT: s_endpgm
bb:
@@ -1565,13 +1551,11 @@ define amdgpu_kernel void @soff4_voff4(i32 %soff) {
; GFX12-GISEL-NEXT: v_mul_u32_u24_e32 v0, 4, v0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX12-GISEL-NEXT: s_lshl_b32 s0, s0, 2
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX12-GISEL-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, off offset:1 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, s0 offset:1 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, off offset:2 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v2, s0 offset:2 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, off offset:4 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v3, s0 offset:4 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
; GFX12-GISEL-NEXT: s_endpgm
bb:
@@ -1672,9 +1656,7 @@ define amdgpu_kernel void @soff1_voff1_negative(i32 %soff) {
; GFX12-GISEL-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX12-GISEL-NEXT: v_dual_mov_b32 v1, 1 :: v_dual_and_b32 v0, 0x3ff, v0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-GISEL-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, off offset:-1 scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: scratch_store_b8 v0, v1, s0 offset:-1 scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
; GFX12-GISEL-NEXT: s_endpgm
bb:
diff --git a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
index 5d35adc..79907fd 100644
--- a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
@@ -482,17 +482,16 @@ define amdgpu_kernel void @test_v7i16_load_store_kernel(ptr addrspace(1) %ptr1,
; GCN-SDAG-LABEL: test_v7i16_load_store_kernel:
; GCN-SDAG: ; %bb.0:
; GCN-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GCN-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GCN-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GCN-SDAG-NEXT: s_wait_xcnt 0x0
; GCN-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
; GCN-SDAG-NEXT: v_mov_b64_e32 v[8:9], 12
; GCN-SDAG-NEXT: v_mov_b64_e32 v[10:11], 8
; GCN-SDAG-NEXT: v_mov_b64_e32 v[12:13], 0
-; GCN-SDAG-NEXT: v_lshlrev_b32_e32 v4, 4, v0
; GCN-SDAG-NEXT: s_wait_kmcnt 0x0
; GCN-SDAG-NEXT: s_clause 0x1
-; GCN-SDAG-NEXT: global_load_b128 v[0:3], v4, s[0:1]
-; GCN-SDAG-NEXT: global_load_b128 v[4:7], v4, s[2:3]
+; GCN-SDAG-NEXT: global_load_b128 v[0:3], v4, s[0:1] scale_offset
+; GCN-SDAG-NEXT: global_load_b128 v[4:7], v4, s[2:3] scale_offset
; GCN-SDAG-NEXT: s_wait_loadcnt 0x0
; GCN-SDAG-NEXT: v_pk_add_u16 v3, v3, v7
; GCN-SDAG-NEXT: v_pk_add_u16 v2, v2, v6
@@ -509,21 +508,20 @@ define amdgpu_kernel void @test_v7i16_load_store_kernel(ptr addrspace(1) %ptr1,
; GCN-GISEL-LABEL: test_v7i16_load_store_kernel:
; GCN-GISEL: ; %bb.0:
; GCN-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GCN-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GCN-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GCN-GISEL-NEXT: s_wait_xcnt 0x0
; GCN-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x10
; GCN-GISEL-NEXT: v_mov_b64_e32 v[8:9], 0
; GCN-GISEL-NEXT: v_mov_b64_e32 v[10:11], 2
; GCN-GISEL-NEXT: v_mov_b64_e32 v[12:13], 4
-; GCN-GISEL-NEXT: v_lshlrev_b32_e32 v4, 4, v0
; GCN-GISEL-NEXT: v_mov_b64_e32 v[14:15], 6
; GCN-GISEL-NEXT: v_mov_b64_e32 v[16:17], 8
; GCN-GISEL-NEXT: v_mov_b64_e32 v[18:19], 10
; GCN-GISEL-NEXT: v_mov_b64_e32 v[20:21], 12
; GCN-GISEL-NEXT: s_wait_kmcnt 0x0
; GCN-GISEL-NEXT: s_clause 0x1
-; GCN-GISEL-NEXT: global_load_b128 v[0:3], v4, s[0:1]
-; GCN-GISEL-NEXT: global_load_b128 v[4:7], v4, s[2:3]
+; GCN-GISEL-NEXT: global_load_b128 v[0:3], v4, s[0:1] scale_offset
+; GCN-GISEL-NEXT: global_load_b128 v[4:7], v4, s[2:3] scale_offset
; GCN-GISEL-NEXT: s_wait_loadcnt 0x0
; GCN-GISEL-NEXT: v_pk_add_u16 v0, v0, v4
; GCN-GISEL-NEXT: v_pk_add_u16 v1, v1, v5
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
index 355f77a..af914bd 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
@@ -76,13 +76,12 @@ define amdgpu_kernel void @v_round_f64(ptr addrspace(1) %out, ptr addrspace(1) %
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
-; SI-NEXT: s_movk_i32 s4, 0xfc01
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_mov_b32 s3, 0xfffff
; SI-NEXT: v_mov_b32_e32 v8, 0x3ff00000
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_bfe_u32 v4, v3, 20, 11
-; SI-NEXT: v_add_i32_e32 v6, vcc, s4, v4
+; SI-NEXT: v_add_i32_e32 v6, vcc, 0xfffffc01, v4
; SI-NEXT: v_lshr_b64 v[4:5], s[2:3], v6
; SI-NEXT: v_and_b32_e32 v7, 0x80000000, v3
; SI-NEXT: v_not_b32_e32 v5, v5
diff --git a/llvm/test/CodeGen/AMDGPU/max3.ll b/llvm/test/CodeGen/AMDGPU/max3.ll
index a757bb0..b922854 100644
--- a/llvm/test/CodeGen/AMDGPU/max3.ll
+++ b/llvm/test/CodeGen/AMDGPU/max3.ll
@@ -1,6 +1,7 @@
; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefixes=GCN,SI %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=GCN,VI %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9,GFX9_1250 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250,GFX9_1250 %s
; GCN-LABEL: {{^}}v_test_imax3_sgt_i32:
; GCN: v_max3_i32
@@ -46,7 +47,7 @@ define amdgpu_kernel void @v_test_umax3_ugt_i32(ptr addrspace(1) %out, ptr addrs
; VI: v_max_i16
; VI: v_max_i16
-; GFX9: v_max3_i16
+; GFX9_1250: v_max3_i16
define amdgpu_kernel void @v_test_imax3_sgt_i16(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i16, ptr addrspace(1) %aptr, i32 %tid
@@ -70,7 +71,7 @@ define amdgpu_kernel void @v_test_imax3_sgt_i16(ptr addrspace(1) %out, ptr addrs
; VI: v_max_u16
; VI: v_max_u16
-; GFX9: v_max3_u16
+; GFX9_1250: v_max3_u16
define amdgpu_kernel void @v_test_umax3_ugt_i16(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i16, ptr addrspace(1) %aptr, i32 %tid
@@ -94,7 +95,7 @@ define amdgpu_kernel void @v_test_umax3_ugt_i16(ptr addrspace(1) %out, ptr addrs
; VI: v_max_i16
; VI: v_max_i16
-; GFX9: v_max3_i16
+; GFX9_1250: v_max3_i16
define amdgpu_kernel void @v_test_imax3_sgt_i8(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i8, ptr addrspace(1) %aptr, i32 %tid
@@ -118,7 +119,7 @@ define amdgpu_kernel void @v_test_imax3_sgt_i8(ptr addrspace(1) %out, ptr addrsp
; VI: v_max_u16
; VI: v_max_u16
-; GFX9: v_max3_u16
+; GFX9_1250: v_max3_u16
define amdgpu_kernel void @v_test_umax3_ugt_i8(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i8, ptr addrspace(1) %aptr, i32 %tid
@@ -142,7 +143,7 @@ define amdgpu_kernel void @v_test_umax3_ugt_i8(ptr addrspace(1) %out, ptr addrsp
; VI: v_max_i16
; VI: v_max_i16
-; GFX9: v_max3_i16
+; GFX9_1250: v_max3_i16
define amdgpu_kernel void @v_test_imax3_sgt_i7(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i7, ptr addrspace(1) %aptr, i32 %tid
@@ -166,7 +167,7 @@ define amdgpu_kernel void @v_test_imax3_sgt_i7(ptr addrspace(1) %out, ptr addrsp
; VI: v_max_u16
; VI: v_max_u16
-; GFX9: v_max3_u16
+; GFX9_1250: v_max3_u16
define amdgpu_kernel void @v_test_umax3_ugt_i7(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i7, ptr addrspace(1) %aptr, i32 %tid
@@ -260,6 +261,50 @@ define amdgpu_kernel void @v_test_umax3_ugt_i64(ptr addrspace(1) %out, ptr addrs
ret void
}
+; GCN-LABEL: {{^}}v_test_imax3_sgt_v2i16:
+; SI-COUNT-2: v_max3_i32
+; VI-COUNT-2: v_max_i16
+; GFX9-COUNT-2: v_pk_max_i16
+; GFX1250: v_pk_max3_i16
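+; SI splits the <2 x i16> into 32-bit halves (two v_max3_i32), VI uses
+; 16-bit v_max_i16 ops, GFX9 needs two packed v_pk_max_i16, and only
+; GFX1250 folds all three operands into a single v_pk_max3_i16.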
+define amdgpu_kernel void @v_test_imax3_sgt_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr <2 x i16>, ptr addrspace(1) %aptr, i32 %tid
+ %gep1 = getelementptr <2 x i16>, ptr addrspace(1) %bptr, i32 %tid
+ %gep2 = getelementptr <2 x i16>, ptr addrspace(1) %cptr, i32 %tid
+ %outgep = getelementptr <2 x i16>, ptr addrspace(1) %out, i32 %tid
+ %a = load <2 x i16>, ptr addrspace(1) %gep0
+ %b = load <2 x i16>, ptr addrspace(1) %gep1
+ %c = load <2 x i16>, ptr addrspace(1) %gep2
+ %icmp0 = icmp sgt <2 x i16> %a, %b
+ %i0 = select <2 x i1> %icmp0, <2 x i16> %a, <2 x i16> %b
+ %icmp1 = icmp sgt <2 x i16> %i0, %c
+ %i1 = select <2 x i1> %icmp1, <2 x i16> %i0, <2 x i16> %c
+  store <2 x i16> %i1, ptr addrspace(1) %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_umax3_ugt_v2i16:
+; SI-COUNT-2: v_max3_u32
+; VI-COUNT-2: v_max_u16
+; GFX9-COUNT-2: v_pk_max_u16
+; GFX1250: v_pk_max3_u16
+define amdgpu_kernel void @v_test_umax3_ugt_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr <2 x i16>, ptr addrspace(1) %aptr, i32 %tid
+ %gep1 = getelementptr <2 x i16>, ptr addrspace(1) %bptr, i32 %tid
+ %gep2 = getelementptr <2 x i16>, ptr addrspace(1) %cptr, i32 %tid
+ %outgep = getelementptr <2 x i16>, ptr addrspace(1) %out, i32 %tid
+ %a = load <2 x i16>, ptr addrspace(1) %gep0
+ %b = load <2 x i16>, ptr addrspace(1) %gep1
+ %c = load <2 x i16>, ptr addrspace(1) %gep2
+ %icmp0 = icmp ugt <2 x i16> %a, %b
+ %i0 = select <2 x i1> %icmp0, <2 x i16> %a, <2 x i16> %b
+ %icmp1 = icmp ugt <2 x i16> %i0, %c
+ %i1 = select <2 x i1> %icmp1, <2 x i16> %i0, <2 x i16> %c
+  store <2 x i16> %i1, ptr addrspace(1) %outgep
+ ret void
+}
+
declare i32 @llvm.amdgcn.workitem.id.x() #1
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
index 173c9cc..417a4c5 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=amdgcn -run-pass=si-memory-legalizer %s -o - | FileCheck %s
--- |
@@ -39,12 +40,7 @@
...
---
-# CHECK-LABEL: name: atomic_max_i32_noret
-# CHECK-LABEL: bb.1.atomic:
-# CHECK: BUFFER_ATOMIC_SMAX_ADDR64
-# CHECK-NEXT: S_WAITCNT_soft 3952
-# CHECK-NEXT: BUFFER_WBINVL1_VOL
name: atomic_max_i32_noret
alignment: 1
@@ -71,6 +67,46 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
+ ; CHECK-LABEL: name: atomic_max_i32_noret
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vgpr0, $sgpr0_sgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) poison`, addrspace 4)
+ ; CHECK-NEXT: $vgpr1 = V_ASHRREV_I32_e32 31, $vgpr0, implicit $exec
+ ; CHECK-NEXT: $vgpr1_vgpr2 = V_LSHL_B64_e64 $vgpr0_vgpr1, 3, implicit $exec
+ ; CHECK-NEXT: $sgpr7 = S_MOV_B32 61440
+ ; CHECK-NEXT: $sgpr6 = S_MOV_B32 0
+ ; CHECK-NEXT: S_WAITCNT 127
+ ; CHECK-NEXT: $vgpr1_vgpr2 = BUFFER_LOAD_DWORDX2_ADDR64 killed $vgpr1_vgpr2, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 1, 0, implicit $exec :: (volatile load (s64) from %ir.tid.gep, addrspace 1)
+ ; CHECK-NEXT: S_WAITCNT_soft 3952
+ ; CHECK-NEXT: $vgpr0 = V_XOR_B32_e32 1, killed $vgpr0, implicit $exec
+ ; CHECK-NEXT: V_CMP_NE_U32_e32 0, killed $vgpr0, implicit-def $vcc, implicit $exec
+ ; CHECK-NEXT: $sgpr2_sgpr3 = S_AND_SAVEEXEC_B64 killed $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: $sgpr2_sgpr3 = S_XOR_B64 $exec, killed $sgpr2_sgpr3, implicit-def dead $scc
+ ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.atomic:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $sgpr4_sgpr5_sgpr6_sgpr7:0x000000000000000C, $sgpr0_sgpr1, $sgpr2_sgpr3, $vgpr1_vgpr2_vgpr3_vgpr4:0x0000000000000003
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 15, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) poison`, addrspace 4)
+ ; CHECK-NEXT: dead $vgpr0 = V_MOV_B32_e32 -1, implicit $exec
+ ; CHECK-NEXT: dead $vgpr0 = V_MOV_B32_e32 61440, implicit $exec
+ ; CHECK-NEXT: $sgpr4_sgpr5 = S_MOV_B64 0
+ ; CHECK-NEXT: S_WAITCNT 127
+ ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
+ ; CHECK-NEXT: S_WAITCNT 3952
+ ; CHECK-NEXT: S_WAITCNT_soft 3952
+ ; CHECK-NEXT: BUFFER_ATOMIC_SMAX_ADDR64 killed $vgpr0, killed $vgpr1_vgpr2, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 400, 0, implicit $exec :: (volatile load syncscope("one-as") seq_cst (s32) from %ir.gep, addrspace 1)
+ ; CHECK-NEXT: S_WAITCNT_soft 3952
+ ; CHECK-NEXT: BUFFER_WBINVL1_VOL implicit $exec
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.exit:
+ ; CHECK-NEXT: liveins: $sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $exec = S_OR_B64 $exec, killed $sgpr2_sgpr3, implicit-def $scc
+ ; CHECK-NEXT: S_ENDPGM 0
bb.0 (%ir-block.0):
successors: %bb.1.atomic(0x40000000), %bb.2.exit(0x40000000)
liveins: $vgpr0, $sgpr0_sgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
index e325071..064e3e0 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
@@ -1,17 +1,65 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=amdgcn -mcpu=gfx803 -run-pass si-memory-legalizer %s -o - | FileCheck -check-prefix=GCN %s
---
-# GCN-LABEL: name: multiple_mem_operands
-# GCN-LABEL: bb.3:
-# GCN: S_WAITCNT_soft 3952
-# GCN-NEXT: BUFFER_LOAD_DWORD_OFFEN
-# GCN-NEXT: S_WAITCNT_soft 3952
-# GCN-NEXT: BUFFER_WBINVL1_VOL
name: multiple_mem_operands
body: |
+ ; GCN-LABEL: name: multiple_mem_operands
+ ; GCN: bb.0.entry:
+ ; GCN-NEXT: successors: %bb.2(0x30000000), %bb.1(0x50000000)
+ ; GCN-NEXT: liveins: $sgpr0_sgpr1, $sgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) poison`, addrspace 4)
+ ; GCN-NEXT: $sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+ ; GCN-NEXT: $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) poison`, addrspace 4)
+ ; GCN-NEXT: $sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+ ; GCN-NEXT: $sgpr10 = S_MOV_B32 4294967295, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+ ; GCN-NEXT: $sgpr11 = S_MOV_B32 15204352, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+ ; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 1, implicit $exec
+ ; GCN-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 4, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) poison`, addrspace 5)
+ ; GCN-NEXT: S_WAITCNT 127
+ ; GCN-NEXT: S_CMP_LG_U32 killed $sgpr2, 0, implicit-def $scc
+ ; GCN-NEXT: S_WAITCNT 3855
+ ; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 2, implicit $exec
+ ; GCN-NEXT: $vgpr1 = V_MOV_B32_e32 32772, implicit $exec
+ ; GCN-NEXT: BUFFER_STORE_DWORD_OFFEN killed $vgpr0, killed $vgpr1, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) poison`, addrspace 5)
+ ; GCN-NEXT: S_CBRANCH_SCC0 %bb.2, implicit killed $scc
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: bb.1:
+ ; GCN-NEXT: successors: %bb.3(0x80000000)
+ ; GCN-NEXT: liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) poison`, addrspace 4)
+ ; GCN-NEXT: S_WAITCNT 3855
+ ; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 32772, implicit $exec
+ ; GCN-NEXT: S_BRANCH %bb.3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: bb.2:
+ ; GCN-NEXT: successors: %bb.3(0x80000000)
+ ; GCN-NEXT: liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) poison`, addrspace 4)
+ ; GCN-NEXT: S_WAITCNT 3855
+ ; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: bb.3:
+ ; GCN-NEXT: liveins: $sgpr3, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $vgpr0, $sgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: S_WAITCNT 127
+ ; GCN-NEXT: $sgpr0 = S_LSHL_B32 killed $sgpr0, 2, implicit-def dead $scc
+ ; GCN-NEXT: $vgpr0 = V_ADD_CO_U32_e32 killed $sgpr0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
+ ; GCN-NEXT: S_WAITCNT_soft 3952
+ ; GCN-NEXT: $vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 1, 0, implicit $exec :: (load syncscope("agent-one-as") unordered (s32) from `ptr addrspace(1) poison`, addrspace 1), (load syncscope("workgroup-one-as") seq_cst (s32) from `ptr addrspace(5) poison`, addrspace 5)
+ ; GCN-NEXT: S_WAITCNT_soft 3952
+ ; GCN-NEXT: BUFFER_WBINVL1_VOL implicit $exec
+ ; GCN-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit-def $vgpr1_vgpr2, implicit $sgpr4_sgpr5
+ ; GCN-NEXT: $vgpr2 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit $sgpr4_sgpr5, implicit $exec
+ ; GCN-NEXT: S_WAITCNT 3952
+ ; GCN-NEXT: FLAT_STORE_DWORD killed $vgpr1_vgpr2, killed $vgpr0, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr addrspace(1) poison`, addrspace 1)
+ ; GCN-NEXT: S_ENDPGM 0
bb.0.entry:
successors: %bb.1(0x30000000), %bb.2(0x50000000)
liveins: $sgpr0_sgpr1, $sgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/min3.ll b/llvm/test/CodeGen/AMDGPU/min3.ll
index 0e25540..e30b929 100644
--- a/llvm/test/CodeGen/AMDGPU/min3.ll
+++ b/llvm/test/CodeGen/AMDGPU/min3.ll
@@ -1,6 +1,7 @@
; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefixes=GCN,SI %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=GCN,VI %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9,GFX9_1250 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250,GFX9_1250 %s
; GCN-LABEL: {{^}}v_test_imin3_slt_i32:
; GCN: v_min3_i32
@@ -116,7 +117,7 @@ define amdgpu_kernel void @v_test_umin3_2_uses(ptr addrspace(1) %out, ptr addrsp
; VI: v_min_i16
; VI: v_min_i16
-; GFX9: v_min3_i16
+; GFX9_1250: v_min3_i16
define amdgpu_kernel void @v_test_imin3_slt_i16(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i16, ptr addrspace(1) %aptr, i32 %tid
@@ -140,7 +141,7 @@ define amdgpu_kernel void @v_test_imin3_slt_i16(ptr addrspace(1) %out, ptr addrs
; VI: v_min_u16
; VI: v_min_u16
-; GFX9: v_min3_u16
+; GFX9_1250: v_min3_u16
define amdgpu_kernel void @v_test_umin3_ult_i16(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i16, ptr addrspace(1) %aptr, i32 %tid
@@ -164,7 +165,7 @@ define amdgpu_kernel void @v_test_umin3_ult_i16(ptr addrspace(1) %out, ptr addrs
; VI: v_min_i16
; VI: v_min_i16
-; GFX9: v_min3_i16
+; GFX9_1250: v_min3_i16
define amdgpu_kernel void @v_test_imin3_slt_i8(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i8, ptr addrspace(1) %aptr, i32 %tid
@@ -188,7 +189,7 @@ define amdgpu_kernel void @v_test_imin3_slt_i8(ptr addrspace(1) %out, ptr addrsp
; VI: v_min_u16
; VI: v_min_u16
-; GFX9: v_min3_u16
+; GFX9_1250: v_min3_u16
define amdgpu_kernel void @v_test_umin3_ult_i8(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i8, ptr addrspace(1) %aptr, i32 %tid
@@ -212,7 +213,7 @@ define amdgpu_kernel void @v_test_umin3_ult_i8(ptr addrspace(1) %out, ptr addrsp
; VI: v_min_i16
; VI: v_min_i16
-; GFX9: v_min3_i16
+; GFX9_1250: v_min3_i16
define amdgpu_kernel void @v_test_imin3_slt_i7(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i7, ptr addrspace(1) %aptr, i32 %tid
@@ -236,7 +237,7 @@ define amdgpu_kernel void @v_test_imin3_slt_i7(ptr addrspace(1) %out, ptr addrsp
; VI: v_min_u16
; VI: v_min_u16
-; GFX9: v_min3_u16
+; GFX9_1250: v_min3_u16
define amdgpu_kernel void @v_test_umin3_ult_i7(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i7, ptr addrspace(1) %aptr, i32 %tid
@@ -330,6 +331,50 @@ define amdgpu_kernel void @v_test_umin3_ult_i64(ptr addrspace(1) %out, ptr addrs
ret void
}
+; GCN-LABEL: {{^}}v_test_imin3_slt_v2i16:
+; SI-COUNT-2: v_min3_i32
+; VI-COUNT-2: v_min_i16
+; GFX9-COUNT-2: v_pk_min_i16
+; GFX1250: v_pk_min3_i16
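+; Only GFX1250 has the packed three-operand min; older targets expand it
+; the same way as the scalar cases above.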
+define amdgpu_kernel void @v_test_imin3_slt_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr i32, ptr addrspace(1) %aptr, i32 %tid
+ %gep1 = getelementptr i32, ptr addrspace(1) %bptr, i32 %tid
+ %gep2 = getelementptr i32, ptr addrspace(1) %cptr, i32 %tid
+ %outgep = getelementptr <2 x i16>, ptr addrspace(1) %out, i32 %tid
+ %a = load <2 x i16>, ptr addrspace(1) %gep0
+ %b = load <2 x i16>, ptr addrspace(1) %gep1
+ %c = load <2 x i16>, ptr addrspace(1) %gep2
+ %icmp0 = icmp slt <2 x i16> %a, %b
+ %i0 = select <2 x i1> %icmp0, <2 x i16> %a, <2 x i16> %b
+ %icmp1 = icmp slt <2 x i16> %i0, %c
+ %i1 = select <2 x i1> %icmp1, <2 x i16> %i0, <2 x i16> %c
+ store <2 x i16> %i1, ptr addrspace(1) %outgep
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_test_umin3_ult_v2i16:
+; SI-COUNT-2: v_min3_u32
+; VI-COUNT-2: v_min_u16
+; GFX9-COUNT-2: v_pk_min_u16
+; GFX1250: v_pk_min3_u16
+define amdgpu_kernel void @v_test_umin3_ult_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep0 = getelementptr i32, ptr addrspace(1) %aptr, i32 %tid
+ %gep1 = getelementptr i32, ptr addrspace(1) %bptr, i32 %tid
+ %gep2 = getelementptr i32, ptr addrspace(1) %cptr, i32 %tid
+ %outgep = getelementptr <2 x i16>, ptr addrspace(1) %out, i32 %tid
+ %a = load <2 x i16>, ptr addrspace(1) %gep0
+ %b = load <2 x i16>, ptr addrspace(1) %gep1
+ %c = load <2 x i16>, ptr addrspace(1) %gep2
+ %icmp0 = icmp ult <2 x i16> %a, %b
+ %i0 = select <2 x i1> %icmp0, <2 x i16> %a, <2 x i16> %b
+ %icmp1 = icmp ult <2 x i16> %i0, %c
+ %i1 = select <2 x i1> %icmp1, <2 x i16> %i0, <2 x i16> %c
+ store <2 x i16> %i1, ptr addrspace(1) %outgep
+ ret void
+}
+
declare i32 @llvm.amdgcn.workitem.id.x() #1
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/rem_i128.ll b/llvm/test/CodeGen/AMDGPU/rem_i128.ll
index 5d0e4bf..8fe68ba 100644
--- a/llvm/test/CodeGen/AMDGPU/rem_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/rem_i128.ll
@@ -513,28 +513,21 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: ; implicit-def: $sgpr8
; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_nop 0
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; GFX9-O0-NEXT: ; implicit-def: $sgpr8
; GFX9-O0-NEXT: ; implicit-def: $sgpr8
; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_nop 0
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v4
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_nop 0
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[8:9]
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[6:7]
; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13]
-; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[14:15]
+; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[12:13]
; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[14:15]
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7]
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[14:15]
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[6:7]
; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[14:15]
; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[8:9]
; GFX9-O0-NEXT: v_and_b32_e64 v6, 1, v6
@@ -545,7 +538,6 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
; GFX9-O0-NEXT: s_mov_b32 s14, s13
; GFX9-O0-NEXT: v_xor_b32_e64 v6, v6, s14
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
; GFX9-O0-NEXT: v_xor_b32_e64 v4, v4, s12
; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
@@ -1084,10 +1076,10 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1
; GFX9-O0-NEXT: s_mov_b32 s5, s6
; GFX9-O0-NEXT: s_waitcnt vmcnt(1)
@@ -1900,28 +1892,21 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: ; implicit-def: $sgpr8
; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_nop 0
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; GFX9-O0-NEXT: ; implicit-def: $sgpr8
; GFX9-O0-NEXT: ; implicit-def: $sgpr8
; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_nop 0
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v4
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_nop 0
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[8:9]
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[7:8], s[6:7]
; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13]
-; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[14:15]
+; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[4:5], s[12:13]
; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[14:15]
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7]
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[14:15]
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[7:8], s[6:7]
; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[14:15]
; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[8:9]
; GFX9-O0-NEXT: v_and_b32_e64 v6, 1, v6
@@ -1932,7 +1917,6 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
; GFX9-O0-NEXT: s_mov_b32 s14, s13
; GFX9-O0-NEXT: v_xor_b32_e64 v6, v6, s14
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
; GFX9-O0-NEXT: v_xor_b32_e64 v4, v4, s12
; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
@@ -2471,10 +2455,10 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1
; GFX9-O0-NEXT: s_mov_b32 s5, s6
; GFX9-O0-NEXT: s_waitcnt vmcnt(1)
diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll
new file mode 100644
index 0000000..64392a1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll
@@ -0,0 +1,436 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GCN,SDAG %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GCN,GISEL %s
+
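+; On gfx1250 the scale_offset modifier scales the 32-bit VGPR offset by the
+; size of the access, so a sign- or zero-extended element index can be used
+; directly whenever the GEP stride matches the load/store width.
+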
+define amdgpu_ps float @flat_load_b32_idxprom(ptr align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: flat_load_b32_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b32 v0, v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds float, ptr %p, i64 %idxprom
+ %ret = load float, ptr %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @flat_load_b32_idx32(ptr align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: flat_load_b32_idx32:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b32 v0, v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %arrayidx = getelementptr inbounds float, ptr %p, i32 %idx
+ %ret = load float, ptr %arrayidx, align 4
+ ret float %ret
+}
+
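+; The 8-byte <2 x float> stride does not match the 4-byte b32 access, so the
+; index cannot be scaled and the address is computed manually.
+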
+define amdgpu_ps float @flat_load_b32_idxprom_wrong_stride(ptr align 4 inreg %p, i32 %idx) {
+; SDAG-LABEL: flat_load_b32_idxprom_wrong_stride:
+; SDAG: ; %bb.0: ; %entry
+; SDAG-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 3, s[0:1]
+; SDAG-NEXT: flat_load_b32 v0, v[0:1]
+; SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: flat_load_b32_idxprom_wrong_stride:
+; GISEL: ; %bb.0: ; %entry
+; GISEL-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1]
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-NEXT: v_lshlrev_b64_e32 v[0:1], 3, v[0:1]
+; GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v2, v0
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, v3, v1, vcc_lo
+; GISEL-NEXT: flat_load_b32 v0, v[0:1]
+; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <2 x float>, ptr %p, i64 %idxprom
+ %ret = load float, ptr %arrayidx, align 4
+ ret float %ret
+}
+
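+; The constant part of the index (16 elements x 2 bytes) is folded into the
+; immediate offset:32 while the variable part is still scaled.
+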
+define amdgpu_ps float @flat_load_b16_idxprom_ioffset(ptr align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: flat_load_b16_idxprom_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_u16 v0, v0, s[0:1] offset:32 scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds i16, ptr %p, i64 %idxadd
+ %ld = load i16, ptr %arrayidx, align 2
+ %ret.i32 = zext i16 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps <2 x float> @flat_load_b64_idxprom(ptr align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: flat_load_b64_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b64 v[0:1], v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <2 x float>, ptr %p, i64 %idxprom
+ %ret = load <2 x float>, ptr %arrayidx, align 4
+ ret <2 x float> %ret
+}
+
+define amdgpu_ps <3 x float> @flat_load_b96_idxprom(ptr align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: flat_load_b96_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b96 v[0:2], v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds [3 x float], ptr %p, i64 %idxprom
+ %ret = load <3 x float>, ptr %arrayidx, align 4
+ ret <3 x float> %ret
+}
+
+define amdgpu_ps <3 x float> @flat_load_b96_idxprom_ioffset(ptr align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: flat_load_b96_idxprom_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b96 v[0:2], v0, s[0:1] offset:192 scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds [3 x float], ptr %p, i64 %idxadd
+ %ret = load <3 x float>, ptr %arrayidx, align 4
+ ret <3 x float> %ret
+}
+
+define amdgpu_ps <4 x float> @flat_load_b128_idxprom(ptr align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: flat_load_b128_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b128 v[0:3], v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <4 x float>, ptr %p, i64 %idxprom
+ %ret = load <4 x float>, ptr %arrayidx, align 4
+ ret <4 x float> %ret
+}
+
+define amdgpu_ps float @flat_load_b32_idxprom_range(ptr align 4 inreg %p, ptr align 4 %pp) {
+; GCN-LABEL: flat_load_b32_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b32 v0, v[0:1]
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: flat_load_b32 v0, v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds float, ptr %p, i64 %idxprom
+ %ret = load float, ptr %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @flat_load_b32_idxprom_range_ioffset(ptr align 4 inreg %p, ptr align 4 %pp) {
+; GCN-LABEL: flat_load_b32_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b32 v0, v[0:1]
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: flat_load_b32 v0, v0, s[0:1] offset:64 scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds float, ptr %p, i64 %idxadd
+ %ret = load float, ptr %arrayidx, align 4
+ ret float %ret
+}
+
+; Note: this is a byte load, there is nothing to scale
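+; (the scale factor equals the access size, and scaling by 1 is a no-op)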
+
+define amdgpu_ps float @flat_load_b8_idxprom_range_ioffset(ptr align 4 inreg %p, ptr align 4 %pp) {
+; GCN-LABEL: flat_load_b8_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b32 v0, v[0:1]
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: flat_load_u8 v0, v0, s[0:1] offset:16
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds i8, ptr %p, i64 %idxadd
+ %ld = load i8, ptr %arrayidx
+ %ret.i32 = zext i8 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps float @flat_load_b16_idxprom_range(ptr align 4 inreg %p, ptr align 4 %pp) {
+; GCN-LABEL: flat_load_b16_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b32 v0, v[0:1]
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: flat_load_u16 v0, v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds i16, ptr %p, i64 %idxprom
+ %ld = load i16, ptr %arrayidx, align 2
+ %ret.i32 = zext i16 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps float @flat_load_b16_idxprom_range_ioffset(ptr align 4 inreg %p, ptr align 4 %pp) {
+; GCN-LABEL: flat_load_b16_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b32 v0, v[0:1]
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: flat_load_u16 v0, v0, s[0:1] offset:32 scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds i16, ptr %p, i64 %idxadd
+ %ld = load i16, ptr %arrayidx, align 2
+ %ret.i32 = zext i16 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps <2 x float> @flat_load_b64_idxprom_range(ptr align 4 inreg %p, ptr align 4 %pp) {
+; GCN-LABEL: flat_load_b64_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b32 v0, v[0:1]
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: flat_load_b64 v[0:1], v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <2 x float>, ptr %p, i64 %idxprom
+ %ret = load <2 x float>, ptr %arrayidx, align 4
+ ret <2 x float> %ret
+}
+
+define amdgpu_ps <3 x float> @flat_load_b96_idxprom_range(ptr align 4 inreg %p, ptr align 4 %pp) {
+; GCN-LABEL: flat_load_b96_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b32 v0, v[0:1]
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: flat_load_b96 v[0:2], v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds [3 x float], ptr %p, i64 %idxprom
+ %ret = load <3 x float>, ptr %arrayidx, align 4
+ ret <3 x float> %ret
+}
+
+define amdgpu_ps <3 x float> @flat_load_b96_idxprom_range_ioffset(ptr align 4 inreg %p, ptr align 4 %pp) {
+; GCN-LABEL: flat_load_b96_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b32 v0, v[0:1]
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: flat_load_b96 v[0:2], v0, s[0:1] offset:192 scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds [3 x float], ptr %p, i64 %idxadd
+ %ret = load <3 x float>, ptr %arrayidx, align 4
+ ret <3 x float> %ret
+}
+
+define amdgpu_ps <4 x float> @flat_load_b128_idxprom_range(ptr align 4 inreg %p, ptr align 4 %pp) {
+; GCN-LABEL: flat_load_b128_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: flat_load_b32 v0, v[0:1]
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: flat_load_b128 v[0:3], v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <4 x float>, ptr %p, i64 %idxprom
+ %ret = load <4 x float>, ptr %arrayidx, align 4
+ ret <4 x float> %ret
+}
+
+define amdgpu_ps void @flat_store_b32_idxprom(ptr align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: flat_store_b32_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v1, 1.0
+; GCN-NEXT: flat_store_b32 v0, v1, s[0:1] scale_offset
+; GCN-NEXT: s_endpgm
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds float, ptr %p, i64 %idxprom
+ store float 1.0, ptr %arrayidx, align 4
+ ret void
+}
+
+define amdgpu_ps void @flat_store_b16_idxprom(ptr align 2 inreg %p, i32 %idx) {
+; GCN-LABEL: flat_store_b16_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v1, 1
+; GCN-NEXT: flat_store_b16 v0, v1, s[0:1] scale_offset
+; GCN-NEXT: s_endpgm
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds i16, ptr %p, i64 %idxprom
+ store i16 1, ptr %arrayidx, align 2
+ ret void
+}
+
+define amdgpu_ps void @flat_store_b64_idxprom(ptr align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: flat_store_b64_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b64_e32 v[2:3], 1.0
+; GCN-NEXT: flat_store_b64 v0, v[2:3], s[0:1] scale_offset
+; GCN-NEXT: s_endpgm
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds double, ptr %p, i64 %idxprom
+ store double 1.0, ptr %arrayidx, align 4
+ ret void
+}
+
+define amdgpu_ps void @flat_atomicrmw_b32_idxprom(ptr align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: flat_atomicrmw_b32_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v1, 1
+; GCN-NEXT: flat_atomic_add_u32 v0, v1, s[0:1] scale_offset scope:SCOPE_SYS
+; GCN-NEXT: s_endpgm
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds i32, ptr %p, i64 %idxprom
+ atomicrmw add ptr %arrayidx, i32 1 monotonic
+ ret void
+}
+
+define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg %p, i32 %idx) {
+; SDAG-LABEL: flat_atomicrmw_b64_rtn_idxprom:
+; SDAG: ; %bb.0: ; %entry
+; SDAG-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; SDAG-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 3, s[0:1]
+; SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; SDAG-NEXT: s_mov_b32 s0, exec_lo
+; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
+; SDAG-NEXT: s_wait_alu 0xfffe
+; SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
+; SDAG-NEXT: s_cbranch_execnz .LBB21_3
+; SDAG-NEXT: ; %bb.1: ; %Flow
+; SDAG-NEXT: s_wait_alu 0xfffe
+; SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; SDAG-NEXT: s_cbranch_execnz .LBB21_4
+; SDAG-NEXT: .LBB21_2: ; %atomicrmw.phi
+; SDAG-NEXT: s_wait_alu 0xfffe
+; SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; SDAG-NEXT: s_branch .LBB21_5
+; SDAG-NEXT: .LBB21_3: ; %atomicrmw.global
+; SDAG-NEXT: v_mov_b64_e32 v[0:1], 1
+; SDAG-NEXT: flat_atomic_add_u64 v[0:1], v[2:3], v[0:1] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_wait_xcnt 0x0
+; SDAG-NEXT: s_wait_alu 0xfffe
+; SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
+; SDAG-NEXT: s_cbranch_execz .LBB21_2
+; SDAG-NEXT: .LBB21_4: ; %atomicrmw.private
+; SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
+; SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v2, vcc_lo
+; SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
+; SDAG-NEXT: s_wait_loadcnt 0x0
+; SDAG-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, 1
+; SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; SDAG-NEXT: s_wait_xcnt 0x0
+; SDAG-NEXT: s_wait_alu 0xfffe
+; SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; SDAG-NEXT: s_branch .LBB21_5
+; SDAG-NEXT: .LBB21_5:
+;
+; GISEL-LABEL: flat_atomicrmw_b64_rtn_idxprom:
+; GISEL: ; %bb.0: ; %entry
+; GISEL-NEXT: v_mov_b32_e32 v2, v0
+; GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1]
+; GISEL-NEXT: s_mov_b64 s[2:3], src_private_base
+; GISEL-NEXT: s_mov_b32 s2, exec_lo
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GISEL-NEXT: v_lshlrev_b64_e32 v[0:1], 3, v[2:3]
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v4, v0
+; GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, v5, v1, vcc_lo
+; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-NEXT: v_cmpx_ne_u32_e64 s3, v5
+; GISEL-NEXT: s_xor_b32 s2, exec_lo, s2
+; GISEL-NEXT: s_cbranch_execnz .LBB21_3
+; GISEL-NEXT: ; %bb.1: ; %Flow
+; GISEL-NEXT: s_wait_alu 0xfffe
+; GISEL-NEXT: s_and_not1_saveexec_b32 s0, s2
+; GISEL-NEXT: s_cbranch_execnz .LBB21_4
+; GISEL-NEXT: .LBB21_2: ; %atomicrmw.phi
+; GISEL-NEXT: s_wait_alu 0xfffe
+; GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-NEXT: s_branch .LBB21_5
+; GISEL-NEXT: .LBB21_3: ; %atomicrmw.global
+; GISEL-NEXT: v_mov_b64_e32 v[0:1], 1
+; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GISEL-NEXT: flat_atomic_add_u64 v[0:1], v2, v[0:1], s[0:1] scale_offset th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GISEL-NEXT: s_wait_xcnt 0x0
+; GISEL-NEXT: s_wait_alu 0xfffe
+; GISEL-NEXT: s_and_not1_saveexec_b32 s0, s2
+; GISEL-NEXT: s_cbranch_execz .LBB21_2
+; GISEL-NEXT: .LBB21_4: ; %atomicrmw.private
+; GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
+; GISEL-NEXT: s_wait_alu 0xfffd
+; GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-NEXT: scratch_load_b64 v[0:1], v4, off
+; GISEL-NEXT: s_wait_loadcnt 0x0
+; GISEL-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, 1
+; GISEL-NEXT: scratch_store_b64 v4, v[2:3], off
+; GISEL-NEXT: s_wait_xcnt 0x0
+; GISEL-NEXT: s_wait_alu 0xfffe
+; GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GISEL-NEXT: s_branch .LBB21_5
+; GISEL-NEXT: .LBB21_5:
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds i64, ptr %p, i64 %idxprom
+ %ret = atomicrmw add ptr %arrayidx, i64 1 monotonic
+ %ret.cast = bitcast i64 %ret to <2 x float>
+ ret <2 x float> %ret.cast
+}
+
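+; The !range metadata below bounds the loaded index to [0, 1024), which is
+; what lets the *_range tests above treat a sign-extended index as unsigned.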
+!0 = !{i32 0, i32 1024}
diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-global.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-global.ll
new file mode 100644
index 0000000..faea84e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/scale-offset-global.ll
@@ -0,0 +1,351 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GCN,SDAG %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GCN,GISEL %s
+
+define amdgpu_ps float @global_load_b32_idxprom(ptr addrspace(1) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: global_load_b32_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds float, ptr addrspace(1) %p, i64 %idxprom
+ %ret = load float, ptr addrspace(1) %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @global_load_b32_idx32(ptr addrspace(1) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: global_load_b32_idx32:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %arrayidx = getelementptr inbounds float, ptr addrspace(1) %p, i32 %idx
+ %ret = load float, ptr addrspace(1) %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @global_load_b32_idxprom_wrong_stride(ptr addrspace(1) align 4 inreg %p, i32 %idx) {
+; SDAG-LABEL: global_load_b32_idxprom_wrong_stride:
+; SDAG: ; %bb.0: ; %entry
+; SDAG-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 3, s[0:1]
+; SDAG-NEXT: global_load_b32 v0, v[0:1], off
+; SDAG-NEXT: s_wait_loadcnt 0x0
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: global_load_b32_idxprom_wrong_stride:
+; GISEL: ; %bb.0: ; %entry
+; GISEL-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1]
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-NEXT: v_lshlrev_b64_e32 v[0:1], 3, v[0:1]
+; GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v2, v0
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, v3, v1, vcc_lo
+; GISEL-NEXT: global_load_b32 v0, v[0:1], off
+; GISEL-NEXT: s_wait_loadcnt 0x0
+; GISEL-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(1) %p, i64 %idxprom
+ %ret = load float, ptr addrspace(1) %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @global_load_b16_idxprom_ioffset(ptr addrspace(1) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: global_load_b16_idxprom_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_u16 v0, v0, s[0:1] offset:32 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds i16, ptr addrspace(1) %p, i64 %idxadd
+ %ld = load i16, ptr addrspace(1) %arrayidx, align 2
+ %ret.i32 = zext i16 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps <2 x float> @global_load_b64_idxprom(ptr addrspace(1) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: global_load_b64_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b64 v[0:1], v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(1) %p, i64 %idxprom
+ %ret = load <2 x float>, ptr addrspace(1) %arrayidx, align 4
+ ret <2 x float> %ret
+}
+
+define amdgpu_ps <3 x float> @global_load_b96_idxprom(ptr addrspace(1) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: global_load_b96_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b96 v[0:2], v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds [3 x float], ptr addrspace(1) %p, i64 %idxprom
+ %ret = load <3 x float>, ptr addrspace(1) %arrayidx, align 4
+ ret <3 x float> %ret
+}
+
+define amdgpu_ps <3 x float> @global_load_b96_idxprom_ioffset(ptr addrspace(1) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: global_load_b96_idxprom_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b96 v[0:2], v0, s[0:1] offset:192 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds [3 x float], ptr addrspace(1) %p, i64 %idxadd
+ %ret = load <3 x float>, ptr addrspace(1) %arrayidx, align 4
+ ret <3 x float> %ret
+}
+
+define amdgpu_ps <4 x float> @global_load_b128_idxprom(ptr addrspace(1) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: global_load_b128_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b128 v[0:3], v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <4 x float>, ptr addrspace(1) %p, i64 %idxprom
+ %ret = load <4 x float>, ptr addrspace(1) %arrayidx, align 4
+ ret <4 x float> %ret
+}
+
+define amdgpu_ps float @global_load_b32_idxprom_range(ptr addrspace(1) align 4 inreg %p, ptr addrspace(1) align 4 %pp) {
+; GCN-LABEL: global_load_b32_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b32 v0, v[0:1], off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(1) %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds float, ptr addrspace(1) %p, i64 %idxprom
+ %ret = load float, ptr addrspace(1) %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @global_load_b32_idxprom_range_ioffset(ptr addrspace(1) align 4 inreg %p, ptr addrspace(1) align 4 %pp) {
+; GCN-LABEL: global_load_b32_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b32 v0, v[0:1], off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: global_load_b32 v0, v0, s[0:1] offset:64 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(1) %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds float, ptr addrspace(1) %p, i64 %idxadd
+ %ret = load float, ptr addrspace(1) %arrayidx, align 4
+ ret float %ret
+}
+
+; Note: this is a byte load; there is nothing to scale.
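+; (scale_offset multiplies the 32-bit index by the access size; with a 1-byte
+; access there is no multiplication to fold, so only the constant offset:16
+; survives, as checked below.)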
+
+define amdgpu_ps float @global_load_b8_idxprom_range_ioffset(ptr addrspace(1) align 4 inreg %p, ptr addrspace(1) align 4 %pp) {
+; GCN-LABEL: global_load_b8_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b32 v0, v[0:1], off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: global_load_u8 v0, v0, s[0:1] offset:16
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(1) %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds i8, ptr addrspace(1) %p, i64 %idxadd
+ %ld = load i8, ptr addrspace(1) %arrayidx
+ %ret.i32 = zext i8 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps float @global_load_b16_idxprom_range(ptr addrspace(1) align 4 inreg %p, ptr addrspace(1) align 4 %pp) {
+; GCN-LABEL: global_load_b16_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b32 v0, v[0:1], off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: global_load_u16 v0, v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(1) %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds i16, ptr addrspace(1) %p, i64 %idxprom
+ %ld = load i16, ptr addrspace(1) %arrayidx, align 2
+ %ret.i32 = zext i16 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps float @global_load_b16_idxprom_range_ioffset(ptr addrspace(1) align 4 inreg %p, ptr addrspace(1) align 4 %pp) {
+; GCN-LABEL: global_load_b16_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b32 v0, v[0:1], off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: global_load_u16 v0, v0, s[0:1] offset:32 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(1) %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds i16, ptr addrspace(1) %p, i64 %idxadd
+ %ld = load i16, ptr addrspace(1) %arrayidx, align 2
+ %ret.i32 = zext i16 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps <2 x float> @global_load_b64_idxprom_range(ptr addrspace(1) align 4 inreg %p, ptr addrspace(1) align 4 %pp) {
+; GCN-LABEL: global_load_b64_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b32 v0, v[0:1], off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: global_load_b64 v[0:1], v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(1) %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(1) %p, i64 %idxprom
+ %ret = load <2 x float>, ptr addrspace(1) %arrayidx, align 4
+ ret <2 x float> %ret
+}
+
+define amdgpu_ps <3 x float> @global_load_b96_idxprom_range(ptr addrspace(1) align 4 inreg %p, ptr addrspace(1) align 4 %pp) {
+; GCN-LABEL: global_load_b96_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b32 v0, v[0:1], off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: global_load_b96 v[0:2], v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(1) %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds [3 x float], ptr addrspace(1) %p, i64 %idxprom
+ %ret = load <3 x float>, ptr addrspace(1) %arrayidx, align 4
+ ret <3 x float> %ret
+}
+
+define amdgpu_ps <3 x float> @global_load_b96_idxprom_range_ioffset(ptr addrspace(1) align 4 inreg %p, ptr addrspace(1) align 4 %pp) {
+; GCN-LABEL: global_load_b96_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b32 v0, v[0:1], off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: global_load_b96 v[0:2], v0, s[0:1] offset:192 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(1) %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds [3 x float], ptr addrspace(1) %p, i64 %idxadd
+ %ret = load <3 x float>, ptr addrspace(1) %arrayidx, align 4
+ ret <3 x float> %ret
+}
+
+define amdgpu_ps <4 x float> @global_load_b128_idxprom_range(ptr addrspace(1) align 4 inreg %p, ptr addrspace(1) align 4 %pp) {
+; GCN-LABEL: global_load_b128_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: global_load_b32 v0, v[0:1], off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: global_load_b128 v[0:3], v0, s[0:1] scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(1) %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <4 x float>, ptr addrspace(1) %p, i64 %idxprom
+ %ret = load <4 x float>, ptr addrspace(1) %arrayidx, align 4
+ ret <4 x float> %ret
+}
+
+define amdgpu_ps void @global_store_b32_idxprom(ptr addrspace(1) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: global_store_b32_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v1, 1.0
+; GCN-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
+; GCN-NEXT: s_endpgm
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds float, ptr addrspace(1) %p, i64 %idxprom
+ store float 1.0, ptr addrspace(1) %arrayidx, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_store_b16_idxprom(ptr addrspace(1) align 2 inreg %p, i32 %idx) {
+; GCN-LABEL: global_store_b16_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v1, 1
+; GCN-NEXT: global_store_b16 v0, v1, s[0:1] scale_offset
+; GCN-NEXT: s_endpgm
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds i16, ptr addrspace(1) %p, i64 %idxprom
+ store i16 1, ptr addrspace(1) %arrayidx, align 2
+ ret void
+}
+
+define amdgpu_ps void @global_store_b64_idxprom(ptr addrspace(1) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: global_store_b64_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b64_e32 v[2:3], 1.0
+; GCN-NEXT: global_store_b64 v0, v[2:3], s[0:1] scale_offset
+; GCN-NEXT: s_endpgm
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds double, ptr addrspace(1) %p, i64 %idxprom
+ store double 1.0, ptr addrspace(1) %arrayidx, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomicrmw_b32_idxprom(ptr addrspace(1) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: global_atomicrmw_b32_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v1, 1
+; GCN-NEXT: global_atomic_add_u32 v0, v1, s[0:1] scale_offset scope:SCOPE_SYS
+; GCN-NEXT: s_endpgm
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %p, i64 %idxprom
+ atomicrmw add ptr addrspace(1) %arrayidx, i32 1 monotonic
+ ret void
+}
+
+define amdgpu_ps <2 x float> @global_atomicrmw_b64_rtn_idxprom(ptr addrspace(1) align 8 inreg %p, i32 %idx) {
+; GCN-LABEL: global_atomicrmw_b64_rtn_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b64_e32 v[2:3], 1
+; GCN-NEXT: global_atomic_add_u64 v[0:1], v0, v[2:3], s[0:1] scale_offset th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds i64, ptr addrspace(1) %p, i64 %idxprom
+ %ret = atomicrmw add ptr addrspace(1) %arrayidx, i64 1 monotonic
+ %ret.cast = bitcast i64 %ret to <2 x float>
+ ret <2 x float> %ret.cast
+}
+
+!0 = !{i32 0, i32 1024}
diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-scratch.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-scratch.ll
new file mode 100644
index 0000000..27ecc83
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/scale-offset-scratch.ll
@@ -0,0 +1,322 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefix=GCN %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefix=GCN %s
+
+define amdgpu_ps float @scratch_load_b32_alloca_idxprom(i32 %idx) {
+; GCN-LABEL: scratch_load_b32_alloca_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b32 v0, v0, off scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %p = alloca [32 x i32], align 4, addrspace(5)
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds float, ptr addrspace(5) %p, i64 %idxprom
+ %ret = load float, ptr addrspace(5) %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @scratch_load_b32_idxprom(ptr addrspace(5) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: scratch_load_b32_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b32 v0, v0, s0 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds float, ptr addrspace(5) %p, i64 %idxprom
+ %ret = load float, ptr addrspace(5) %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @scratch_load_b32_idx32(ptr addrspace(5) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: scratch_load_b32_idx32:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b32 v0, v0, s0 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %arrayidx = getelementptr inbounds float, ptr addrspace(5) %p, i32 %idx
+ %ret = load float, ptr addrspace(5) %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @scratch_load_b32_idxprom_wrong_stride(ptr addrspace(5) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: scratch_load_b32_idxprom_wrong_stride:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GCN-NEXT: scratch_load_b32 v0, v0, s0
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(5) %p, i64 %idxprom
+ %ret = load float, ptr addrspace(5) %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @scratch_load_b16_idxprom_ioffset(ptr addrspace(5) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: scratch_load_b16_idxprom_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_u16 v0, v0, s0 offset:32 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = sext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds i16, ptr addrspace(5) %p, i64 %idxadd
+ %ld = load i16, ptr addrspace(5) %arrayidx, align 2
+ %ret.i32 = zext i16 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps <2 x float> @scratch_load_b64_idxprom(ptr addrspace(5) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: scratch_load_b64_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b64 v[0:1], v0, s0 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(5) %p, i64 %idxprom
+ %ret = load <2 x float>, ptr addrspace(5) %arrayidx, align 4
+ ret <2 x float> %ret
+}
+
+define amdgpu_ps <3 x float> @scratch_load_b96_idxprom(ptr addrspace(5) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: scratch_load_b96_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b96 v[0:2], v0, s0 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds [3 x float], ptr addrspace(5) %p, i64 %idxprom
+ %ret = load <3 x float>, ptr addrspace(5) %arrayidx, align 4
+ ret <3 x float> %ret
+}
+
+define amdgpu_ps <3 x float> @scratch_load_b96_idxprom_ioffset(ptr addrspace(5) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: scratch_load_b96_idxprom_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b96 v[0:2], v0, s0 offset:192 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = zext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds [3 x float], ptr addrspace(5) %p, i64 %idxadd
+ %ret = load <3 x float>, ptr addrspace(5) %arrayidx, align 4
+ ret <3 x float> %ret
+}
+
+define amdgpu_ps <4 x float> @scratch_load_b128_idxprom(ptr addrspace(5) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: scratch_load_b128_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b128 v[0:3], v0, s0 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <4 x float>, ptr addrspace(5) %p, i64 %idxprom
+ %ret = load <4 x float>, ptr addrspace(5) %arrayidx, align 4
+ ret <4 x float> %ret
+}
+
+define amdgpu_ps float @scratch_load_b32_idxprom_range(ptr addrspace(5) align 4 inreg %p, ptr addrspace(5) align 4 %pp) {
+; GCN-LABEL: scratch_load_b32_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b32 v0, v0, off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: scratch_load_b32 v0, v0, s0 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(5) %pp, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds float, ptr addrspace(5) %p, i64 %idxprom
+ %ret = load float, ptr addrspace(5) %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @scratch_load_b32_idxprom_range_ioffset(ptr addrspace(5) align 4 inreg %p, ptr addrspace(5) align 4 %pp) {
+; GCN-LABEL: scratch_load_b32_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b32 v0, v0, off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: scratch_load_b32 v0, v0, s0 offset:64 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(5) %pp, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds float, ptr addrspace(5) %p, i64 %idxadd
+ %ret = load float, ptr addrspace(5) %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @scratch_load_b8_idxprom_range_ioffset(ptr addrspace(5) align 4 inreg %p, ptr addrspace(5) align 4 %pp) {
+; GCN-LABEL: scratch_load_b8_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b32 v0, v0, off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: scratch_load_u8 v0, v0, s0 offset:16
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(5) %pp, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds i8, ptr addrspace(5) %p, i64 %idxadd
+ %ld = load i8, ptr addrspace(5) %arrayidx
+ %ret.i32 = zext i8 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps float @scratch_load_b16_idxprom_range(ptr addrspace(5) align 4 inreg %p, ptr addrspace(5) align 4 %pp) {
+; GCN-LABEL: scratch_load_b16_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b32 v0, v0, off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: scratch_load_u16 v0, v0, s0 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(5) %pp, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds i16, ptr addrspace(5) %p, i64 %idxprom
+ %ld = load i16, ptr addrspace(5) %arrayidx, align 2
+ %ret.i32 = zext i16 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps float @scratch_load_b16_idxprom_range_ioffset(ptr addrspace(5) align 4 inreg %p, ptr addrspace(5) align 4 %pp) {
+; GCN-LABEL: scratch_load_b16_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b32 v0, v0, off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: scratch_load_u16 v0, v0, s0 offset:32 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(5) %pp, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds i16, ptr addrspace(5) %p, i64 %idxadd
+ %ld = load i16, ptr addrspace(5) %arrayidx, align 2
+ %ret.i32 = zext i16 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps <2 x float> @scratch_load_b64_idxprom_range(ptr addrspace(5) align 4 inreg %p, ptr addrspace(5) align 4 %pp) {
+; GCN-LABEL: scratch_load_b64_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b32 v0, v0, off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: scratch_load_b64 v[0:1], v0, s0 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(5) %pp, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(5) %p, i64 %idxprom
+ %ret = load <2 x float>, ptr addrspace(5) %arrayidx, align 4
+ ret <2 x float> %ret
+}
+
+; The multiplication for the scaled offset is unsigned, but the !range
+; metadata proves the sign-extended index is non-negative, so it can still be
+; matched.
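+; A minimal sketch of the reasoning (editorial note, assuming the usual
+; !range semantics): with %idx known to be in [0, 1024),
+;   %idxprom = sext i32 %idx to i64   ; produces the same value as zext
+; so the unsigned scaled-offset addressing mode still applies.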
+
+define amdgpu_ps <3 x float> @scratch_load_b96_idxprom_range(ptr addrspace(5) align 4 inreg %p, ptr addrspace(5) align 4 %pp) {
+; GCN-LABEL: scratch_load_b96_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b32 v0, v0, off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: scratch_load_b96 v[0:2], v0, s0 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(5) %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %arrayidx = getelementptr inbounds [3 x float], ptr addrspace(5) %p, i64 %idxprom
+ %ret = load <3 x float>, ptr addrspace(5) %arrayidx, align 4
+ ret <3 x float> %ret
+}
+
+define amdgpu_ps <3 x float> @scratch_load_b96_idxprom_range_ioffset(ptr addrspace(5) align 4 inreg %p, ptr addrspace(5) align 4 %pp) {
+; GCN-LABEL: scratch_load_b96_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b32 v0, v0, off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: scratch_load_b96 v[0:2], v0, s0 offset:192 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(5) %pp, align 4, !range !0
+ %idxprom = sext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds [3 x float], ptr addrspace(5) %p, i64 %idxadd
+ %ret = load <3 x float>, ptr addrspace(5) %arrayidx, align 4
+ ret <3 x float> %ret
+}
+
+define amdgpu_ps <4 x float> @scratch_load_b128_idxprom_range(ptr addrspace(5) align 4 inreg %p, ptr addrspace(5) align 4 %pp) {
+; GCN-LABEL: scratch_load_b128_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: scratch_load_b32 v0, v0, off
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: scratch_load_b128 v[0:3], v0, s0 scale_offset
+; GCN-NEXT: s_wait_loadcnt 0x0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(5) %pp, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <4 x float>, ptr addrspace(5) %p, i64 %idxprom
+ %ret = load <4 x float>, ptr addrspace(5) %arrayidx, align 4
+ ret <4 x float> %ret
+}
+
+define amdgpu_ps void @scratch_store_b32_idxprom(ptr addrspace(5) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: scratch_store_b32_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v1, 1.0
+; GCN-NEXT: scratch_store_b32 v0, v1, s0 scale_offset
+; GCN-NEXT: s_endpgm
+entry:
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds float, ptr addrspace(5) %p, i64 %idxprom
+ store float 1.0, ptr addrspace(5) %arrayidx, align 4
+ ret void
+}
+
+define amdgpu_ps void @scratch_store_b16_idxprom(ptr addrspace(5) align 2 inreg %p, i32 %idx) {
+; GCN-LABEL: scratch_store_b16_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b32_e32 v1, 1
+; GCN-NEXT: scratch_store_b16 v0, v1, s0 scale_offset
+; GCN-NEXT: s_endpgm
+entry:
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds i16, ptr addrspace(5) %p, i64 %idxprom
+ store i16 1, ptr addrspace(5) %arrayidx, align 2
+ ret void
+}
+
+define amdgpu_ps void @scratch_store_b64_idxprom(ptr addrspace(5) align 4 inreg %p, i32 %idx) {
+; GCN-LABEL: scratch_store_b64_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: v_mov_b64_e32 v[2:3], 1.0
+; GCN-NEXT: scratch_store_b64 v0, v[2:3], s0 scale_offset
+; GCN-NEXT: s_endpgm
+entry:
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds double, ptr addrspace(5) %p, i64 %idxprom
+ store double 1.0, ptr addrspace(5) %arrayidx, align 4
+ ret void
+}
+
+!0 = !{i32 0, i32 1024}
diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll
new file mode 100644
index 0000000..b5bb68e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll
@@ -0,0 +1,372 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GCN,SDAG %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GCN,GISEL %s
+
+define amdgpu_ps float @s_load_b32_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) {
+; GCN-LABEL: s_load_b32_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b32 s0, s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds float, ptr addrspace(4) %p, i64 %idxprom
+ %ret = load float, ptr addrspace(4) %arrayidx, align 4
+ ret float %ret
+}
+
+; 'i32 %idx' is a signed index while the SMRD soffset is unsigned, so the
+; scaled-offset form is not selected.
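+; For contrast (editorial note): the zext-promoted index in s_load_b32_idxprom
+; above matches the scaled soffset form, while the plain i32 GEP below is
+; sign-extended first and falls back to a 64-bit address computation.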
+
+define amdgpu_ps float @s_load_b32_idx32(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) {
+; SDAG-LABEL: s_load_b32_idx32:
+; SDAG: ; %bb.0: ; %entry
+; SDAG-NEXT: s_ashr_i32 s3, s2, 31
+; SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; SDAG-NEXT: s_lshl_b64 s[2:3], s[2:3], 2
+; SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3]
+; SDAG-NEXT: s_load_b32 s0, s[0:1], 0x0
+; SDAG-NEXT: s_wait_kmcnt 0x0
+; SDAG-NEXT: v_mov_b32_e32 v0, s0
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: s_load_b32_idx32:
+; GISEL: ; %bb.0: ; %entry
+; GISEL-NEXT: s_ashr_i32 s3, s2, 31
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GISEL-NEXT: s_lshl_b64 s[2:3], s[2:3], 2
+; GISEL-NEXT: s_add_co_u32 s0, s0, s2
+; GISEL-NEXT: s_add_co_ci_u32 s1, s1, s3
+; GISEL-NEXT: s_load_b32 s0, s[0:1], 0x0
+; GISEL-NEXT: s_wait_kmcnt 0x0
+; GISEL-NEXT: v_mov_b32_e32 v0, s0
+; GISEL-NEXT: ; return to shader part epilog
+entry:
+ %arrayidx = getelementptr inbounds float, ptr addrspace(4) %p, i32 %idx
+ %ret = load float, ptr addrspace(4) %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @s_load_b32_idxprom_wrong_stride(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) {
+; SDAG-LABEL: s_load_b32_idxprom_wrong_stride:
+; SDAG: ; %bb.0: ; %entry
+; SDAG-NEXT: s_mov_b32 s3, 0
+; SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; SDAG-NEXT: s_lshl_b64 s[2:3], s[2:3], 3
+; SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3]
+; SDAG-NEXT: s_load_b32 s0, s[0:1], 0x0
+; SDAG-NEXT: s_wait_kmcnt 0x0
+; SDAG-NEXT: v_mov_b32_e32 v0, s0
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: s_load_b32_idxprom_wrong_stride:
+; GISEL: ; %bb.0: ; %entry
+; GISEL-NEXT: s_mov_b32 s3, 0
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GISEL-NEXT: s_lshl_b64 s[2:3], s[2:3], 3
+; GISEL-NEXT: s_add_co_u32 s0, s0, s2
+; GISEL-NEXT: s_add_co_ci_u32 s1, s1, s3
+; GISEL-NEXT: s_load_b32 s0, s[0:1], 0x0
+; GISEL-NEXT: s_wait_kmcnt 0x0
+; GISEL-NEXT: v_mov_b32_e32 v0, s0
+; GISEL-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(4) %p, i64 %idxprom
+ %ret = load float, ptr addrspace(4) %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @s_load_b16_idxprom_ioffset(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) {
+; GCN-LABEL: s_load_b16_idxprom_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_u16 s0, s[0:1], s2 offset:0x20 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = zext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds i16, ptr addrspace(4) %p, i64 %idxadd
+ %ld = load i16, ptr addrspace(4) %arrayidx, align 2
+ %ret.i32 = zext i16 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps <2 x float> @s_load_b64_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) {
+; GCN-LABEL: s_load_b64_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b64 s[0:1], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(4) %p, i64 %idxprom
+ %ret = load <2 x float>, ptr addrspace(4) %arrayidx, align 4
+ ret <2 x float> %ret
+}
+
+define amdgpu_ps <3 x float> @s_load_b96_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) {
+; GCN-LABEL: s_load_b96_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b96 s[0:2], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: v_mov_b32_e32 v2, s2
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds [3 x float], ptr addrspace(4) %p, i64 %idxprom
+ %ret = load <3 x float>, ptr addrspace(4) %arrayidx, align 4
+ ret <3 x float> %ret
+}
+
+define amdgpu_ps <4 x float> @s_load_b128_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) {
+; GCN-LABEL: s_load_b128_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b128 s[0:3], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <4 x float>, ptr addrspace(4) %p, i64 %idxprom
+ %ret = load <4 x float>, ptr addrspace(4) %arrayidx, align 4
+ ret <4 x float> %ret
+}
+
+define amdgpu_ps <8 x float> @s_load_b256_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) {
+; GCN-LABEL: s_load_b256_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b256 s[0:7], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <8 x float>, ptr addrspace(4) %p, i64 %idxprom
+ %ret = load <8 x float>, ptr addrspace(4) %arrayidx, align 4
+ ret <8 x float> %ret
+}
+
+define amdgpu_ps <16 x float> @s_load_b512_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) {
+; GCN-LABEL: s_load_b512_idxprom:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b512 s[0:15], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GCN-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
+; GCN-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
+; GCN-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
+; GCN-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <16 x float>, ptr addrspace(4) %p, i64 %idxprom
+ %ret = load <16 x float>, ptr addrspace(4) %arrayidx, align 4
+ ret <16 x float> %ret
+}
+
+define amdgpu_ps float @s_load_b32_idxprom_range(ptr addrspace(4) align 4 inreg %p) {
+; GCN-LABEL: s_load_b32_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: s_load_b32 s0, s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(4) %p, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds float, ptr addrspace(4) %p, i64 %idxprom
+ %ret = load float, ptr addrspace(4) %arrayidx, align 4
+ ret float %ret
+}
+
+define amdgpu_ps float @s_load_b32_idxprom_range_ioffset(ptr addrspace(4) align 4 inreg %p) {
+; GCN-LABEL: s_load_b32_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: s_load_b32 s0, s[0:1], s2 offset:0x40 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(4) %p, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds float, ptr addrspace(4) %p, i64 %idxadd
+ %ret = load float, ptr addrspace(4) %arrayidx, align 4
+ ret float %ret
+}
+
+; Note: this is a byte load; there is nothing to scale.
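+; (the element stride is 1 byte, so the +16 index becomes the plain immediate
+; offset:0x10 with no scale_offset modifier, as checked below.)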
+
+define amdgpu_ps float @s_load_b8_idxprom_range_ioffset(ptr addrspace(4) align 4 inreg %p) {
+; GCN-LABEL: s_load_b8_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: s_load_u8 s0, s[0:1], s2 offset:0x10
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(4) %p, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds i8, ptr addrspace(4) %p, i64 %idxadd
+ %ld = load i8, ptr addrspace(4) %arrayidx
+ %ret.i32 = zext i8 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps float @s_load_b16_idxprom_range(ptr addrspace(4) align 4 inreg %p) {
+; GCN-LABEL: s_load_b16_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: s_load_u16 s0, s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(4) %p, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds i16, ptr addrspace(4) %p, i64 %idxprom
+ %ld = load i16, ptr addrspace(4) %arrayidx, align 2
+ %ret.i32 = zext i16 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps float @s_load_b16_idxprom_range_ioffset(ptr addrspace(4) align 4 inreg %p) {
+; GCN-LABEL: s_load_b16_idxprom_range_ioffset:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: s_load_u16 s0, s[0:1], s2 offset:0x20 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(4) %p, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %idxadd = add i64 %idxprom, 16
+ %arrayidx = getelementptr inbounds i16, ptr addrspace(4) %p, i64 %idxadd
+ %ld = load i16, ptr addrspace(4) %arrayidx, align 2
+ %ret.i32 = zext i16 %ld to i32
+ %ret = bitcast i32 %ret.i32 to float
+ ret float %ret
+}
+
+define amdgpu_ps <2 x float> @s_load_b64_idxprom_range(ptr addrspace(4) align 4 inreg %p) {
+; GCN-LABEL: s_load_b64_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: s_load_b64 s[0:1], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(4) %p, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <2 x float>, ptr addrspace(4) %p, i64 %idxprom
+ %ret = load <2 x float>, ptr addrspace(4) %arrayidx, align 4
+ ret <2 x float> %ret
+}
+
+define amdgpu_ps <3 x float> @s_load_b96_idxprom_range(ptr addrspace(4) align 4 inreg %p) {
+; GCN-LABEL: s_load_b96_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: s_load_b96 s[0:2], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: v_mov_b32_e32 v2, s2
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(4) %p, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds [3 x float], ptr addrspace(4) %p, i64 %idxprom
+ %ret = load <3 x float>, ptr addrspace(4) %arrayidx, align 4
+ ret <3 x float> %ret
+}
+
+define amdgpu_ps <4 x float> @s_load_b128_idxprom_range(ptr addrspace(4) align 4 inreg %p) {
+; GCN-LABEL: s_load_b128_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: s_load_b128 s[0:3], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(4) %p, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <4 x float>, ptr addrspace(4) %p, i64 %idxprom
+ %ret = load <4 x float>, ptr addrspace(4) %arrayidx, align 4
+ ret <4 x float> %ret
+}
+
+define amdgpu_ps <8 x float> @s_load_b256_idxprom_range(ptr addrspace(4) align 4 inreg %p) {
+; GCN-LABEL: s_load_b256_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: s_load_b256 s[0:7], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(4) %p, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <8 x float>, ptr addrspace(4) %p, i64 %idxprom
+ %ret = load <8 x float>, ptr addrspace(4) %arrayidx, align 4
+ ret <8 x float> %ret
+}
+
+define amdgpu_ps <16 x float> @s_load_b512_idxprom_range(ptr addrspace(4) align 4 inreg %p) {
+; GCN-LABEL: s_load_b512_idxprom_range:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: s_load_b512 s[0:15], s[0:1], s2 offset:0x0 scale_offset
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
+; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
+; GCN-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
+; GCN-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
+; GCN-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
+; GCN-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GCN-NEXT: ; return to shader part epilog
+entry:
+ %idx = load i32, ptr addrspace(4) %p, align 4, !range !0
+ %idxprom = zext i32 %idx to i64
+ %arrayidx = getelementptr inbounds <16 x float>, ptr addrspace(4) %p, i64 %idxprom
+ %ret = load <16 x float>, ptr addrspace(4) %arrayidx, align 4
+ ret <16 x float> %ret
+}
+
+!0 = !{i32 0, i32 1024}
diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index a6b8ea3..6da7d1b 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -1819,7 +1819,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; TAHITI-NEXT: v_mul_hi_u32 v1, v0, v1
; TAHITI-NEXT: v_mul_lo_u32 v1, v1, v2
; TAHITI-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
-; TAHITI-NEXT: v_sub_i32_e32 v1, vcc, v0, v2
+; TAHITI-NEXT: v_subrev_i32_e32 v1, vcc, v2, v0
; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
; TAHITI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
; TAHITI-NEXT: v_sub_i32_e32 v1, vcc, v0, v2
@@ -6232,7 +6232,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_mul_hi_u32 v8, v14, v8
; TONGA-NEXT: v_mul_lo_u32 v8, v8, v10
; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v14, v8
-; TONGA-NEXT: v_sub_u32_e32 v9, vcc, v8, v10
+; TONGA-NEXT: v_subrev_u32_e32 v9, vcc, v10, v8
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v8, v10
; TONGA-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
; TONGA-NEXT: v_sub_u32_e32 v9, vcc, v8, v10
diff --git a/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir b/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir
index f4b9523..af8b9e7 100644
--- a/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir
+++ b/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir
@@ -966,3 +966,45 @@ body: |
$vgpr2 = V_MOV_B32_e32 $vgpr2, implicit $exec
$sgpr0 = S_MOV_B32 0
...
+
+# TODO: Unnecessary wait before overwriting vgpr0.
+---
+name: overwrite_vgpr_after_smem
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $sgpr0_sgpr1
+ ; GCN-LABEL: name: overwrite_vgpr_after_smem
+ ; GCN: liveins: $vgpr0_vgpr1, $sgpr0_sgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+ ; GCN-NEXT: S_WAIT_XCNT 0
+ ; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+ $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+...
+
+# TODO: Unnecessary wait before overwriting sgpr0.
+---
+name: overwrite_sgpr_after_vmem
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $sgpr0_sgpr1
+ ; GCN-LABEL: name: overwrite_sgpr_after_vmem
+ ; GCN: liveins: $vgpr0_vgpr1, $sgpr0_sgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+ ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ ; GCN-NEXT: S_WAIT_XCNT 0
+ ; GCN-NEXT: $sgpr0 = S_MOV_B32 0
+ $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+ $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ $sgpr0 = S_MOV_B32 0
+...
diff --git a/llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll b/llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll
new file mode 100644
index 0000000..736c86e
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll
@@ -0,0 +1,36 @@
+; RUN: opt -S --passes="print-dx-shader-flags" 2>&1 %s | FileCheck %s
+; RUN: llc %s --filetype=obj -o - | obj2yaml | FileCheck %s --check-prefix=DXC
+
+target triple = "dxil-pc-shadermodel6.7-library"
+
+; CHECK: ; Combined Shader Flags for Module
+; CHECK-NEXT: ; Shader Flags Value: 0x00000000
+; CHECK-NEXT: ;
+; CHECK-NOT: ; Note: shader requires additional functionality:
+; CHECK-NOT: ; 64-Bit integer
+; CHECK-NOT: ; Note: extra DXIL module flags:
+; CHECK-NOT: ;
+; CHECK-NEXT: ; Shader Flags for Module Functions
+; CHECK-NEXT: ; Function lifetimes : 0x00000000
+
+define void @lifetimes() #0 {
+ %a = alloca [4 x i32], align 8
+ call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %a)
+ ret void
+}
+
+; Function Attrs: nounwind memory(argmem: readwrite)
+declare void @llvm.lifetime.start.p0(i64, ptr) #1
+
+; Function Attrs: nounwind memory(argmem: readwrite)
+declare void @llvm.lifetime.end.p0(i64, ptr) #1
+
+attributes #0 = { convergent norecurse nounwind "hlsl.export"}
+attributes #1 = { nounwind memory(argmem: readwrite) }
+
+; DXC: - Name: SFI0
+; DXC-NEXT: Size: 8
+; DXC-NOT: Flags:
+; DXC-NOT: Int64Ops: true
+; DXC: ...
diff --git a/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll b/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll
index 6552ccd..77133eb 100644
--- a/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll
+++ b/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll
@@ -1,21 +1,27 @@
; RUN: opt -S -passes='dxil-op-lower' -mtriple=dxil-pc-shadermodel6.3-library %s | FileCheck %s --check-prefixes=CHECK,CHECK-SM63
; RUN: opt -S -passes='dxil-op-lower' -mtriple=dxil-pc-shadermodel6.6-library %s | FileCheck %s --check-prefixes=CHECK,CHECK-SM66
+; RUN: opt -S -dxil-prepare -dxil-embed -mtriple=dxil-pc-shadermodel6.6-library %s | FileCheck %s --check-prefixes=CHECK,CHECK-EMBED
+
+; Lifetime intrinsics are not valid prior to shader model 6.6 and are instead
+; replaced with undef stores, provided the validator version is 1.6 or greater.
+
+; The dxil-embed pass removes lifetime intrinsics because, before serializing
+; to DXIL bitcode, they would have to be transformed in a way that is illegal
+; in modern LLVM IR. So we check that no bitcast or lifetime intrinsics remain
+; after dxil-embed.
; CHECK-LABEL: define void @test_legal_lifetime() {
-;
-; CHECK-SM63-NEXT: [[ACCUM_I_FLAT:%.*]] = alloca [1 x i32], align 4
-; CHECK-SM63-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[ACCUM_I_FLAT]], i32 0
-; CHECK-SM63-NEXT: store [1 x i32] undef, ptr [[ACCUM_I_FLAT]], align 4
-; CHECK-SM63-NEXT: store i32 0, ptr [[GEP]], align 4
-; CHECK-SM63-NEXT: store [1 x i32] undef, ptr [[ACCUM_I_FLAT]], align 4
-;
-; CHECK-SM66-NEXT: [[ACCUM_I_FLAT:%.*]] = alloca [1 x i32], align 4
-; CHECK-SM66-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[ACCUM_I_FLAT]], i32 0
-; CHECK-SM66-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]])
-; CHECK-SM66-NEXT: store i32 0, ptr [[GEP]], align 4
-; CHECK-SM66-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]])
-;
-; CHECK-NEXT: ret void
+; CHECK-NEXT: [[ACCUM_I_FLAT:%.*]] = alloca [1 x i32], align 4
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[ACCUM_I_FLAT]], i32 0
+; CHECK-SM63-NEXT: store [1 x i32] undef, ptr [[ACCUM_I_FLAT]], align 4
+; CHECK-SM66-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]])
+; CHECK-EMBED-NOT: bitcast
+; CHECK-EMBED-NOT: lifetime
+; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4
+; CHECK-SM63-NEXT: store [1 x i32] undef, ptr [[ACCUM_I_FLAT]], align 4
+; CHECK-SM66-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]])
+; CHECK-EMBED-NOT: bitcast
+; CHECK-EMBED-NOT: lifetime
+; CHECK-NEXT: ret void
;
define void @test_legal_lifetime() {
%accum.i.flat = alloca [1 x i32], align 4
diff --git a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
index 23832a9..dd9a472 100644
--- a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
+++ b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
@@ -181,32 +181,32 @@ define void @combine_v16i8(ptr noundef align 16 %ptr1, ptr noundef align 16 %ptr
; ENABLED-NEXT: prmt.b32 %r5, %r4, 0, 0x7773U;
; ENABLED-NEXT: prmt.b32 %r6, %r4, 0, 0x7772U;
; ENABLED-NEXT: prmt.b32 %r7, %r4, 0, 0x7771U;
-; ENABLED-NEXT: prmt.b32 %r8, %r4, 0, 0x7770U;
-; ENABLED-NEXT: prmt.b32 %r9, %r3, 0, 0x7773U;
-; ENABLED-NEXT: prmt.b32 %r10, %r3, 0, 0x7772U;
-; ENABLED-NEXT: prmt.b32 %r11, %r3, 0, 0x7771U;
-; ENABLED-NEXT: prmt.b32 %r12, %r3, 0, 0x7770U;
-; ENABLED-NEXT: prmt.b32 %r13, %r2, 0, 0x7773U;
-; ENABLED-NEXT: prmt.b32 %r14, %r2, 0, 0x7772U;
-; ENABLED-NEXT: prmt.b32 %r15, %r2, 0, 0x7771U;
-; ENABLED-NEXT: prmt.b32 %r16, %r2, 0, 0x7770U;
-; ENABLED-NEXT: prmt.b32 %r17, %r1, 0, 0x7773U;
-; ENABLED-NEXT: prmt.b32 %r18, %r1, 0, 0x7772U;
-; ENABLED-NEXT: prmt.b32 %r19, %r1, 0, 0x7771U;
-; ENABLED-NEXT: prmt.b32 %r20, %r1, 0, 0x7770U;
+; ENABLED-NEXT: prmt.b32 %r8, %r3, 0, 0x7773U;
+; ENABLED-NEXT: prmt.b32 %r9, %r3, 0, 0x7772U;
+; ENABLED-NEXT: prmt.b32 %r10, %r3, 0, 0x7771U;
+; ENABLED-NEXT: prmt.b32 %r11, %r2, 0, 0x7773U;
+; ENABLED-NEXT: prmt.b32 %r12, %r2, 0, 0x7772U;
+; ENABLED-NEXT: prmt.b32 %r13, %r2, 0, 0x7771U;
+; ENABLED-NEXT: prmt.b32 %r14, %r1, 0, 0x7773U;
+; ENABLED-NEXT: prmt.b32 %r15, %r1, 0, 0x7772U;
+; ENABLED-NEXT: prmt.b32 %r16, %r1, 0, 0x7771U;
; ENABLED-NEXT: ld.param.b64 %rd2, [combine_v16i8_param_1];
-; ENABLED-NEXT: add.s32 %r21, %r20, %r19;
-; ENABLED-NEXT: add.s32 %r22, %r21, %r18;
-; ENABLED-NEXT: add.s32 %r23, %r22, %r17;
-; ENABLED-NEXT: add.s32 %r24, %r23, %r16;
-; ENABLED-NEXT: add.s32 %r25, %r24, %r15;
-; ENABLED-NEXT: add.s32 %r26, %r25, %r14;
-; ENABLED-NEXT: add.s32 %r27, %r26, %r13;
-; ENABLED-NEXT: add.s32 %r28, %r27, %r12;
-; ENABLED-NEXT: add.s32 %r29, %r28, %r11;
-; ENABLED-NEXT: add.s32 %r30, %r29, %r10;
-; ENABLED-NEXT: add.s32 %r31, %r30, %r9;
-; ENABLED-NEXT: add.s32 %r32, %r31, %r8;
+; ENABLED-NEXT: and.b32 %r17, %r1, 255;
+; ENABLED-NEXT: and.b32 %r18, %r2, 255;
+; ENABLED-NEXT: and.b32 %r19, %r3, 255;
+; ENABLED-NEXT: and.b32 %r20, %r4, 255;
+; ENABLED-NEXT: add.s32 %r21, %r17, %r16;
+; ENABLED-NEXT: add.s32 %r22, %r21, %r15;
+; ENABLED-NEXT: add.s32 %r23, %r22, %r14;
+; ENABLED-NEXT: add.s32 %r24, %r23, %r18;
+; ENABLED-NEXT: add.s32 %r25, %r24, %r13;
+; ENABLED-NEXT: add.s32 %r26, %r25, %r12;
+; ENABLED-NEXT: add.s32 %r27, %r26, %r11;
+; ENABLED-NEXT: add.s32 %r28, %r27, %r19;
+; ENABLED-NEXT: add.s32 %r29, %r28, %r10;
+; ENABLED-NEXT: add.s32 %r30, %r29, %r9;
+; ENABLED-NEXT: add.s32 %r31, %r30, %r8;
+; ENABLED-NEXT: add.s32 %r32, %r31, %r20;
; ENABLED-NEXT: add.s32 %r33, %r32, %r7;
; ENABLED-NEXT: add.s32 %r34, %r33, %r6;
; ENABLED-NEXT: add.s32 %r35, %r34, %r5;
@@ -332,36 +332,36 @@ define void @combine_v16i8_unaligned(ptr noundef align 8 %ptr1, ptr noundef alig
; ENABLED-NEXT: prmt.b32 %r3, %r2, 0, 0x7773U;
; ENABLED-NEXT: prmt.b32 %r4, %r2, 0, 0x7772U;
; ENABLED-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; ENABLED-NEXT: prmt.b32 %r6, %r2, 0, 0x7770U;
-; ENABLED-NEXT: prmt.b32 %r7, %r1, 0, 0x7773U;
-; ENABLED-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
-; ENABLED-NEXT: prmt.b32 %r9, %r1, 0, 0x7771U;
-; ENABLED-NEXT: prmt.b32 %r10, %r1, 0, 0x7770U;
+; ENABLED-NEXT: prmt.b32 %r6, %r1, 0, 0x7773U;
+; ENABLED-NEXT: prmt.b32 %r7, %r1, 0, 0x7772U;
+; ENABLED-NEXT: prmt.b32 %r8, %r1, 0, 0x7771U;
; ENABLED-NEXT: ld.param.b64 %rd2, [combine_v16i8_unaligned_param_1];
-; ENABLED-NEXT: ld.v2.b32 {%r11, %r12}, [%rd1+8];
-; ENABLED-NEXT: prmt.b32 %r13, %r12, 0, 0x7773U;
-; ENABLED-NEXT: prmt.b32 %r14, %r12, 0, 0x7772U;
-; ENABLED-NEXT: prmt.b32 %r15, %r12, 0, 0x7771U;
-; ENABLED-NEXT: prmt.b32 %r16, %r12, 0, 0x7770U;
-; ENABLED-NEXT: prmt.b32 %r17, %r11, 0, 0x7773U;
-; ENABLED-NEXT: prmt.b32 %r18, %r11, 0, 0x7772U;
-; ENABLED-NEXT: prmt.b32 %r19, %r11, 0, 0x7771U;
-; ENABLED-NEXT: prmt.b32 %r20, %r11, 0, 0x7770U;
-; ENABLED-NEXT: add.s32 %r21, %r10, %r9;
-; ENABLED-NEXT: add.s32 %r22, %r21, %r8;
-; ENABLED-NEXT: add.s32 %r23, %r22, %r7;
-; ENABLED-NEXT: add.s32 %r24, %r23, %r6;
+; ENABLED-NEXT: ld.v2.b32 {%r9, %r10}, [%rd1+8];
+; ENABLED-NEXT: prmt.b32 %r11, %r10, 0, 0x7773U;
+; ENABLED-NEXT: prmt.b32 %r12, %r10, 0, 0x7772U;
+; ENABLED-NEXT: prmt.b32 %r13, %r10, 0, 0x7771U;
+; ENABLED-NEXT: prmt.b32 %r14, %r9, 0, 0x7773U;
+; ENABLED-NEXT: prmt.b32 %r15, %r9, 0, 0x7772U;
+; ENABLED-NEXT: prmt.b32 %r16, %r9, 0, 0x7771U;
+; ENABLED-NEXT: and.b32 %r17, %r1, 255;
+; ENABLED-NEXT: and.b32 %r18, %r2, 255;
+; ENABLED-NEXT: and.b32 %r19, %r9, 255;
+; ENABLED-NEXT: and.b32 %r20, %r10, 255;
+; ENABLED-NEXT: add.s32 %r21, %r17, %r8;
+; ENABLED-NEXT: add.s32 %r22, %r21, %r7;
+; ENABLED-NEXT: add.s32 %r23, %r22, %r6;
+; ENABLED-NEXT: add.s32 %r24, %r23, %r18;
; ENABLED-NEXT: add.s32 %r25, %r24, %r5;
; ENABLED-NEXT: add.s32 %r26, %r25, %r4;
; ENABLED-NEXT: add.s32 %r27, %r26, %r3;
-; ENABLED-NEXT: add.s32 %r28, %r27, %r20;
-; ENABLED-NEXT: add.s32 %r29, %r28, %r19;
-; ENABLED-NEXT: add.s32 %r30, %r29, %r18;
-; ENABLED-NEXT: add.s32 %r31, %r30, %r17;
-; ENABLED-NEXT: add.s32 %r32, %r31, %r16;
-; ENABLED-NEXT: add.s32 %r33, %r32, %r15;
-; ENABLED-NEXT: add.s32 %r34, %r33, %r14;
-; ENABLED-NEXT: add.s32 %r35, %r34, %r13;
+; ENABLED-NEXT: add.s32 %r28, %r27, %r19;
+; ENABLED-NEXT: add.s32 %r29, %r28, %r16;
+; ENABLED-NEXT: add.s32 %r30, %r29, %r15;
+; ENABLED-NEXT: add.s32 %r31, %r30, %r14;
+; ENABLED-NEXT: add.s32 %r32, %r31, %r20;
+; ENABLED-NEXT: add.s32 %r33, %r32, %r13;
+; ENABLED-NEXT: add.s32 %r34, %r33, %r12;
+; ENABLED-NEXT: add.s32 %r35, %r34, %r11;
; ENABLED-NEXT: st.b32 [%rd2], %r35;
; ENABLED-NEXT: ret;
;
diff --git a/llvm/test/CodeGen/NVPTX/extractelement.ll b/llvm/test/CodeGen/NVPTX/extractelement.ll
index 80980ef..d61a63c 100644
--- a/llvm/test/CodeGen/NVPTX/extractelement.ll
+++ b/llvm/test/CodeGen/NVPTX/extractelement.ll
@@ -56,23 +56,22 @@ define i16 @test_v4i8(i32 %a) {
; CHECK-LABEL: test_v4i8(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<8>;
-; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-NEXT: .reg .b32 %r<6>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_v4i8_param_0];
-; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 0x8880U;
-; CHECK-NEXT: cvt.u16.u32 %rs1, %r2;
-; CHECK-NEXT: prmt.b32 %r3, %r1, 0, 0x9991U;
-; CHECK-NEXT: cvt.u16.u32 %rs2, %r3;
-; CHECK-NEXT: prmt.b32 %r4, %r1, 0, 0xaaa2U;
-; CHECK-NEXT: cvt.u16.u32 %rs3, %r4;
-; CHECK-NEXT: prmt.b32 %r5, %r1, 0, 0xbbb3U;
-; CHECK-NEXT: cvt.u16.u32 %rs4, %r5;
+; CHECK-NEXT: cvt.s8.s32 %rs1, %r1;
+; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 0x9991U;
+; CHECK-NEXT: cvt.u16.u32 %rs2, %r2;
+; CHECK-NEXT: prmt.b32 %r3, %r1, 0, 0xaaa2U;
+; CHECK-NEXT: cvt.u16.u32 %rs3, %r3;
+; CHECK-NEXT: prmt.b32 %r4, %r1, 0, 0xbbb3U;
+; CHECK-NEXT: cvt.u16.u32 %rs4, %r4;
; CHECK-NEXT: add.s16 %rs5, %rs1, %rs2;
; CHECK-NEXT: add.s16 %rs6, %rs3, %rs4;
; CHECK-NEXT: add.s16 %rs7, %rs5, %rs6;
-; CHECK-NEXT: cvt.u32.u16 %r6, %rs7;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: cvt.u32.u16 %r5, %rs7;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r5;
; CHECK-NEXT: ret;
%v = bitcast i32 %a to <4 x i8>
%r0 = extractelement <4 x i8> %v, i64 0
@@ -96,7 +95,7 @@ define i32 @test_v4i8_s32(i32 %a) {
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_v4i8_s32_param_0];
-; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 0x8880U;
+; CHECK-NEXT: cvt.s32.s8 %r2, %r1;
; CHECK-NEXT: prmt.b32 %r3, %r1, 0, 0x9991U;
; CHECK-NEXT: prmt.b32 %r4, %r1, 0, 0xaaa2U;
; CHECK-NEXT: prmt.b32 %r5, %r1, 0, 0xbbb3U;
@@ -127,12 +126,12 @@ define i32 @test_v4i8_u32(i32 %a) {
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_v4i8_u32_param_0];
-; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 0x7770U;
-; CHECK-NEXT: prmt.b32 %r3, %r1, 0, 0x7771U;
-; CHECK-NEXT: prmt.b32 %r4, %r1, 0, 0x7772U;
-; CHECK-NEXT: prmt.b32 %r5, %r1, 0, 0x7773U;
-; CHECK-NEXT: add.s32 %r6, %r2, %r3;
-; CHECK-NEXT: add.s32 %r7, %r4, %r5;
+; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 0x7771U;
+; CHECK-NEXT: prmt.b32 %r3, %r1, 0, 0x7772U;
+; CHECK-NEXT: prmt.b32 %r4, %r1, 0, 0x7773U;
+; CHECK-NEXT: and.b32 %r5, %r1, 255;
+; CHECK-NEXT: add.s32 %r6, %r5, %r2;
+; CHECK-NEXT: add.s32 %r7, %r3, %r4;
; CHECK-NEXT: add.s32 %r8, %r6, %r7;
; CHECK-NEXT: st.param.b32 [func_retval0], %r8;
; CHECK-NEXT: ret;
@@ -157,26 +156,24 @@ define i16 @test_v8i8(i64 %a) {
; CHECK-LABEL: test_v8i8(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<16>;
-; CHECK-NEXT: .reg .b32 %r<12>;
+; CHECK-NEXT: .reg .b32 %r<10>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_v8i8_param_0];
-; CHECK-NEXT: prmt.b32 %r3, %r1, 0, 0x8880U;
-; CHECK-NEXT: cvt.u16.u32 %rs1, %r3;
-; CHECK-NEXT: prmt.b32 %r4, %r1, 0, 0x9991U;
-; CHECK-NEXT: cvt.u16.u32 %rs2, %r4;
-; CHECK-NEXT: prmt.b32 %r5, %r1, 0, 0xaaa2U;
-; CHECK-NEXT: cvt.u16.u32 %rs3, %r5;
-; CHECK-NEXT: prmt.b32 %r6, %r1, 0, 0xbbb3U;
-; CHECK-NEXT: cvt.u16.u32 %rs4, %r6;
-; CHECK-NEXT: prmt.b32 %r7, %r2, 0, 0x8880U;
-; CHECK-NEXT: cvt.u16.u32 %rs5, %r7;
-; CHECK-NEXT: prmt.b32 %r8, %r2, 0, 0x9991U;
-; CHECK-NEXT: cvt.u16.u32 %rs6, %r8;
-; CHECK-NEXT: prmt.b32 %r9, %r2, 0, 0xaaa2U;
-; CHECK-NEXT: cvt.u16.u32 %rs7, %r9;
-; CHECK-NEXT: prmt.b32 %r10, %r2, 0, 0xbbb3U;
-; CHECK-NEXT: cvt.u16.u32 %rs8, %r10;
+; CHECK-NEXT: cvt.s8.s32 %rs1, %r1;
+; CHECK-NEXT: prmt.b32 %r3, %r1, 0, 0x9991U;
+; CHECK-NEXT: cvt.u16.u32 %rs2, %r3;
+; CHECK-NEXT: prmt.b32 %r4, %r1, 0, 0xaaa2U;
+; CHECK-NEXT: cvt.u16.u32 %rs3, %r4;
+; CHECK-NEXT: prmt.b32 %r5, %r1, 0, 0xbbb3U;
+; CHECK-NEXT: cvt.u16.u32 %rs4, %r5;
+; CHECK-NEXT: cvt.s8.s32 %rs5, %r2;
+; CHECK-NEXT: prmt.b32 %r6, %r2, 0, 0x9991U;
+; CHECK-NEXT: cvt.u16.u32 %rs6, %r6;
+; CHECK-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; CHECK-NEXT: cvt.u16.u32 %rs7, %r7;
+; CHECK-NEXT: prmt.b32 %r8, %r2, 0, 0xbbb3U;
+; CHECK-NEXT: cvt.u16.u32 %rs8, %r8;
; CHECK-NEXT: add.s16 %rs9, %rs1, %rs2;
; CHECK-NEXT: add.s16 %rs10, %rs3, %rs4;
; CHECK-NEXT: add.s16 %rs11, %rs5, %rs6;
@@ -184,8 +181,8 @@ define i16 @test_v8i8(i64 %a) {
; CHECK-NEXT: add.s16 %rs13, %rs9, %rs10;
; CHECK-NEXT: add.s16 %rs14, %rs11, %rs12;
; CHECK-NEXT: add.s16 %rs15, %rs13, %rs14;
-; CHECK-NEXT: cvt.u32.u16 %r11, %rs15;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r11;
+; CHECK-NEXT: cvt.u32.u16 %r9, %rs15;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r9;
; CHECK-NEXT: ret;
%v = bitcast i64 %a to <8 x i8>
%r0 = extractelement <8 x i8> %v, i64 0
diff --git a/llvm/test/CodeGen/NVPTX/i1-select.ll b/llvm/test/CodeGen/NVPTX/i1-select.ll
index f1adc34..9a051b3 100644
--- a/llvm/test/CodeGen/NVPTX/i1-select.ll
+++ b/llvm/test/CodeGen/NVPTX/i1-select.ll
@@ -94,27 +94,27 @@ define i32 @test_select_i1_basic(i32 %v1, i32 %v2, i32 %v3, i32 %true, i32 %fals
define i32 @test_select_i1_basic_folding(i32 %v1, i32 %v2, i32 %v3, i32 %true, i32 %false) {
; CHECK-LABEL: test_select_i1_basic_folding(
; CHECK: {
-; CHECK-NEXT: .reg .pred %p<12>;
-; CHECK-NEXT: .reg .b32 %r<9>;
+; CHECK-NEXT: .reg .pred %p<13>;
+; CHECK-NEXT: .reg .b32 %r<7>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_select_i1_basic_folding_param_0];
; CHECK-NEXT: setp.eq.b32 %p1, %r1, 0;
-; CHECK-NEXT: ld.param.b32 %r3, [test_select_i1_basic_folding_param_1];
-; CHECK-NEXT: setp.ne.b32 %p2, %r3, 0;
-; CHECK-NEXT: setp.eq.b32 %p3, %r3, 0;
-; CHECK-NEXT: ld.param.b32 %r5, [test_select_i1_basic_folding_param_2];
-; CHECK-NEXT: setp.eq.b32 %p4, %r5, 0;
-; CHECK-NEXT: ld.param.b32 %r6, [test_select_i1_basic_folding_param_3];
+; CHECK-NEXT: ld.param.b32 %r2, [test_select_i1_basic_folding_param_1];
+; CHECK-NEXT: setp.ne.b32 %p2, %r2, 0;
+; CHECK-NEXT: setp.eq.b32 %p3, %r2, 0;
+; CHECK-NEXT: ld.param.b32 %r3, [test_select_i1_basic_folding_param_2];
+; CHECK-NEXT: setp.eq.b32 %p4, %r3, 0;
+; CHECK-NEXT: ld.param.b32 %r4, [test_select_i1_basic_folding_param_3];
; CHECK-NEXT: xor.pred %p6, %p1, %p3;
-; CHECK-NEXT: ld.param.b32 %r7, [test_select_i1_basic_folding_param_4];
+; CHECK-NEXT: ld.param.b32 %r5, [test_select_i1_basic_folding_param_4];
; CHECK-NEXT: and.pred %p7, %p6, %p4;
-; CHECK-NEXT: and.pred %p8, %p2, %p4;
-; CHECK-NEXT: and.pred %p9, %p3, %p7;
-; CHECK-NEXT: or.pred %p10, %p9, %p8;
-; CHECK-NEXT: xor.pred %p11, %p10, %p3;
-; CHECK-NEXT: selp.b32 %r8, %r6, %r7, %p11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r8;
+; CHECK-NEXT: and.pred %p9, %p2, %p4;
+; CHECK-NEXT: and.pred %p10, %p3, %p7;
+; CHECK-NEXT: or.pred %p11, %p10, %p9;
+; CHECK-NEXT: xor.pred %p12, %p11, %p3;
+; CHECK-NEXT: selp.b32 %r6, %r4, %r5, %p12;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
; CHECK-NEXT: ret;
%b1 = icmp eq i32 %v1, 0
%b2 = icmp eq i32 %v2, 0
diff --git a/llvm/test/CodeGen/NVPTX/i128.ll b/llvm/test/CodeGen/NVPTX/i128.ll
index f2211eb..44d8558 100644
--- a/llvm/test/CodeGen/NVPTX/i128.ll
+++ b/llvm/test/CodeGen/NVPTX/i128.ll
@@ -5,9 +5,9 @@
define i128 @srem_i128(i128 %lhs, i128 %rhs) {
; CHECK-LABEL: srem_i128(
; CHECK: {
-; CHECK-NEXT: .reg .pred %p<22>;
+; CHECK-NEXT: .reg .pred %p<20>;
; CHECK-NEXT: .reg .b32 %r<12>;
-; CHECK-NEXT: .reg .b64 %rd<126>;
+; CHECK-NEXT: .reg .b64 %rd<127>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %_udiv-special-cases
; CHECK-NEXT: ld.param.v2.b64 {%rd45, %rd46}, [srem_i128_param_0];
@@ -42,103 +42,102 @@ define i128 @srem_i128(i128 %lhs, i128 %rhs) {
; CHECK-NEXT: cvt.u64.u32 %rd62, %r4;
; CHECK-NEXT: add.s64 %rd63, %rd62, 64;
; CHECK-NEXT: selp.b64 %rd64, %rd61, %rd63, %p7;
-; CHECK-NEXT: mov.b64 %rd116, 0;
+; CHECK-NEXT: mov.b64 %rd117, 0;
; CHECK-NEXT: sub.cc.s64 %rd66, %rd60, %rd64;
-; CHECK-NEXT: subc.cc.s64 %rd8, %rd116, 0;
-; CHECK-NEXT: setp.ne.b64 %p8, %rd8, 0;
-; CHECK-NEXT: and.pred %p10, %p8, %p8;
-; CHECK-NEXT: setp.eq.b64 %p11, %rd8, 0;
-; CHECK-NEXT: setp.gt.u64 %p12, %rd66, 127;
-; CHECK-NEXT: and.pred %p13, %p11, %p12;
-; CHECK-NEXT: or.pred %p14, %p13, %p10;
-; CHECK-NEXT: or.pred %p15, %p5, %p14;
-; CHECK-NEXT: xor.b64 %rd67, %rd66, 127;
-; CHECK-NEXT: or.b64 %rd68, %rd67, %rd8;
-; CHECK-NEXT: setp.eq.b64 %p16, %rd68, 0;
-; CHECK-NEXT: selp.b64 %rd125, 0, %rd4, %p15;
-; CHECK-NEXT: selp.b64 %rd124, 0, %rd3, %p15;
-; CHECK-NEXT: or.pred %p17, %p15, %p16;
-; CHECK-NEXT: @%p17 bra $L__BB0_5;
+; CHECK-NEXT: subc.cc.s64 %rd67, %rd117, 0;
+; CHECK-NEXT: setp.gt.u64 %p8, %rd66, 127;
+; CHECK-NEXT: setp.eq.b64 %p9, %rd67, 0;
+; CHECK-NEXT: and.pred %p10, %p9, %p8;
+; CHECK-NEXT: setp.ne.b64 %p11, %rd67, 0;
+; CHECK-NEXT: or.pred %p12, %p10, %p11;
+; CHECK-NEXT: or.pred %p13, %p5, %p12;
+; CHECK-NEXT: xor.b64 %rd68, %rd66, 127;
+; CHECK-NEXT: or.b64 %rd69, %rd68, %rd67;
+; CHECK-NEXT: setp.eq.b64 %p14, %rd69, 0;
+; CHECK-NEXT: selp.b64 %rd126, 0, %rd4, %p13;
+; CHECK-NEXT: selp.b64 %rd125, 0, %rd3, %p13;
+; CHECK-NEXT: or.pred %p15, %p13, %p14;
+; CHECK-NEXT: @%p15 bra $L__BB0_5;
; CHECK-NEXT: // %bb.3: // %udiv-bb1
-; CHECK-NEXT: add.cc.s64 %rd118, %rd66, 1;
-; CHECK-NEXT: addc.cc.s64 %rd119, %rd8, 0;
-; CHECK-NEXT: or.b64 %rd71, %rd118, %rd119;
-; CHECK-NEXT: setp.eq.b64 %p18, %rd71, 0;
+; CHECK-NEXT: add.cc.s64 %rd119, %rd66, 1;
+; CHECK-NEXT: addc.cc.s64 %rd120, %rd67, 0;
+; CHECK-NEXT: or.b64 %rd72, %rd119, %rd120;
+; CHECK-NEXT: setp.eq.b64 %p16, %rd72, 0;
; CHECK-NEXT: cvt.u32.u64 %r5, %rd66;
; CHECK-NEXT: sub.s32 %r6, 127, %r5;
-; CHECK-NEXT: shl.b64 %rd72, %rd4, %r6;
+; CHECK-NEXT: shl.b64 %rd73, %rd4, %r6;
; CHECK-NEXT: sub.s32 %r7, 64, %r6;
-; CHECK-NEXT: shr.u64 %rd73, %rd3, %r7;
-; CHECK-NEXT: or.b64 %rd74, %rd72, %rd73;
+; CHECK-NEXT: shr.u64 %rd74, %rd3, %r7;
+; CHECK-NEXT: or.b64 %rd75, %rd73, %rd74;
; CHECK-NEXT: sub.s32 %r8, 63, %r5;
-; CHECK-NEXT: shl.b64 %rd75, %rd3, %r8;
-; CHECK-NEXT: setp.gt.s32 %p19, %r6, 63;
-; CHECK-NEXT: selp.b64 %rd123, %rd75, %rd74, %p19;
-; CHECK-NEXT: shl.b64 %rd122, %rd3, %r6;
-; CHECK-NEXT: mov.b64 %rd113, %rd116;
-; CHECK-NEXT: @%p18 bra $L__BB0_4;
+; CHECK-NEXT: shl.b64 %rd76, %rd3, %r8;
+; CHECK-NEXT: setp.gt.s32 %p17, %r6, 63;
+; CHECK-NEXT: selp.b64 %rd124, %rd76, %rd75, %p17;
+; CHECK-NEXT: shl.b64 %rd123, %rd3, %r6;
+; CHECK-NEXT: mov.b64 %rd114, %rd117;
+; CHECK-NEXT: @%p16 bra $L__BB0_4;
; CHECK-NEXT: // %bb.1: // %udiv-preheader
-; CHECK-NEXT: cvt.u32.u64 %r9, %rd118;
-; CHECK-NEXT: shr.u64 %rd78, %rd3, %r9;
+; CHECK-NEXT: cvt.u32.u64 %r9, %rd119;
+; CHECK-NEXT: shr.u64 %rd79, %rd3, %r9;
; CHECK-NEXT: sub.s32 %r10, 64, %r9;
-; CHECK-NEXT: shl.b64 %rd79, %rd4, %r10;
-; CHECK-NEXT: or.b64 %rd80, %rd78, %rd79;
+; CHECK-NEXT: shl.b64 %rd80, %rd4, %r10;
+; CHECK-NEXT: or.b64 %rd81, %rd79, %rd80;
; CHECK-NEXT: add.s32 %r11, %r9, -64;
-; CHECK-NEXT: shr.u64 %rd81, %rd4, %r11;
-; CHECK-NEXT: setp.gt.s32 %p20, %r9, 63;
-; CHECK-NEXT: selp.b64 %rd120, %rd81, %rd80, %p20;
-; CHECK-NEXT: shr.u64 %rd121, %rd4, %r9;
+; CHECK-NEXT: shr.u64 %rd82, %rd4, %r11;
+; CHECK-NEXT: setp.gt.s32 %p18, %r9, 63;
+; CHECK-NEXT: selp.b64 %rd121, %rd82, %rd81, %p18;
+; CHECK-NEXT: shr.u64 %rd122, %rd4, %r9;
; CHECK-NEXT: add.cc.s64 %rd35, %rd5, -1;
; CHECK-NEXT: addc.cc.s64 %rd36, %rd6, -1;
-; CHECK-NEXT: mov.b64 %rd113, 0;
-; CHECK-NEXT: mov.b64 %rd116, %rd113;
+; CHECK-NEXT: mov.b64 %rd114, 0;
+; CHECK-NEXT: mov.b64 %rd117, %rd114;
; CHECK-NEXT: $L__BB0_2: // %udiv-do-while
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: shr.u64 %rd82, %rd120, 63;
-; CHECK-NEXT: shl.b64 %rd83, %rd121, 1;
-; CHECK-NEXT: or.b64 %rd84, %rd83, %rd82;
-; CHECK-NEXT: shl.b64 %rd85, %rd120, 1;
-; CHECK-NEXT: shr.u64 %rd86, %rd123, 63;
-; CHECK-NEXT: or.b64 %rd87, %rd85, %rd86;
-; CHECK-NEXT: shr.u64 %rd88, %rd122, 63;
-; CHECK-NEXT: shl.b64 %rd89, %rd123, 1;
-; CHECK-NEXT: or.b64 %rd90, %rd89, %rd88;
-; CHECK-NEXT: shl.b64 %rd91, %rd122, 1;
-; CHECK-NEXT: or.b64 %rd122, %rd116, %rd91;
-; CHECK-NEXT: or.b64 %rd123, %rd113, %rd90;
-; CHECK-NEXT: sub.cc.s64 %rd92, %rd35, %rd87;
-; CHECK-NEXT: subc.cc.s64 %rd93, %rd36, %rd84;
-; CHECK-NEXT: shr.s64 %rd94, %rd93, 63;
-; CHECK-NEXT: and.b64 %rd116, %rd94, 1;
-; CHECK-NEXT: and.b64 %rd95, %rd94, %rd5;
-; CHECK-NEXT: and.b64 %rd96, %rd94, %rd6;
-; CHECK-NEXT: sub.cc.s64 %rd120, %rd87, %rd95;
-; CHECK-NEXT: subc.cc.s64 %rd121, %rd84, %rd96;
-; CHECK-NEXT: add.cc.s64 %rd118, %rd118, -1;
-; CHECK-NEXT: addc.cc.s64 %rd119, %rd119, -1;
-; CHECK-NEXT: or.b64 %rd97, %rd118, %rd119;
-; CHECK-NEXT: setp.eq.b64 %p21, %rd97, 0;
-; CHECK-NEXT: @%p21 bra $L__BB0_4;
+; CHECK-NEXT: shr.u64 %rd83, %rd121, 63;
+; CHECK-NEXT: shl.b64 %rd84, %rd122, 1;
+; CHECK-NEXT: or.b64 %rd85, %rd84, %rd83;
+; CHECK-NEXT: shl.b64 %rd86, %rd121, 1;
+; CHECK-NEXT: shr.u64 %rd87, %rd124, 63;
+; CHECK-NEXT: or.b64 %rd88, %rd86, %rd87;
+; CHECK-NEXT: shr.u64 %rd89, %rd123, 63;
+; CHECK-NEXT: shl.b64 %rd90, %rd124, 1;
+; CHECK-NEXT: or.b64 %rd91, %rd90, %rd89;
+; CHECK-NEXT: shl.b64 %rd92, %rd123, 1;
+; CHECK-NEXT: or.b64 %rd123, %rd117, %rd92;
+; CHECK-NEXT: or.b64 %rd124, %rd114, %rd91;
+; CHECK-NEXT: sub.cc.s64 %rd93, %rd35, %rd88;
+; CHECK-NEXT: subc.cc.s64 %rd94, %rd36, %rd85;
+; CHECK-NEXT: shr.s64 %rd95, %rd94, 63;
+; CHECK-NEXT: and.b64 %rd117, %rd95, 1;
+; CHECK-NEXT: and.b64 %rd96, %rd95, %rd5;
+; CHECK-NEXT: and.b64 %rd97, %rd95, %rd6;
+; CHECK-NEXT: sub.cc.s64 %rd121, %rd88, %rd96;
+; CHECK-NEXT: subc.cc.s64 %rd122, %rd85, %rd97;
+; CHECK-NEXT: add.cc.s64 %rd119, %rd119, -1;
+; CHECK-NEXT: addc.cc.s64 %rd120, %rd120, -1;
+; CHECK-NEXT: or.b64 %rd98, %rd119, %rd120;
+; CHECK-NEXT: setp.eq.b64 %p19, %rd98, 0;
+; CHECK-NEXT: @%p19 bra $L__BB0_4;
; CHECK-NEXT: bra.uni $L__BB0_2;
; CHECK-NEXT: $L__BB0_4: // %udiv-loop-exit
-; CHECK-NEXT: shr.u64 %rd98, %rd122, 63;
-; CHECK-NEXT: shl.b64 %rd99, %rd123, 1;
-; CHECK-NEXT: or.b64 %rd100, %rd99, %rd98;
-; CHECK-NEXT: shl.b64 %rd101, %rd122, 1;
-; CHECK-NEXT: or.b64 %rd124, %rd116, %rd101;
-; CHECK-NEXT: or.b64 %rd125, %rd113, %rd100;
+; CHECK-NEXT: shr.u64 %rd99, %rd123, 63;
+; CHECK-NEXT: shl.b64 %rd100, %rd124, 1;
+; CHECK-NEXT: or.b64 %rd101, %rd100, %rd99;
+; CHECK-NEXT: shl.b64 %rd102, %rd123, 1;
+; CHECK-NEXT: or.b64 %rd125, %rd117, %rd102;
+; CHECK-NEXT: or.b64 %rd126, %rd114, %rd101;
; CHECK-NEXT: $L__BB0_5: // %udiv-end
-; CHECK-NEXT: mul.hi.u64 %rd102, %rd5, %rd124;
-; CHECK-NEXT: mad.lo.s64 %rd103, %rd5, %rd125, %rd102;
-; CHECK-NEXT: mad.lo.s64 %rd104, %rd6, %rd124, %rd103;
-; CHECK-NEXT: mul.lo.s64 %rd105, %rd5, %rd124;
-; CHECK-NEXT: sub.cc.s64 %rd106, %rd3, %rd105;
-; CHECK-NEXT: subc.cc.s64 %rd107, %rd4, %rd104;
-; CHECK-NEXT: xor.b64 %rd108, %rd106, %rd2;
+; CHECK-NEXT: mul.hi.u64 %rd103, %rd5, %rd125;
+; CHECK-NEXT: mad.lo.s64 %rd104, %rd5, %rd126, %rd103;
+; CHECK-NEXT: mad.lo.s64 %rd105, %rd6, %rd125, %rd104;
+; CHECK-NEXT: mul.lo.s64 %rd106, %rd5, %rd125;
+; CHECK-NEXT: sub.cc.s64 %rd107, %rd3, %rd106;
+; CHECK-NEXT: subc.cc.s64 %rd108, %rd4, %rd105;
; CHECK-NEXT: xor.b64 %rd109, %rd107, %rd2;
-; CHECK-NEXT: sub.cc.s64 %rd110, %rd108, %rd2;
-; CHECK-NEXT: subc.cc.s64 %rd111, %rd109, %rd2;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd110, %rd111};
+; CHECK-NEXT: xor.b64 %rd110, %rd108, %rd2;
+; CHECK-NEXT: sub.cc.s64 %rd111, %rd109, %rd2;
+; CHECK-NEXT: subc.cc.s64 %rd112, %rd110, %rd2;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd111, %rd112};
; CHECK-NEXT: ret;
%div = srem i128 %lhs, %rhs
ret i128 %div
@@ -149,7 +148,7 @@ define i128 @urem_i128(i128 %lhs, i128 %rhs) {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<18>;
; CHECK-NEXT: .reg .b32 %r<12>;
-; CHECK-NEXT: .reg .b64 %rd<111>;
+; CHECK-NEXT: .reg .b64 %rd<113>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %_udiv-special-cases
; CHECK-NEXT: ld.param.v2.b64 {%rd41, %rd42}, [urem_i128_param_0];
@@ -173,98 +172,98 @@ define i128 @urem_i128(i128 %lhs, i128 %rhs) {
; CHECK-NEXT: cvt.u64.u32 %rd52, %r4;
; CHECK-NEXT: add.s64 %rd53, %rd52, 64;
; CHECK-NEXT: selp.b64 %rd54, %rd51, %rd53, %p5;
-; CHECK-NEXT: mov.b64 %rd101, 0;
-; CHECK-NEXT: sub.cc.s64 %rd5, %rd50, %rd54;
-; CHECK-NEXT: subc.cc.s64 %rd6, %rd101, 0;
-; CHECK-NEXT: setp.gt.u64 %p6, %rd5, 127;
-; CHECK-NEXT: setp.eq.b64 %p7, %rd6, 0;
+; CHECK-NEXT: mov.b64 %rd103, 0;
+; CHECK-NEXT: sub.cc.s64 %rd56, %rd50, %rd54;
+; CHECK-NEXT: subc.cc.s64 %rd57, %rd103, 0;
+; CHECK-NEXT: setp.gt.u64 %p6, %rd56, 127;
+; CHECK-NEXT: setp.eq.b64 %p7, %rd57, 0;
; CHECK-NEXT: and.pred %p8, %p7, %p6;
-; CHECK-NEXT: setp.ne.b64 %p9, %rd6, 0;
+; CHECK-NEXT: setp.ne.b64 %p9, %rd57, 0;
; CHECK-NEXT: or.pred %p10, %p8, %p9;
; CHECK-NEXT: or.pred %p11, %p3, %p10;
-; CHECK-NEXT: xor.b64 %rd56, %rd5, 127;
-; CHECK-NEXT: or.b64 %rd57, %rd56, %rd6;
-; CHECK-NEXT: setp.eq.b64 %p12, %rd57, 0;
-; CHECK-NEXT: selp.b64 %rd110, 0, %rd42, %p11;
-; CHECK-NEXT: selp.b64 %rd109, 0, %rd41, %p11;
+; CHECK-NEXT: xor.b64 %rd58, %rd56, 127;
+; CHECK-NEXT: or.b64 %rd59, %rd58, %rd57;
+; CHECK-NEXT: setp.eq.b64 %p12, %rd59, 0;
+; CHECK-NEXT: selp.b64 %rd112, 0, %rd42, %p11;
+; CHECK-NEXT: selp.b64 %rd111, 0, %rd41, %p11;
; CHECK-NEXT: or.pred %p13, %p11, %p12;
; CHECK-NEXT: @%p13 bra $L__BB1_5;
; CHECK-NEXT: // %bb.3: // %udiv-bb1
-; CHECK-NEXT: add.cc.s64 %rd103, %rd5, 1;
-; CHECK-NEXT: addc.cc.s64 %rd104, %rd6, 0;
-; CHECK-NEXT: or.b64 %rd60, %rd103, %rd104;
-; CHECK-NEXT: setp.eq.b64 %p14, %rd60, 0;
-; CHECK-NEXT: cvt.u32.u64 %r5, %rd5;
+; CHECK-NEXT: add.cc.s64 %rd105, %rd56, 1;
+; CHECK-NEXT: addc.cc.s64 %rd106, %rd57, 0;
+; CHECK-NEXT: or.b64 %rd62, %rd105, %rd106;
+; CHECK-NEXT: setp.eq.b64 %p14, %rd62, 0;
+; CHECK-NEXT: cvt.u32.u64 %r5, %rd56;
; CHECK-NEXT: sub.s32 %r6, 127, %r5;
-; CHECK-NEXT: shl.b64 %rd61, %rd42, %r6;
+; CHECK-NEXT: shl.b64 %rd63, %rd42, %r6;
; CHECK-NEXT: sub.s32 %r7, 64, %r6;
-; CHECK-NEXT: shr.u64 %rd62, %rd41, %r7;
-; CHECK-NEXT: or.b64 %rd63, %rd61, %rd62;
+; CHECK-NEXT: shr.u64 %rd64, %rd41, %r7;
+; CHECK-NEXT: or.b64 %rd65, %rd63, %rd64;
; CHECK-NEXT: sub.s32 %r8, 63, %r5;
-; CHECK-NEXT: shl.b64 %rd64, %rd41, %r8;
+; CHECK-NEXT: shl.b64 %rd66, %rd41, %r8;
; CHECK-NEXT: setp.gt.s32 %p15, %r6, 63;
-; CHECK-NEXT: selp.b64 %rd108, %rd64, %rd63, %p15;
-; CHECK-NEXT: shl.b64 %rd107, %rd41, %r6;
-; CHECK-NEXT: mov.b64 %rd98, %rd101;
+; CHECK-NEXT: selp.b64 %rd110, %rd66, %rd65, %p15;
+; CHECK-NEXT: shl.b64 %rd109, %rd41, %r6;
+; CHECK-NEXT: mov.b64 %rd100, %rd103;
; CHECK-NEXT: @%p14 bra $L__BB1_4;
; CHECK-NEXT: // %bb.1: // %udiv-preheader
-; CHECK-NEXT: cvt.u32.u64 %r9, %rd103;
-; CHECK-NEXT: shr.u64 %rd67, %rd41, %r9;
+; CHECK-NEXT: cvt.u32.u64 %r9, %rd105;
+; CHECK-NEXT: shr.u64 %rd69, %rd41, %r9;
; CHECK-NEXT: sub.s32 %r10, 64, %r9;
-; CHECK-NEXT: shl.b64 %rd68, %rd42, %r10;
-; CHECK-NEXT: or.b64 %rd69, %rd67, %rd68;
+; CHECK-NEXT: shl.b64 %rd70, %rd42, %r10;
+; CHECK-NEXT: or.b64 %rd71, %rd69, %rd70;
; CHECK-NEXT: add.s32 %r11, %r9, -64;
-; CHECK-NEXT: shr.u64 %rd70, %rd42, %r11;
+; CHECK-NEXT: shr.u64 %rd72, %rd42, %r11;
; CHECK-NEXT: setp.gt.s32 %p16, %r9, 63;
-; CHECK-NEXT: selp.b64 %rd105, %rd70, %rd69, %p16;
-; CHECK-NEXT: shr.u64 %rd106, %rd42, %r9;
+; CHECK-NEXT: selp.b64 %rd107, %rd72, %rd71, %p16;
+; CHECK-NEXT: shr.u64 %rd108, %rd42, %r9;
; CHECK-NEXT: add.cc.s64 %rd33, %rd3, -1;
; CHECK-NEXT: addc.cc.s64 %rd34, %rd4, -1;
-; CHECK-NEXT: mov.b64 %rd98, 0;
-; CHECK-NEXT: mov.b64 %rd101, %rd98;
+; CHECK-NEXT: mov.b64 %rd100, 0;
+; CHECK-NEXT: mov.b64 %rd103, %rd100;
; CHECK-NEXT: $L__BB1_2: // %udiv-do-while
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: shr.u64 %rd71, %rd105, 63;
-; CHECK-NEXT: shl.b64 %rd72, %rd106, 1;
-; CHECK-NEXT: or.b64 %rd73, %rd72, %rd71;
-; CHECK-NEXT: shl.b64 %rd74, %rd105, 1;
-; CHECK-NEXT: shr.u64 %rd75, %rd108, 63;
-; CHECK-NEXT: or.b64 %rd76, %rd74, %rd75;
-; CHECK-NEXT: shr.u64 %rd77, %rd107, 63;
-; CHECK-NEXT: shl.b64 %rd78, %rd108, 1;
-; CHECK-NEXT: or.b64 %rd79, %rd78, %rd77;
-; CHECK-NEXT: shl.b64 %rd80, %rd107, 1;
-; CHECK-NEXT: or.b64 %rd107, %rd101, %rd80;
-; CHECK-NEXT: or.b64 %rd108, %rd98, %rd79;
-; CHECK-NEXT: sub.cc.s64 %rd81, %rd33, %rd76;
-; CHECK-NEXT: subc.cc.s64 %rd82, %rd34, %rd73;
-; CHECK-NEXT: shr.s64 %rd83, %rd82, 63;
-; CHECK-NEXT: and.b64 %rd101, %rd83, 1;
-; CHECK-NEXT: and.b64 %rd84, %rd83, %rd3;
-; CHECK-NEXT: and.b64 %rd85, %rd83, %rd4;
-; CHECK-NEXT: sub.cc.s64 %rd105, %rd76, %rd84;
-; CHECK-NEXT: subc.cc.s64 %rd106, %rd73, %rd85;
-; CHECK-NEXT: add.cc.s64 %rd103, %rd103, -1;
-; CHECK-NEXT: addc.cc.s64 %rd104, %rd104, -1;
-; CHECK-NEXT: or.b64 %rd86, %rd103, %rd104;
-; CHECK-NEXT: setp.eq.b64 %p17, %rd86, 0;
+; CHECK-NEXT: shr.u64 %rd73, %rd107, 63;
+; CHECK-NEXT: shl.b64 %rd74, %rd108, 1;
+; CHECK-NEXT: or.b64 %rd75, %rd74, %rd73;
+; CHECK-NEXT: shl.b64 %rd76, %rd107, 1;
+; CHECK-NEXT: shr.u64 %rd77, %rd110, 63;
+; CHECK-NEXT: or.b64 %rd78, %rd76, %rd77;
+; CHECK-NEXT: shr.u64 %rd79, %rd109, 63;
+; CHECK-NEXT: shl.b64 %rd80, %rd110, 1;
+; CHECK-NEXT: or.b64 %rd81, %rd80, %rd79;
+; CHECK-NEXT: shl.b64 %rd82, %rd109, 1;
+; CHECK-NEXT: or.b64 %rd109, %rd103, %rd82;
+; CHECK-NEXT: or.b64 %rd110, %rd100, %rd81;
+; CHECK-NEXT: sub.cc.s64 %rd83, %rd33, %rd78;
+; CHECK-NEXT: subc.cc.s64 %rd84, %rd34, %rd75;
+; CHECK-NEXT: shr.s64 %rd85, %rd84, 63;
+; CHECK-NEXT: and.b64 %rd103, %rd85, 1;
+; CHECK-NEXT: and.b64 %rd86, %rd85, %rd3;
+; CHECK-NEXT: and.b64 %rd87, %rd85, %rd4;
+; CHECK-NEXT: sub.cc.s64 %rd107, %rd78, %rd86;
+; CHECK-NEXT: subc.cc.s64 %rd108, %rd75, %rd87;
+; CHECK-NEXT: add.cc.s64 %rd105, %rd105, -1;
+; CHECK-NEXT: addc.cc.s64 %rd106, %rd106, -1;
+; CHECK-NEXT: or.b64 %rd88, %rd105, %rd106;
+; CHECK-NEXT: setp.eq.b64 %p17, %rd88, 0;
; CHECK-NEXT: @%p17 bra $L__BB1_4;
; CHECK-NEXT: bra.uni $L__BB1_2;
; CHECK-NEXT: $L__BB1_4: // %udiv-loop-exit
-; CHECK-NEXT: shr.u64 %rd87, %rd107, 63;
-; CHECK-NEXT: shl.b64 %rd88, %rd108, 1;
-; CHECK-NEXT: or.b64 %rd89, %rd88, %rd87;
-; CHECK-NEXT: shl.b64 %rd90, %rd107, 1;
-; CHECK-NEXT: or.b64 %rd109, %rd101, %rd90;
-; CHECK-NEXT: or.b64 %rd110, %rd98, %rd89;
+; CHECK-NEXT: shr.u64 %rd89, %rd109, 63;
+; CHECK-NEXT: shl.b64 %rd90, %rd110, 1;
+; CHECK-NEXT: or.b64 %rd91, %rd90, %rd89;
+; CHECK-NEXT: shl.b64 %rd92, %rd109, 1;
+; CHECK-NEXT: or.b64 %rd111, %rd103, %rd92;
+; CHECK-NEXT: or.b64 %rd112, %rd100, %rd91;
; CHECK-NEXT: $L__BB1_5: // %udiv-end
-; CHECK-NEXT: mul.hi.u64 %rd91, %rd3, %rd109;
-; CHECK-NEXT: mad.lo.s64 %rd92, %rd3, %rd110, %rd91;
-; CHECK-NEXT: mad.lo.s64 %rd93, %rd4, %rd109, %rd92;
-; CHECK-NEXT: mul.lo.s64 %rd94, %rd3, %rd109;
-; CHECK-NEXT: sub.cc.s64 %rd95, %rd41, %rd94;
-; CHECK-NEXT: subc.cc.s64 %rd96, %rd42, %rd93;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd95, %rd96};
+; CHECK-NEXT: mul.hi.u64 %rd93, %rd3, %rd111;
+; CHECK-NEXT: mad.lo.s64 %rd94, %rd3, %rd112, %rd93;
+; CHECK-NEXT: mad.lo.s64 %rd95, %rd4, %rd111, %rd94;
+; CHECK-NEXT: mul.lo.s64 %rd96, %rd3, %rd111;
+; CHECK-NEXT: sub.cc.s64 %rd97, %rd41, %rd96;
+; CHECK-NEXT: subc.cc.s64 %rd98, %rd42, %rd95;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd97, %rd98};
; CHECK-NEXT: ret;
%div = urem i128 %lhs, %rhs
ret i128 %div
@@ -307,9 +306,9 @@ define i128 @urem_i128_pow2k(i128 %lhs) {
define i128 @sdiv_i128(i128 %lhs, i128 %rhs) {
; CHECK-LABEL: sdiv_i128(
; CHECK: {
-; CHECK-NEXT: .reg .pred %p<22>;
+; CHECK-NEXT: .reg .pred %p<20>;
; CHECK-NEXT: .reg .b32 %r<12>;
-; CHECK-NEXT: .reg .b64 %rd<121>;
+; CHECK-NEXT: .reg .b64 %rd<122>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %_udiv-special-cases
; CHECK-NEXT: ld.param.v2.b64 {%rd45, %rd46}, [sdiv_i128_param_0];
@@ -345,97 +344,96 @@ define i128 @sdiv_i128(i128 %lhs, i128 %rhs) {
; CHECK-NEXT: cvt.u64.u32 %rd63, %r4;
; CHECK-NEXT: add.s64 %rd64, %rd63, 64;
; CHECK-NEXT: selp.b64 %rd65, %rd62, %rd64, %p7;
-; CHECK-NEXT: mov.b64 %rd111, 0;
+; CHECK-NEXT: mov.b64 %rd112, 0;
; CHECK-NEXT: sub.cc.s64 %rd67, %rd61, %rd65;
-; CHECK-NEXT: subc.cc.s64 %rd8, %rd111, 0;
-; CHECK-NEXT: setp.ne.b64 %p8, %rd8, 0;
-; CHECK-NEXT: and.pred %p10, %p8, %p8;
-; CHECK-NEXT: setp.eq.b64 %p11, %rd8, 0;
-; CHECK-NEXT: setp.gt.u64 %p12, %rd67, 127;
-; CHECK-NEXT: and.pred %p13, %p11, %p12;
-; CHECK-NEXT: or.pred %p14, %p13, %p10;
-; CHECK-NEXT: or.pred %p15, %p5, %p14;
-; CHECK-NEXT: xor.b64 %rd68, %rd67, 127;
-; CHECK-NEXT: or.b64 %rd69, %rd68, %rd8;
-; CHECK-NEXT: setp.eq.b64 %p16, %rd69, 0;
-; CHECK-NEXT: selp.b64 %rd120, 0, %rd2, %p15;
-; CHECK-NEXT: selp.b64 %rd119, 0, %rd1, %p15;
-; CHECK-NEXT: or.pred %p17, %p15, %p16;
-; CHECK-NEXT: @%p17 bra $L__BB4_5;
+; CHECK-NEXT: subc.cc.s64 %rd68, %rd112, 0;
+; CHECK-NEXT: setp.gt.u64 %p8, %rd67, 127;
+; CHECK-NEXT: setp.eq.b64 %p9, %rd68, 0;
+; CHECK-NEXT: and.pred %p10, %p9, %p8;
+; CHECK-NEXT: setp.ne.b64 %p11, %rd68, 0;
+; CHECK-NEXT: or.pred %p12, %p10, %p11;
+; CHECK-NEXT: or.pred %p13, %p5, %p12;
+; CHECK-NEXT: xor.b64 %rd69, %rd67, 127;
+; CHECK-NEXT: or.b64 %rd70, %rd69, %rd68;
+; CHECK-NEXT: setp.eq.b64 %p14, %rd70, 0;
+; CHECK-NEXT: selp.b64 %rd121, 0, %rd2, %p13;
+; CHECK-NEXT: selp.b64 %rd120, 0, %rd1, %p13;
+; CHECK-NEXT: or.pred %p15, %p13, %p14;
+; CHECK-NEXT: @%p15 bra $L__BB4_5;
; CHECK-NEXT: // %bb.3: // %udiv-bb1
-; CHECK-NEXT: add.cc.s64 %rd113, %rd67, 1;
-; CHECK-NEXT: addc.cc.s64 %rd114, %rd8, 0;
-; CHECK-NEXT: or.b64 %rd72, %rd113, %rd114;
-; CHECK-NEXT: setp.eq.b64 %p18, %rd72, 0;
+; CHECK-NEXT: add.cc.s64 %rd114, %rd67, 1;
+; CHECK-NEXT: addc.cc.s64 %rd115, %rd68, 0;
+; CHECK-NEXT: or.b64 %rd73, %rd114, %rd115;
+; CHECK-NEXT: setp.eq.b64 %p16, %rd73, 0;
; CHECK-NEXT: cvt.u32.u64 %r5, %rd67;
; CHECK-NEXT: sub.s32 %r6, 127, %r5;
-; CHECK-NEXT: shl.b64 %rd73, %rd2, %r6;
+; CHECK-NEXT: shl.b64 %rd74, %rd2, %r6;
; CHECK-NEXT: sub.s32 %r7, 64, %r6;
-; CHECK-NEXT: shr.u64 %rd74, %rd1, %r7;
-; CHECK-NEXT: or.b64 %rd75, %rd73, %rd74;
+; CHECK-NEXT: shr.u64 %rd75, %rd1, %r7;
+; CHECK-NEXT: or.b64 %rd76, %rd74, %rd75;
; CHECK-NEXT: sub.s32 %r8, 63, %r5;
-; CHECK-NEXT: shl.b64 %rd76, %rd1, %r8;
-; CHECK-NEXT: setp.gt.s32 %p19, %r6, 63;
-; CHECK-NEXT: selp.b64 %rd118, %rd76, %rd75, %p19;
-; CHECK-NEXT: shl.b64 %rd117, %rd1, %r6;
-; CHECK-NEXT: mov.b64 %rd108, %rd111;
-; CHECK-NEXT: @%p18 bra $L__BB4_4;
+; CHECK-NEXT: shl.b64 %rd77, %rd1, %r8;
+; CHECK-NEXT: setp.gt.s32 %p17, %r6, 63;
+; CHECK-NEXT: selp.b64 %rd119, %rd77, %rd76, %p17;
+; CHECK-NEXT: shl.b64 %rd118, %rd1, %r6;
+; CHECK-NEXT: mov.b64 %rd109, %rd112;
+; CHECK-NEXT: @%p16 bra $L__BB4_4;
; CHECK-NEXT: // %bb.1: // %udiv-preheader
-; CHECK-NEXT: cvt.u32.u64 %r9, %rd113;
-; CHECK-NEXT: shr.u64 %rd79, %rd1, %r9;
+; CHECK-NEXT: cvt.u32.u64 %r9, %rd114;
+; CHECK-NEXT: shr.u64 %rd80, %rd1, %r9;
; CHECK-NEXT: sub.s32 %r10, 64, %r9;
-; CHECK-NEXT: shl.b64 %rd80, %rd2, %r10;
-; CHECK-NEXT: or.b64 %rd81, %rd79, %rd80;
+; CHECK-NEXT: shl.b64 %rd81, %rd2, %r10;
+; CHECK-NEXT: or.b64 %rd82, %rd80, %rd81;
; CHECK-NEXT: add.s32 %r11, %r9, -64;
-; CHECK-NEXT: shr.u64 %rd82, %rd2, %r11;
-; CHECK-NEXT: setp.gt.s32 %p20, %r9, 63;
-; CHECK-NEXT: selp.b64 %rd115, %rd82, %rd81, %p20;
-; CHECK-NEXT: shr.u64 %rd116, %rd2, %r9;
+; CHECK-NEXT: shr.u64 %rd83, %rd2, %r11;
+; CHECK-NEXT: setp.gt.s32 %p18, %r9, 63;
+; CHECK-NEXT: selp.b64 %rd116, %rd83, %rd82, %p18;
+; CHECK-NEXT: shr.u64 %rd117, %rd2, %r9;
; CHECK-NEXT: add.cc.s64 %rd35, %rd3, -1;
; CHECK-NEXT: addc.cc.s64 %rd36, %rd4, -1;
-; CHECK-NEXT: mov.b64 %rd108, 0;
-; CHECK-NEXT: mov.b64 %rd111, %rd108;
+; CHECK-NEXT: mov.b64 %rd109, 0;
+; CHECK-NEXT: mov.b64 %rd112, %rd109;
; CHECK-NEXT: $L__BB4_2: // %udiv-do-while
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: shr.u64 %rd83, %rd115, 63;
-; CHECK-NEXT: shl.b64 %rd84, %rd116, 1;
-; CHECK-NEXT: or.b64 %rd85, %rd84, %rd83;
-; CHECK-NEXT: shl.b64 %rd86, %rd115, 1;
-; CHECK-NEXT: shr.u64 %rd87, %rd118, 63;
-; CHECK-NEXT: or.b64 %rd88, %rd86, %rd87;
-; CHECK-NEXT: shr.u64 %rd89, %rd117, 63;
-; CHECK-NEXT: shl.b64 %rd90, %rd118, 1;
-; CHECK-NEXT: or.b64 %rd91, %rd90, %rd89;
-; CHECK-NEXT: shl.b64 %rd92, %rd117, 1;
-; CHECK-NEXT: or.b64 %rd117, %rd111, %rd92;
-; CHECK-NEXT: or.b64 %rd118, %rd108, %rd91;
-; CHECK-NEXT: sub.cc.s64 %rd93, %rd35, %rd88;
-; CHECK-NEXT: subc.cc.s64 %rd94, %rd36, %rd85;
-; CHECK-NEXT: shr.s64 %rd95, %rd94, 63;
-; CHECK-NEXT: and.b64 %rd111, %rd95, 1;
-; CHECK-NEXT: and.b64 %rd96, %rd95, %rd3;
-; CHECK-NEXT: and.b64 %rd97, %rd95, %rd4;
-; CHECK-NEXT: sub.cc.s64 %rd115, %rd88, %rd96;
-; CHECK-NEXT: subc.cc.s64 %rd116, %rd85, %rd97;
-; CHECK-NEXT: add.cc.s64 %rd113, %rd113, -1;
-; CHECK-NEXT: addc.cc.s64 %rd114, %rd114, -1;
-; CHECK-NEXT: or.b64 %rd98, %rd113, %rd114;
-; CHECK-NEXT: setp.eq.b64 %p21, %rd98, 0;
-; CHECK-NEXT: @%p21 bra $L__BB4_4;
+; CHECK-NEXT: shr.u64 %rd84, %rd116, 63;
+; CHECK-NEXT: shl.b64 %rd85, %rd117, 1;
+; CHECK-NEXT: or.b64 %rd86, %rd85, %rd84;
+; CHECK-NEXT: shl.b64 %rd87, %rd116, 1;
+; CHECK-NEXT: shr.u64 %rd88, %rd119, 63;
+; CHECK-NEXT: or.b64 %rd89, %rd87, %rd88;
+; CHECK-NEXT: shr.u64 %rd90, %rd118, 63;
+; CHECK-NEXT: shl.b64 %rd91, %rd119, 1;
+; CHECK-NEXT: or.b64 %rd92, %rd91, %rd90;
+; CHECK-NEXT: shl.b64 %rd93, %rd118, 1;
+; CHECK-NEXT: or.b64 %rd118, %rd112, %rd93;
+; CHECK-NEXT: or.b64 %rd119, %rd109, %rd92;
+; CHECK-NEXT: sub.cc.s64 %rd94, %rd35, %rd89;
+; CHECK-NEXT: subc.cc.s64 %rd95, %rd36, %rd86;
+; CHECK-NEXT: shr.s64 %rd96, %rd95, 63;
+; CHECK-NEXT: and.b64 %rd112, %rd96, 1;
+; CHECK-NEXT: and.b64 %rd97, %rd96, %rd3;
+; CHECK-NEXT: and.b64 %rd98, %rd96, %rd4;
+; CHECK-NEXT: sub.cc.s64 %rd116, %rd89, %rd97;
+; CHECK-NEXT: subc.cc.s64 %rd117, %rd86, %rd98;
+; CHECK-NEXT: add.cc.s64 %rd114, %rd114, -1;
+; CHECK-NEXT: addc.cc.s64 %rd115, %rd115, -1;
+; CHECK-NEXT: or.b64 %rd99, %rd114, %rd115;
+; CHECK-NEXT: setp.eq.b64 %p19, %rd99, 0;
+; CHECK-NEXT: @%p19 bra $L__BB4_4;
; CHECK-NEXT: bra.uni $L__BB4_2;
; CHECK-NEXT: $L__BB4_4: // %udiv-loop-exit
-; CHECK-NEXT: shr.u64 %rd99, %rd117, 63;
-; CHECK-NEXT: shl.b64 %rd100, %rd118, 1;
-; CHECK-NEXT: or.b64 %rd101, %rd100, %rd99;
-; CHECK-NEXT: shl.b64 %rd102, %rd117, 1;
-; CHECK-NEXT: or.b64 %rd119, %rd111, %rd102;
-; CHECK-NEXT: or.b64 %rd120, %rd108, %rd101;
+; CHECK-NEXT: shr.u64 %rd100, %rd118, 63;
+; CHECK-NEXT: shl.b64 %rd101, %rd119, 1;
+; CHECK-NEXT: or.b64 %rd102, %rd101, %rd100;
+; CHECK-NEXT: shl.b64 %rd103, %rd118, 1;
+; CHECK-NEXT: or.b64 %rd120, %rd112, %rd103;
+; CHECK-NEXT: or.b64 %rd121, %rd109, %rd102;
; CHECK-NEXT: $L__BB4_5: // %udiv-end
-; CHECK-NEXT: xor.b64 %rd103, %rd119, %rd5;
; CHECK-NEXT: xor.b64 %rd104, %rd120, %rd5;
-; CHECK-NEXT: sub.cc.s64 %rd105, %rd103, %rd5;
-; CHECK-NEXT: subc.cc.s64 %rd106, %rd104, %rd5;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd105, %rd106};
+; CHECK-NEXT: xor.b64 %rd105, %rd121, %rd5;
+; CHECK-NEXT: sub.cc.s64 %rd106, %rd104, %rd5;
+; CHECK-NEXT: subc.cc.s64 %rd107, %rd105, %rd5;
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd106, %rd107};
; CHECK-NEXT: ret;
%div = sdiv i128 %lhs, %rhs
ret i128 %div
@@ -446,7 +444,7 @@ define i128 @udiv_i128(i128 %lhs, i128 %rhs) {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<18>;
; CHECK-NEXT: .reg .b32 %r<12>;
-; CHECK-NEXT: .reg .b64 %rd<105>;
+; CHECK-NEXT: .reg .b64 %rd<107>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %_udiv-special-cases
; CHECK-NEXT: ld.param.v2.b64 {%rd41, %rd42}, [udiv_i128_param_0];
@@ -470,92 +468,92 @@ define i128 @udiv_i128(i128 %lhs, i128 %rhs) {
; CHECK-NEXT: cvt.u64.u32 %rd52, %r4;
; CHECK-NEXT: add.s64 %rd53, %rd52, 64;
; CHECK-NEXT: selp.b64 %rd54, %rd51, %rd53, %p5;
-; CHECK-NEXT: mov.b64 %rd95, 0;
-; CHECK-NEXT: sub.cc.s64 %rd5, %rd50, %rd54;
-; CHECK-NEXT: subc.cc.s64 %rd6, %rd95, 0;
-; CHECK-NEXT: setp.gt.u64 %p6, %rd5, 127;
-; CHECK-NEXT: setp.eq.b64 %p7, %rd6, 0;
+; CHECK-NEXT: mov.b64 %rd97, 0;
+; CHECK-NEXT: sub.cc.s64 %rd56, %rd50, %rd54;
+; CHECK-NEXT: subc.cc.s64 %rd57, %rd97, 0;
+; CHECK-NEXT: setp.gt.u64 %p6, %rd56, 127;
+; CHECK-NEXT: setp.eq.b64 %p7, %rd57, 0;
; CHECK-NEXT: and.pred %p8, %p7, %p6;
-; CHECK-NEXT: setp.ne.b64 %p9, %rd6, 0;
+; CHECK-NEXT: setp.ne.b64 %p9, %rd57, 0;
; CHECK-NEXT: or.pred %p10, %p8, %p9;
; CHECK-NEXT: or.pred %p11, %p3, %p10;
-; CHECK-NEXT: xor.b64 %rd56, %rd5, 127;
-; CHECK-NEXT: or.b64 %rd57, %rd56, %rd6;
-; CHECK-NEXT: setp.eq.b64 %p12, %rd57, 0;
-; CHECK-NEXT: selp.b64 %rd104, 0, %rd42, %p11;
-; CHECK-NEXT: selp.b64 %rd103, 0, %rd41, %p11;
+; CHECK-NEXT: xor.b64 %rd58, %rd56, 127;
+; CHECK-NEXT: or.b64 %rd59, %rd58, %rd57;
+; CHECK-NEXT: setp.eq.b64 %p12, %rd59, 0;
+; CHECK-NEXT: selp.b64 %rd106, 0, %rd42, %p11;
+; CHECK-NEXT: selp.b64 %rd105, 0, %rd41, %p11;
; CHECK-NEXT: or.pred %p13, %p11, %p12;
; CHECK-NEXT: @%p13 bra $L__BB5_5;
; CHECK-NEXT: // %bb.3: // %udiv-bb1
-; CHECK-NEXT: add.cc.s64 %rd97, %rd5, 1;
-; CHECK-NEXT: addc.cc.s64 %rd98, %rd6, 0;
-; CHECK-NEXT: or.b64 %rd60, %rd97, %rd98;
-; CHECK-NEXT: setp.eq.b64 %p14, %rd60, 0;
-; CHECK-NEXT: cvt.u32.u64 %r5, %rd5;
+; CHECK-NEXT: add.cc.s64 %rd99, %rd56, 1;
+; CHECK-NEXT: addc.cc.s64 %rd100, %rd57, 0;
+; CHECK-NEXT: or.b64 %rd62, %rd99, %rd100;
+; CHECK-NEXT: setp.eq.b64 %p14, %rd62, 0;
+; CHECK-NEXT: cvt.u32.u64 %r5, %rd56;
; CHECK-NEXT: sub.s32 %r6, 127, %r5;
-; CHECK-NEXT: shl.b64 %rd61, %rd42, %r6;
+; CHECK-NEXT: shl.b64 %rd63, %rd42, %r6;
; CHECK-NEXT: sub.s32 %r7, 64, %r6;
-; CHECK-NEXT: shr.u64 %rd62, %rd41, %r7;
-; CHECK-NEXT: or.b64 %rd63, %rd61, %rd62;
+; CHECK-NEXT: shr.u64 %rd64, %rd41, %r7;
+; CHECK-NEXT: or.b64 %rd65, %rd63, %rd64;
; CHECK-NEXT: sub.s32 %r8, 63, %r5;
-; CHECK-NEXT: shl.b64 %rd64, %rd41, %r8;
+; CHECK-NEXT: shl.b64 %rd66, %rd41, %r8;
; CHECK-NEXT: setp.gt.s32 %p15, %r6, 63;
-; CHECK-NEXT: selp.b64 %rd102, %rd64, %rd63, %p15;
-; CHECK-NEXT: shl.b64 %rd101, %rd41, %r6;
-; CHECK-NEXT: mov.b64 %rd92, %rd95;
+; CHECK-NEXT: selp.b64 %rd104, %rd66, %rd65, %p15;
+; CHECK-NEXT: shl.b64 %rd103, %rd41, %r6;
+; CHECK-NEXT: mov.b64 %rd94, %rd97;
; CHECK-NEXT: @%p14 bra $L__BB5_4;
; CHECK-NEXT: // %bb.1: // %udiv-preheader
-; CHECK-NEXT: cvt.u32.u64 %r9, %rd97;
-; CHECK-NEXT: shr.u64 %rd67, %rd41, %r9;
+; CHECK-NEXT: cvt.u32.u64 %r9, %rd99;
+; CHECK-NEXT: shr.u64 %rd69, %rd41, %r9;
; CHECK-NEXT: sub.s32 %r10, 64, %r9;
-; CHECK-NEXT: shl.b64 %rd68, %rd42, %r10;
-; CHECK-NEXT: or.b64 %rd69, %rd67, %rd68;
+; CHECK-NEXT: shl.b64 %rd70, %rd42, %r10;
+; CHECK-NEXT: or.b64 %rd71, %rd69, %rd70;
; CHECK-NEXT: add.s32 %r11, %r9, -64;
-; CHECK-NEXT: shr.u64 %rd70, %rd42, %r11;
+; CHECK-NEXT: shr.u64 %rd72, %rd42, %r11;
; CHECK-NEXT: setp.gt.s32 %p16, %r9, 63;
-; CHECK-NEXT: selp.b64 %rd99, %rd70, %rd69, %p16;
-; CHECK-NEXT: shr.u64 %rd100, %rd42, %r9;
+; CHECK-NEXT: selp.b64 %rd101, %rd72, %rd71, %p16;
+; CHECK-NEXT: shr.u64 %rd102, %rd42, %r9;
; CHECK-NEXT: add.cc.s64 %rd33, %rd43, -1;
; CHECK-NEXT: addc.cc.s64 %rd34, %rd44, -1;
-; CHECK-NEXT: mov.b64 %rd92, 0;
-; CHECK-NEXT: mov.b64 %rd95, %rd92;
+; CHECK-NEXT: mov.b64 %rd94, 0;
+; CHECK-NEXT: mov.b64 %rd97, %rd94;
; CHECK-NEXT: $L__BB5_2: // %udiv-do-while
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: shr.u64 %rd71, %rd99, 63;
-; CHECK-NEXT: shl.b64 %rd72, %rd100, 1;
-; CHECK-NEXT: or.b64 %rd73, %rd72, %rd71;
-; CHECK-NEXT: shl.b64 %rd74, %rd99, 1;
-; CHECK-NEXT: shr.u64 %rd75, %rd102, 63;
-; CHECK-NEXT: or.b64 %rd76, %rd74, %rd75;
-; CHECK-NEXT: shr.u64 %rd77, %rd101, 63;
-; CHECK-NEXT: shl.b64 %rd78, %rd102, 1;
-; CHECK-NEXT: or.b64 %rd79, %rd78, %rd77;
-; CHECK-NEXT: shl.b64 %rd80, %rd101, 1;
-; CHECK-NEXT: or.b64 %rd101, %rd95, %rd80;
-; CHECK-NEXT: or.b64 %rd102, %rd92, %rd79;
-; CHECK-NEXT: sub.cc.s64 %rd81, %rd33, %rd76;
-; CHECK-NEXT: subc.cc.s64 %rd82, %rd34, %rd73;
-; CHECK-NEXT: shr.s64 %rd83, %rd82, 63;
-; CHECK-NEXT: and.b64 %rd95, %rd83, 1;
-; CHECK-NEXT: and.b64 %rd84, %rd83, %rd43;
-; CHECK-NEXT: and.b64 %rd85, %rd83, %rd44;
-; CHECK-NEXT: sub.cc.s64 %rd99, %rd76, %rd84;
-; CHECK-NEXT: subc.cc.s64 %rd100, %rd73, %rd85;
-; CHECK-NEXT: add.cc.s64 %rd97, %rd97, -1;
-; CHECK-NEXT: addc.cc.s64 %rd98, %rd98, -1;
-; CHECK-NEXT: or.b64 %rd86, %rd97, %rd98;
-; CHECK-NEXT: setp.eq.b64 %p17, %rd86, 0;
+; CHECK-NEXT: shr.u64 %rd73, %rd101, 63;
+; CHECK-NEXT: shl.b64 %rd74, %rd102, 1;
+; CHECK-NEXT: or.b64 %rd75, %rd74, %rd73;
+; CHECK-NEXT: shl.b64 %rd76, %rd101, 1;
+; CHECK-NEXT: shr.u64 %rd77, %rd104, 63;
+; CHECK-NEXT: or.b64 %rd78, %rd76, %rd77;
+; CHECK-NEXT: shr.u64 %rd79, %rd103, 63;
+; CHECK-NEXT: shl.b64 %rd80, %rd104, 1;
+; CHECK-NEXT: or.b64 %rd81, %rd80, %rd79;
+; CHECK-NEXT: shl.b64 %rd82, %rd103, 1;
+; CHECK-NEXT: or.b64 %rd103, %rd97, %rd82;
+; CHECK-NEXT: or.b64 %rd104, %rd94, %rd81;
+; CHECK-NEXT: sub.cc.s64 %rd83, %rd33, %rd78;
+; CHECK-NEXT: subc.cc.s64 %rd84, %rd34, %rd75;
+; CHECK-NEXT: shr.s64 %rd85, %rd84, 63;
+; CHECK-NEXT: and.b64 %rd97, %rd85, 1;
+; CHECK-NEXT: and.b64 %rd86, %rd85, %rd43;
+; CHECK-NEXT: and.b64 %rd87, %rd85, %rd44;
+; CHECK-NEXT: sub.cc.s64 %rd101, %rd78, %rd86;
+; CHECK-NEXT: subc.cc.s64 %rd102, %rd75, %rd87;
+; CHECK-NEXT: add.cc.s64 %rd99, %rd99, -1;
+; CHECK-NEXT: addc.cc.s64 %rd100, %rd100, -1;
+; CHECK-NEXT: or.b64 %rd88, %rd99, %rd100;
+; CHECK-NEXT: setp.eq.b64 %p17, %rd88, 0;
; CHECK-NEXT: @%p17 bra $L__BB5_4;
; CHECK-NEXT: bra.uni $L__BB5_2;
; CHECK-NEXT: $L__BB5_4: // %udiv-loop-exit
-; CHECK-NEXT: shr.u64 %rd87, %rd101, 63;
-; CHECK-NEXT: shl.b64 %rd88, %rd102, 1;
-; CHECK-NEXT: or.b64 %rd89, %rd88, %rd87;
-; CHECK-NEXT: shl.b64 %rd90, %rd101, 1;
-; CHECK-NEXT: or.b64 %rd103, %rd95, %rd90;
-; CHECK-NEXT: or.b64 %rd104, %rd92, %rd89;
+; CHECK-NEXT: shr.u64 %rd89, %rd103, 63;
+; CHECK-NEXT: shl.b64 %rd90, %rd104, 1;
+; CHECK-NEXT: or.b64 %rd91, %rd90, %rd89;
+; CHECK-NEXT: shl.b64 %rd92, %rd103, 1;
+; CHECK-NEXT: or.b64 %rd105, %rd97, %rd92;
+; CHECK-NEXT: or.b64 %rd106, %rd94, %rd91;
; CHECK-NEXT: $L__BB5_5: // %udiv-end
-; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd103, %rd104};
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd105, %rd106};
; CHECK-NEXT: ret;
%div = udiv i128 %lhs, %rhs
ret i128 %div
diff --git a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
index 9891e33..da99cec 100644
--- a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
@@ -2044,7 +2044,7 @@ define void @test_srem_v4i8(ptr %a, ptr %b, ptr %c) {
; O0-LABEL: test_srem_v4i8(
; O0: {
; O0-NEXT: .reg .b16 %rs<13>;
-; O0-NEXT: .reg .b32 %r<18>;
+; O0-NEXT: .reg .b32 %r<16>;
; O0-NEXT: .reg .b64 %rd<4>;
; O0-EMPTY:
; O0-NEXT: // %bb.0: // %entry
@@ -2066,27 +2066,25 @@ define void @test_srem_v4i8(ptr %a, ptr %b, ptr %c) {
; O0-NEXT: rem.s16 %rs6, %rs5, %rs4;
; O0-NEXT: cvt.u32.u16 %r8, %rs6;
; O0-NEXT: prmt.b32 %r9, %r8, %r5, 0x3340U;
-; O0-NEXT: prmt.b32 %r10, %r2, 0, 0x9991U;
-; O0-NEXT: cvt.u16.u32 %rs7, %r10;
-; O0-NEXT: prmt.b32 %r11, %r1, 0, 0x9991U;
-; O0-NEXT: cvt.u16.u32 %rs8, %r11;
+; O0-NEXT: cvt.s8.s32 %rs7, %r2;
+; O0-NEXT: cvt.s8.s32 %rs8, %r1;
; O0-NEXT: rem.s16 %rs9, %rs8, %rs7;
-; O0-NEXT: cvt.u32.u16 %r12, %rs9;
-; O0-NEXT: prmt.b32 %r13, %r2, 0, 0x8880U;
-; O0-NEXT: cvt.u16.u32 %rs10, %r13;
-; O0-NEXT: prmt.b32 %r14, %r1, 0, 0x8880U;
-; O0-NEXT: cvt.u16.u32 %rs11, %r14;
+; O0-NEXT: cvt.u32.u16 %r10, %rs9;
+; O0-NEXT: prmt.b32 %r11, %r2, 0, 0x9991U;
+; O0-NEXT: cvt.u16.u32 %rs10, %r11;
+; O0-NEXT: prmt.b32 %r12, %r1, 0, 0x9991U;
+; O0-NEXT: cvt.u16.u32 %rs11, %r12;
; O0-NEXT: rem.s16 %rs12, %rs11, %rs10;
-; O0-NEXT: cvt.u32.u16 %r15, %rs12;
-; O0-NEXT: prmt.b32 %r16, %r15, %r12, 0x3340U;
-; O0-NEXT: prmt.b32 %r17, %r16, %r9, 0x5410U;
-; O0-NEXT: st.b32 [%rd3], %r17;
+; O0-NEXT: cvt.u32.u16 %r13, %rs12;
+; O0-NEXT: prmt.b32 %r14, %r10, %r13, 0x3340U;
+; O0-NEXT: prmt.b32 %r15, %r14, %r9, 0x5410U;
+; O0-NEXT: st.b32 [%rd3], %r15;
; O0-NEXT: ret;
;
; O3-LABEL: test_srem_v4i8(
; O3: {
; O3-NEXT: .reg .b16 %rs<13>;
-; O3-NEXT: .reg .b32 %r<18>;
+; O3-NEXT: .reg .b32 %r<16>;
; O3-NEXT: .reg .b64 %rd<4>;
; O3-EMPTY:
; O3-NEXT: // %bb.0: // %entry
@@ -2108,21 +2106,19 @@ define void @test_srem_v4i8(ptr %a, ptr %b, ptr %c) {
; O3-NEXT: rem.s16 %rs6, %rs5, %rs4;
; O3-NEXT: cvt.u32.u16 %r8, %rs6;
; O3-NEXT: prmt.b32 %r9, %r8, %r5, 0x3340U;
-; O3-NEXT: prmt.b32 %r10, %r2, 0, 0x9991U;
-; O3-NEXT: cvt.u16.u32 %rs7, %r10;
-; O3-NEXT: prmt.b32 %r11, %r1, 0, 0x9991U;
-; O3-NEXT: cvt.u16.u32 %rs8, %r11;
+; O3-NEXT: cvt.s8.s32 %rs7, %r2;
+; O3-NEXT: cvt.s8.s32 %rs8, %r1;
; O3-NEXT: rem.s16 %rs9, %rs8, %rs7;
-; O3-NEXT: cvt.u32.u16 %r12, %rs9;
-; O3-NEXT: prmt.b32 %r13, %r2, 0, 0x8880U;
-; O3-NEXT: cvt.u16.u32 %rs10, %r13;
-; O3-NEXT: prmt.b32 %r14, %r1, 0, 0x8880U;
-; O3-NEXT: cvt.u16.u32 %rs11, %r14;
+; O3-NEXT: cvt.u32.u16 %r10, %rs9;
+; O3-NEXT: prmt.b32 %r11, %r2, 0, 0x9991U;
+; O3-NEXT: cvt.u16.u32 %rs10, %r11;
+; O3-NEXT: prmt.b32 %r12, %r1, 0, 0x9991U;
+; O3-NEXT: cvt.u16.u32 %rs11, %r12;
; O3-NEXT: rem.s16 %rs12, %rs11, %rs10;
-; O3-NEXT: cvt.u32.u16 %r15, %rs12;
-; O3-NEXT: prmt.b32 %r16, %r15, %r12, 0x3340U;
-; O3-NEXT: prmt.b32 %r17, %r16, %r9, 0x5410U;
-; O3-NEXT: st.b32 [%rd3], %r17;
+; O3-NEXT: cvt.u32.u16 %r13, %rs12;
+; O3-NEXT: prmt.b32 %r14, %r10, %r13, 0x3340U;
+; O3-NEXT: prmt.b32 %r15, %r14, %r9, 0x5410U;
+; O3-NEXT: st.b32 [%rd3], %r15;
; O3-NEXT: ret;
entry:
%t57 = load <4 x i8>, ptr %a, align 4
@@ -2142,7 +2138,7 @@ define void @test_srem_v3i8(ptr %a, ptr %b, ptr %c) {
; O0-LABEL: test_srem_v3i8(
; O0: {
; O0-NEXT: .reg .b16 %rs<20>;
-; O0-NEXT: .reg .b32 %r<14>;
+; O0-NEXT: .reg .b32 %r<8>;
; O0-NEXT: .reg .b64 %rd<4>;
; O0-EMPTY:
; O0-NEXT: // %bb.0: // %entry
@@ -2161,25 +2157,19 @@ define void @test_srem_v3i8(ptr %a, ptr %b, ptr %c) {
; O0-NEXT: or.b16 %rs9, %rs8, %rs6;
; O0-NEXT: cvt.u32.u16 %r2, %rs9;
; O0-NEXT: ld.s8 %rs10, [%rd2+2];
-; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x9991U;
-; O0-NEXT: cvt.u16.u32 %rs11, %r3;
-; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x9991U;
-; O0-NEXT: cvt.u16.u32 %rs12, %r4;
+; O0-NEXT: cvt.s16.s8 %rs11, %rs9;
+; O0-NEXT: cvt.s16.s8 %rs12, %rs4;
; O0-NEXT: rem.s16 %rs13, %rs12, %rs11;
-; O0-NEXT: cvt.u32.u16 %r5, %rs13;
-; O0-NEXT: prmt.b32 %r6, %r2, 0, 0x8880U;
-; O0-NEXT: cvt.u16.u32 %rs14, %r6;
-; O0-NEXT: prmt.b32 %r7, %r1, 0, 0x8880U;
-; O0-NEXT: cvt.u16.u32 %rs15, %r7;
+; O0-NEXT: cvt.u32.u16 %r3, %rs13;
+; O0-NEXT: prmt.b32 %r4, %r2, 0, 0x9991U;
+; O0-NEXT: cvt.u16.u32 %rs14, %r4;
+; O0-NEXT: prmt.b32 %r5, %r1, 0, 0x9991U;
+; O0-NEXT: cvt.u16.u32 %rs15, %r5;
; O0-NEXT: rem.s16 %rs16, %rs15, %rs14;
-; O0-NEXT: cvt.u32.u16 %r8, %rs16;
-; O0-NEXT: prmt.b32 %r9, %r8, %r5, 0x3340U;
-; O0-NEXT: // implicit-def: %r11
-; O0-NEXT: // implicit-def: %r12
-; O0-NEXT: prmt.b32 %r10, %r11, %r12, 0x3340U;
-; O0-NEXT: prmt.b32 %r13, %r9, %r10, 0x5410U;
+; O0-NEXT: cvt.u32.u16 %r6, %rs16;
+; O0-NEXT: prmt.b32 %r7, %r3, %r6, 0x3340U;
; O0-NEXT: rem.s16 %rs17, %rs5, %rs10;
-; O0-NEXT: cvt.u16.u32 %rs18, %r13;
+; O0-NEXT: cvt.u16.u32 %rs18, %r7;
; O0-NEXT: st.b8 [%rd3], %rs18;
; O0-NEXT: shr.u16 %rs19, %rs18, 8;
; O0-NEXT: st.b8 [%rd3+1], %rs19;
@@ -2189,7 +2179,7 @@ define void @test_srem_v3i8(ptr %a, ptr %b, ptr %c) {
; O3-LABEL: test_srem_v3i8(
; O3: {
; O3-NEXT: .reg .b16 %rs<20>;
-; O3-NEXT: .reg .b32 %r<14>;
+; O3-NEXT: .reg .b32 %r<8>;
; O3-NEXT: .reg .b64 %rd<4>;
; O3-EMPTY:
; O3-NEXT: // %bb.0: // %entry
@@ -2208,24 +2198,20 @@ define void @test_srem_v3i8(ptr %a, ptr %b, ptr %c) {
; O3-NEXT: cvt.u32.u16 %r2, %rs9;
; O3-NEXT: ld.s8 %rs10, [%rd2+2];
; O3-NEXT: ld.param.b64 %rd3, [test_srem_v3i8_param_2];
-; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x9991U;
-; O3-NEXT: cvt.u16.u32 %rs11, %r3;
-; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x9991U;
-; O3-NEXT: cvt.u16.u32 %rs12, %r4;
+; O3-NEXT: cvt.s16.s8 %rs11, %rs9;
+; O3-NEXT: cvt.s16.s8 %rs12, %rs4;
; O3-NEXT: rem.s16 %rs13, %rs12, %rs11;
-; O3-NEXT: cvt.u32.u16 %r5, %rs13;
-; O3-NEXT: prmt.b32 %r6, %r2, 0, 0x8880U;
-; O3-NEXT: cvt.u16.u32 %rs14, %r6;
-; O3-NEXT: prmt.b32 %r7, %r1, 0, 0x8880U;
-; O3-NEXT: cvt.u16.u32 %rs15, %r7;
+; O3-NEXT: cvt.u32.u16 %r3, %rs13;
+; O3-NEXT: prmt.b32 %r4, %r2, 0, 0x9991U;
+; O3-NEXT: cvt.u16.u32 %rs14, %r4;
+; O3-NEXT: prmt.b32 %r5, %r1, 0, 0x9991U;
+; O3-NEXT: cvt.u16.u32 %rs15, %r5;
; O3-NEXT: rem.s16 %rs16, %rs15, %rs14;
-; O3-NEXT: cvt.u32.u16 %r8, %rs16;
-; O3-NEXT: prmt.b32 %r9, %r8, %r5, 0x3340U;
-; O3-NEXT: prmt.b32 %r10, %r11, %r12, 0x3340U;
-; O3-NEXT: prmt.b32 %r13, %r9, %r10, 0x5410U;
+; O3-NEXT: cvt.u32.u16 %r6, %rs16;
+; O3-NEXT: prmt.b32 %r7, %r3, %r6, 0x3340U;
; O3-NEXT: rem.s16 %rs17, %rs5, %rs10;
; O3-NEXT: st.b8 [%rd3+2], %rs17;
-; O3-NEXT: cvt.u16.u32 %rs18, %r13;
+; O3-NEXT: cvt.u16.u32 %rs18, %r7;
; O3-NEXT: st.b8 [%rd3], %rs18;
; O3-NEXT: shr.u16 %rs19, %rs18, 8;
; O3-NEXT: st.b8 [%rd3+1], %rs19;
@@ -2340,23 +2326,22 @@ define <4 x float> @test_sitofp_v4i8(<4 x i8> %a) {
; CHECK-LABEL: test_sitofp_v4i8(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<5>;
-; CHECK-NEXT: .reg .b32 %r<10>;
+; CHECK-NEXT: .reg .b32 %r<9>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_sitofp_v4i8_param_0];
-; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 0xbbb3U;
-; CHECK-NEXT: cvt.u16.u32 %rs1, %r2;
-; CHECK-NEXT: cvt.rn.f32.s16 %r3, %rs1;
-; CHECK-NEXT: prmt.b32 %r4, %r1, 0, 0xaaa2U;
-; CHECK-NEXT: cvt.u16.u32 %rs2, %r4;
-; CHECK-NEXT: cvt.rn.f32.s16 %r5, %rs2;
-; CHECK-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
-; CHECK-NEXT: cvt.u16.u32 %rs3, %r6;
-; CHECK-NEXT: cvt.rn.f32.s16 %r7, %rs3;
-; CHECK-NEXT: prmt.b32 %r8, %r1, 0, 0x8880U;
-; CHECK-NEXT: cvt.u16.u32 %rs4, %r8;
-; CHECK-NEXT: cvt.rn.f32.s16 %r9, %rs4;
-; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r9, %r7, %r5, %r3};
+; CHECK-NEXT: cvt.s8.s32 %rs1, %r1;
+; CHECK-NEXT: cvt.rn.f32.s16 %r2, %rs1;
+; CHECK-NEXT: prmt.b32 %r3, %r1, 0, 0xbbb3U;
+; CHECK-NEXT: cvt.u16.u32 %rs2, %r3;
+; CHECK-NEXT: cvt.rn.f32.s16 %r4, %rs2;
+; CHECK-NEXT: prmt.b32 %r5, %r1, 0, 0xaaa2U;
+; CHECK-NEXT: cvt.u16.u32 %rs3, %r5;
+; CHECK-NEXT: cvt.rn.f32.s16 %r6, %rs3;
+; CHECK-NEXT: prmt.b32 %r7, %r1, 0, 0x9991U;
+; CHECK-NEXT: cvt.u16.u32 %rs4, %r7;
+; CHECK-NEXT: cvt.rn.f32.s16 %r8, %rs4;
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r2, %r8, %r6, %r4};
; CHECK-NEXT: ret;
%r = sitofp <4 x i8> %a to <4 x float>
ret <4 x float> %r
diff --git a/llvm/test/CodeGen/NVPTX/pr126337.ll b/llvm/test/CodeGen/NVPTX/pr126337.ll
index 32e4115..95258f7 100644
--- a/llvm/test/CodeGen/NVPTX/pr126337.ll
+++ b/llvm/test/CodeGen/NVPTX/pr126337.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_70 | FileCheck %s
-; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_70 | %ptxas-verify %}
+; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_70 | %ptxas -arch=sm_70 -c - %}
; This IR should compile without triggering assertions in LICM
; when the CopyToReg from %0 in the first BB gets eliminated
diff --git a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
index 821cfd0..b540948 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
@@ -764,8 +764,13 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
;
; CHECK-PWR7-LABEL: sub_absv_8_ext:
; CHECK-PWR7: # %bb.0: # %entry
-; CHECK-PWR7-NEXT: stdu r1, -448(r1)
-; CHECK-PWR7-NEXT: .cfi_def_cfa_offset 448
+; CHECK-PWR7-NEXT: stdu r1, -512(r1)
+; CHECK-PWR7-NEXT: .cfi_def_cfa_offset 512
+; CHECK-PWR7-NEXT: .cfi_offset r14, -144
+; CHECK-PWR7-NEXT: .cfi_offset r15, -136
+; CHECK-PWR7-NEXT: .cfi_offset r16, -128
+; CHECK-PWR7-NEXT: .cfi_offset r17, -120
+; CHECK-PWR7-NEXT: .cfi_offset r18, -112
; CHECK-PWR7-NEXT: .cfi_offset r19, -104
; CHECK-PWR7-NEXT: .cfi_offset r20, -96
; CHECK-PWR7-NEXT: .cfi_offset r21, -88
@@ -778,258 +783,244 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
; CHECK-PWR7-NEXT: .cfi_offset r28, -32
; CHECK-PWR7-NEXT: .cfi_offset r29, -24
; CHECK-PWR7-NEXT: .cfi_offset r30, -16
-; CHECK-PWR7-NEXT: addi r3, r1, 304
-; CHECK-PWR7-NEXT: std r19, 344(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r20, 352(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r21, 360(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r22, 368(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r23, 376(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r24, 384(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r25, 392(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r26, 400(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r27, 408(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r28, 416(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r29, 424(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r30, 432(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: stxvw4x v2, 0, r3
+; CHECK-PWR7-NEXT: .cfi_offset r31, -8
+; CHECK-PWR7-NEXT: .cfi_offset r2, -152
; CHECK-PWR7-NEXT: addi r3, r1, 320
-; CHECK-PWR7-NEXT: lbz r7, 304(r1)
-; CHECK-PWR7-NEXT: stxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT: lbz r8, 320(r1)
-; CHECK-PWR7-NEXT: lbz r9, 305(r1)
-; CHECK-PWR7-NEXT: lbz r10, 321(r1)
-; CHECK-PWR7-NEXT: lbz r26, 325(r1)
-; CHECK-PWR7-NEXT: clrlwi r7, r7, 24
-; CHECK-PWR7-NEXT: clrlwi r8, r8, 24
-; CHECK-PWR7-NEXT: clrlwi r9, r9, 24
-; CHECK-PWR7-NEXT: clrlwi r10, r10, 24
-; CHECK-PWR7-NEXT: lbz r11, 306(r1)
-; CHECK-PWR7-NEXT: lbz r12, 322(r1)
-; CHECK-PWR7-NEXT: lbz r23, 314(r1)
-; CHECK-PWR7-NEXT: clrlwi r22, r26, 24
-; CHECK-PWR7-NEXT: lbz r26, 330(r1)
-; CHECK-PWR7-NEXT: sub r8, r7, r8
-; CHECK-PWR7-NEXT: lbz r7, 315(r1)
-; CHECK-PWR7-NEXT: sub r20, r9, r10
-; CHECK-PWR7-NEXT: lbz r9, 331(r1)
-; CHECK-PWR7-NEXT: lbz r0, 307(r1)
-; CHECK-PWR7-NEXT: lbz r30, 323(r1)
-; CHECK-PWR7-NEXT: clrlwi r11, r11, 24
-; CHECK-PWR7-NEXT: clrlwi r12, r12, 24
-; CHECK-PWR7-NEXT: clrlwi r23, r23, 24
-; CHECK-PWR7-NEXT: clrlwi r21, r26, 24
-; CHECK-PWR7-NEXT: clrlwi r7, r7, 24
-; CHECK-PWR7-NEXT: clrlwi r9, r9, 24
-; CHECK-PWR7-NEXT: clrlwi r0, r0, 24
-; CHECK-PWR7-NEXT: clrlwi r30, r30, 24
-; CHECK-PWR7-NEXT: lbz r29, 308(r1)
-; CHECK-PWR7-NEXT: lbz r28, 324(r1)
-; CHECK-PWR7-NEXT: lbz r27, 309(r1)
-; CHECK-PWR7-NEXT: lbz r25, 310(r1)
-; CHECK-PWR7-NEXT: lbz r24, 326(r1)
-; CHECK-PWR7-NEXT: sub r19, r11, r12
-; CHECK-PWR7-NEXT: sub r11, r23, r21
-; CHECK-PWR7-NEXT: sub r9, r7, r9
-; CHECK-PWR7-NEXT: sub r26, r0, r30
-; CHECK-PWR7-NEXT: srawi r12, r11, 31
-; CHECK-PWR7-NEXT: srawi r0, r9, 31
-; CHECK-PWR7-NEXT: lbz r3, 312(r1)
-; CHECK-PWR7-NEXT: clrlwi r29, r29, 24
-; CHECK-PWR7-NEXT: clrlwi r28, r28, 24
-; CHECK-PWR7-NEXT: clrlwi r27, r27, 24
-; CHECK-PWR7-NEXT: clrlwi r25, r25, 24
-; CHECK-PWR7-NEXT: clrlwi r24, r24, 24
-; CHECK-PWR7-NEXT: xor r11, r11, r12
-; CHECK-PWR7-NEXT: xor r9, r9, r0
-; CHECK-PWR7-NEXT: sub r28, r29, r28
-; CHECK-PWR7-NEXT: sub r30, r27, r22
-; CHECK-PWR7-NEXT: sub r29, r25, r24
-; CHECK-PWR7-NEXT: sub r27, r11, r12
-; CHECK-PWR7-NEXT: sub r24, r9, r0
-; CHECK-PWR7-NEXT: lbz r9, 316(r1)
-; CHECK-PWR7-NEXT: lbz r11, 332(r1)
-; CHECK-PWR7-NEXT: lbz r4, 328(r1)
-; CHECK-PWR7-NEXT: lbz r5, 311(r1)
-; CHECK-PWR7-NEXT: lbz r6, 327(r1)
-; CHECK-PWR7-NEXT: clrlwi r11, r11, 24
-; CHECK-PWR7-NEXT: clrlwi r3, r3, 24
-; CHECK-PWR7-NEXT: clrlwi r4, r4, 24
-; CHECK-PWR7-NEXT: clrlwi r5, r5, 24
-; CHECK-PWR7-NEXT: clrlwi r6, r6, 24
-; CHECK-PWR7-NEXT: sub r3, r3, r4
+; CHECK-PWR7-NEXT: std r14, 368(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r15, 376(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r16, 384(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r17, 392(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r18, 400(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r19, 408(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r20, 416(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r21, 424(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r22, 432(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r23, 440(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r24, 448(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r25, 456(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r26, 464(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r27, 472(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r28, 480(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r29, 488(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r30, 496(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r31, 504(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r2, 360(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: stxvw4x v2, 0, r3
+; CHECK-PWR7-NEXT: lbz r3, 320(r1)
+; CHECK-PWR7-NEXT: addi r4, r1, 336
+; CHECK-PWR7-NEXT: stw r3, 60(r1) # 4-byte Folded Spill
+; CHECK-PWR7-NEXT: stxvw4x v3, 0, r4
+; CHECK-PWR7-NEXT: lbz r15, 334(r1)
+; CHECK-PWR7-NEXT: lbz r14, 350(r1)
+; CHECK-PWR7-NEXT: lbz r31, 335(r1)
+; CHECK-PWR7-NEXT: lbz r2, 351(r1)
+; CHECK-PWR7-NEXT: sub r15, r15, r14
+; CHECK-PWR7-NEXT: sub r14, r31, r2
+; CHECK-PWR7-NEXT: srawi r2, r14, 31
+; CHECK-PWR7-NEXT: xor r14, r14, r2
+; CHECK-PWR7-NEXT: lbz r3, 333(r1)
+; CHECK-PWR7-NEXT: lbz r19, 331(r1)
+; CHECK-PWR7-NEXT: lbz r18, 347(r1)
+; CHECK-PWR7-NEXT: sub r19, r19, r18
+; CHECK-PWR7-NEXT: lbz r17, 332(r1)
+; CHECK-PWR7-NEXT: lbz r16, 348(r1)
+; CHECK-PWR7-NEXT: sub r17, r17, r16
+; CHECK-PWR7-NEXT: lbz r23, 329(r1)
+; CHECK-PWR7-NEXT: sub r14, r14, r2
+; CHECK-PWR7-NEXT: lbz r2, 349(r1)
+; CHECK-PWR7-NEXT: lbz r22, 345(r1)
+; CHECK-PWR7-NEXT: lbz r4, 336(r1)
+; CHECK-PWR7-NEXT: lbz r5, 321(r1)
+; CHECK-PWR7-NEXT: lbz r6, 337(r1)
+; CHECK-PWR7-NEXT: lbz r7, 322(r1)
+; CHECK-PWR7-NEXT: lbz r8, 338(r1)
+; CHECK-PWR7-NEXT: lbz r9, 323(r1)
+; CHECK-PWR7-NEXT: lbz r10, 339(r1)
+; CHECK-PWR7-NEXT: lbz r11, 324(r1)
+; CHECK-PWR7-NEXT: lbz r12, 340(r1)
+; CHECK-PWR7-NEXT: lbz r0, 325(r1)
+; CHECK-PWR7-NEXT: lbz r30, 341(r1)
+; CHECK-PWR7-NEXT: lbz r29, 326(r1)
+; CHECK-PWR7-NEXT: lbz r28, 342(r1)
+; CHECK-PWR7-NEXT: lbz r27, 327(r1)
+; CHECK-PWR7-NEXT: lbz r26, 343(r1)
+; CHECK-PWR7-NEXT: sub r3, r3, r2
+; CHECK-PWR7-NEXT: lbz r25, 328(r1)
+; CHECK-PWR7-NEXT: lbz r24, 344(r1)
+; CHECK-PWR7-NEXT: lbz r21, 330(r1)
+; CHECK-PWR7-NEXT: lbz r20, 346(r1)
; CHECK-PWR7-NEXT: sub r5, r5, r6
-; CHECK-PWR7-NEXT: clrlwi r9, r9, 24
-; CHECK-PWR7-NEXT: srawi r4, r3, 31
+; CHECK-PWR7-NEXT: srawi r18, r3, 31
+; CHECK-PWR7-NEXT: sub r7, r7, r8
+; CHECK-PWR7-NEXT: sub r9, r9, r10
+; CHECK-PWR7-NEXT: sub r11, r11, r12
+; CHECK-PWR7-NEXT: sub r0, r0, r30
+; CHECK-PWR7-NEXT: sub r29, r29, r28
+; CHECK-PWR7-NEXT: sub r27, r27, r26
+; CHECK-PWR7-NEXT: sub r25, r25, r24
+; CHECK-PWR7-NEXT: srawi r31, r15, 31
+; CHECK-PWR7-NEXT: ld r2, 360(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: xor r3, r3, r18
; CHECK-PWR7-NEXT: srawi r6, r5, 31
-; CHECK-PWR7-NEXT: xor r3, r3, r4
-; CHECK-PWR7-NEXT: sldi r27, r27, 56
-; CHECK-PWR7-NEXT: xor r5, r5, r6
-; CHECK-PWR7-NEXT: sub r9, r9, r11
-; CHECK-PWR7-NEXT: sub r3, r3, r4
-; CHECK-PWR7-NEXT: sldi r24, r24, 56
+; CHECK-PWR7-NEXT: srawi r8, r7, 31
+; CHECK-PWR7-NEXT: srawi r10, r9, 31
+; CHECK-PWR7-NEXT: srawi r12, r11, 31
+; CHECK-PWR7-NEXT: srawi r30, r0, 31
+; CHECK-PWR7-NEXT: sub r3, r3, r18
+; CHECK-PWR7-NEXT: srawi r18, r19, 31
+; CHECK-PWR7-NEXT: srawi r28, r29, 31
+; CHECK-PWR7-NEXT: ld r16, 384(r1) # 8-byte Folded Reload
; CHECK-PWR7-NEXT: sldi r3, r3, 56
-; CHECK-PWR7-NEXT: srawi r11, r9, 31
-; CHECK-PWR7-NEXT: std r27, 208(r1)
-; CHECK-PWR7-NEXT: sub r4, r5, r6
-; CHECK-PWR7-NEXT: std r27, 216(r1)
-; CHECK-PWR7-NEXT: srawi r27, r29, 31
-; CHECK-PWR7-NEXT: lbz r10, 313(r1)
-; CHECK-PWR7-NEXT: xor r9, r9, r11
-; CHECK-PWR7-NEXT: std r24, 224(r1)
-; CHECK-PWR7-NEXT: lbz r22, 329(r1)
-; CHECK-PWR7-NEXT: std r24, 232(r1)
-; CHECK-PWR7-NEXT: srawi r24, r30, 31
-; CHECK-PWR7-NEXT: ld r21, 360(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: sub r23, r9, r11
-; CHECK-PWR7-NEXT: lbz r9, 317(r1)
-; CHECK-PWR7-NEXT: lbz r11, 333(r1)
-; CHECK-PWR7-NEXT: xor r29, r29, r27
-; CHECK-PWR7-NEXT: std r3, 176(r1)
-; CHECK-PWR7-NEXT: std r3, 184(r1)
-; CHECK-PWR7-NEXT: sldi r3, r4, 56
-; CHECK-PWR7-NEXT: sldi r23, r23, 56
-; CHECK-PWR7-NEXT: xor r30, r30, r24
-; CHECK-PWR7-NEXT: clrlwi r9, r9, 24
-; CHECK-PWR7-NEXT: clrlwi r11, r11, 24
-; CHECK-PWR7-NEXT: sub r4, r30, r24
-; CHECK-PWR7-NEXT: ld r30, 432(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: std r3, 160(r1)
-; CHECK-PWR7-NEXT: std r3, 168(r1)
-; CHECK-PWR7-NEXT: sub r9, r9, r11
-; CHECK-PWR7-NEXT: sub r3, r29, r27
-; CHECK-PWR7-NEXT: std r23, 240(r1)
-; CHECK-PWR7-NEXT: ld r29, 424(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: srawi r11, r9, 31
-; CHECK-PWR7-NEXT: std r23, 248(r1)
-; CHECK-PWR7-NEXT: ld r27, 408(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: srawi r23, r28, 31
+; CHECK-PWR7-NEXT: srawi r26, r27, 31
+; CHECK-PWR7-NEXT: srawi r24, r25, 31
+; CHECK-PWR7-NEXT: xor r19, r19, r18
+; CHECK-PWR7-NEXT: xor r15, r15, r31
+; CHECK-PWR7-NEXT: xor r5, r5, r6
+; CHECK-PWR7-NEXT: std r3, 272(r1)
+; CHECK-PWR7-NEXT: std r3, 280(r1)
+; CHECK-PWR7-NEXT: srawi r3, r17, 31
+; CHECK-PWR7-NEXT: sub r19, r19, r18
+; CHECK-PWR7-NEXT: xor r7, r7, r8
+; CHECK-PWR7-NEXT: sub r15, r15, r31
+; CHECK-PWR7-NEXT: xor r17, r17, r3
+; CHECK-PWR7-NEXT: xor r9, r9, r10
+; CHECK-PWR7-NEXT: xor r11, r11, r12
+; CHECK-PWR7-NEXT: xor r0, r0, r30
+; CHECK-PWR7-NEXT: xor r29, r29, r28
+; CHECK-PWR7-NEXT: xor r27, r27, r26
+; CHECK-PWR7-NEXT: sub r3, r17, r3
+; CHECK-PWR7-NEXT: xor r25, r25, r24
+; CHECK-PWR7-NEXT: sub r25, r25, r24
+; CHECK-PWR7-NEXT: sub r27, r27, r26
+; CHECK-PWR7-NEXT: sub r29, r29, r28
; CHECK-PWR7-NEXT: sldi r3, r3, 56
-; CHECK-PWR7-NEXT: xor r28, r28, r23
-; CHECK-PWR7-NEXT: xor r9, r9, r11
-; CHECK-PWR7-NEXT: std r3, 144(r1)
-; CHECK-PWR7-NEXT: ld r24, 384(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: std r3, 152(r1)
-; CHECK-PWR7-NEXT: sldi r3, r4, 56
-; CHECK-PWR7-NEXT: sub r25, r9, r11
-; CHECK-PWR7-NEXT: lbz r9, 318(r1)
-; CHECK-PWR7-NEXT: lbz r11, 334(r1)
-; CHECK-PWR7-NEXT: std r3, 128(r1)
+; CHECK-PWR7-NEXT: sub r0, r0, r30
+; CHECK-PWR7-NEXT: sub r11, r11, r12
+; CHECK-PWR7-NEXT: sub r9, r9, r10
+; CHECK-PWR7-NEXT: sub r7, r7, r8
+; CHECK-PWR7-NEXT: sub r5, r5, r6
+; CHECK-PWR7-NEXT: sldi r14, r14, 56
+; CHECK-PWR7-NEXT: sldi r15, r15, 56
+; CHECK-PWR7-NEXT: ld r31, 504(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: std r3, 256(r1)
+; CHECK-PWR7-NEXT: std r3, 264(r1)
+; CHECK-PWR7-NEXT: sldi r3, r19, 56
; CHECK-PWR7-NEXT: sldi r25, r25, 56
-; CHECK-PWR7-NEXT: std r3, 136(r1)
-; CHECK-PWR7-NEXT: sub r3, r28, r23
+; CHECK-PWR7-NEXT: sldi r27, r27, 56
+; CHECK-PWR7-NEXT: std r3, 240(r1)
+; CHECK-PWR7-NEXT: std r3, 248(r1)
+; CHECK-PWR7-NEXT: sub r3, r23, r22
+; CHECK-PWR7-NEXT: srawi r23, r3, 31
+; CHECK-PWR7-NEXT: sub r22, r21, r20
+; CHECK-PWR7-NEXT: srawi r21, r22, 31
+; CHECK-PWR7-NEXT: sldi r29, r29, 56
+; CHECK-PWR7-NEXT: sldi r0, r0, 56
+; CHECK-PWR7-NEXT: sldi r11, r11, 56
+; CHECK-PWR7-NEXT: xor r3, r3, r23
+; CHECK-PWR7-NEXT: xor r22, r22, r21
+; CHECK-PWR7-NEXT: sldi r9, r9, 56
+; CHECK-PWR7-NEXT: sldi r7, r7, 56
+; CHECK-PWR7-NEXT: sldi r5, r5, 56
+; CHECK-PWR7-NEXT: ld r30, 496(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: ld r28, 480(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: sub r3, r3, r23
+; CHECK-PWR7-NEXT: sub r22, r22, r21
+; CHECK-PWR7-NEXT: std r14, 304(r1)
+; CHECK-PWR7-NEXT: ld r26, 464(r1) # 8-byte Folded Reload
; CHECK-PWR7-NEXT: sldi r3, r3, 56
-; CHECK-PWR7-NEXT: std r3, 112(r1)
-; CHECK-PWR7-NEXT: ld r28, 416(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: clrlwi r9, r9, 24
-; CHECK-PWR7-NEXT: clrlwi r11, r11, 24
-; CHECK-PWR7-NEXT: clrlwi r10, r10, 24
-; CHECK-PWR7-NEXT: std r25, 256(r1)
-; CHECK-PWR7-NEXT: std r25, 264(r1)
-; CHECK-PWR7-NEXT: sub r9, r9, r11
-; CHECK-PWR7-NEXT: srawi r25, r26, 31
-; CHECK-PWR7-NEXT: xor r26, r26, r25
-; CHECK-PWR7-NEXT: ld r23, 376(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: srawi r11, r9, 31
-; CHECK-PWR7-NEXT: std r3, 120(r1)
-; CHECK-PWR7-NEXT: sub r4, r26, r25
-; CHECK-PWR7-NEXT: clrlwi r22, r22, 24
-; CHECK-PWR7-NEXT: srawi r7, r8, 31
-; CHECK-PWR7-NEXT: sub r10, r10, r22
-; CHECK-PWR7-NEXT: ld r26, 400(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: xor r9, r9, r11
-; CHECK-PWR7-NEXT: sldi r3, r4, 56
-; CHECK-PWR7-NEXT: srawi r22, r10, 31
-; CHECK-PWR7-NEXT: xor r8, r8, r7
-; CHECK-PWR7-NEXT: xor r10, r10, r22
-; CHECK-PWR7-NEXT: sub r10, r10, r22
-; CHECK-PWR7-NEXT: ld r25, 392(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: sub r12, r9, r11
-; CHECK-PWR7-NEXT: lbz r9, 319(r1)
-; CHECK-PWR7-NEXT: lbz r11, 335(r1)
-; CHECK-PWR7-NEXT: std r3, 96(r1)
-; CHECK-PWR7-NEXT: sldi r12, r12, 56
-; CHECK-PWR7-NEXT: std r3, 104(r1)
-; CHECK-PWR7-NEXT: ld r22, 368(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: sldi r10, r10, 56
-; CHECK-PWR7-NEXT: std r10, 192(r1)
-; CHECK-PWR7-NEXT: clrlwi r9, r9, 24
-; CHECK-PWR7-NEXT: clrlwi r11, r11, 24
-; CHECK-PWR7-NEXT: sub r9, r9, r11
-; CHECK-PWR7-NEXT: std r12, 272(r1)
-; CHECK-PWR7-NEXT: std r12, 280(r1)
-; CHECK-PWR7-NEXT: srawi r12, r19, 31
-; CHECK-PWR7-NEXT: xor r0, r19, r12
-; CHECK-PWR7-NEXT: ld r19, 344(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: sub r3, r0, r12
-; CHECK-PWR7-NEXT: srawi r11, r9, 31
-; CHECK-PWR7-NEXT: std r10, 200(r1)
-; CHECK-PWR7-NEXT: xor r9, r9, r11
+; CHECK-PWR7-NEXT: sldi r22, r22, 56
+; CHECK-PWR7-NEXT: ld r24, 448(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: ld r23, 440(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: std r14, 312(r1)
+; CHECK-PWR7-NEXT: std r15, 288(r1)
+; CHECK-PWR7-NEXT: std r3, 208(r1)
+; CHECK-PWR7-NEXT: std r3, 216(r1)
+; CHECK-PWR7-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload
+; CHECK-PWR7-NEXT: std r15, 296(r1)
+; CHECK-PWR7-NEXT: ld r21, 424(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: ld r20, 416(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: std r22, 224(r1)
+; CHECK-PWR7-NEXT: std r22, 232(r1)
+; CHECK-PWR7-NEXT: sub r4, r3, r4
+; CHECK-PWR7-NEXT: std r25, 192(r1)
+; CHECK-PWR7-NEXT: ld r22, 432(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: ld r19, 408(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: srawi r3, r4, 31
+; CHECK-PWR7-NEXT: std r25, 200(r1)
+; CHECK-PWR7-NEXT: ld r25, 456(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: std r27, 176(r1)
+; CHECK-PWR7-NEXT: std r27, 184(r1)
+; CHECK-PWR7-NEXT: xor r4, r4, r3
+; CHECK-PWR7-NEXT: std r29, 160(r1)
+; CHECK-PWR7-NEXT: ld r27, 472(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: std r29, 168(r1)
+; CHECK-PWR7-NEXT: std r0, 144(r1)
+; CHECK-PWR7-NEXT: sub r3, r4, r3
+; CHECK-PWR7-NEXT: std r0, 152(r1)
+; CHECK-PWR7-NEXT: ld r29, 488(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: ld r18, 400(r1) # 8-byte Folded Reload
; CHECK-PWR7-NEXT: sldi r3, r3, 56
-; CHECK-PWR7-NEXT: sub r9, r9, r11
-; CHECK-PWR7-NEXT: std r3, 80(r1)
-; CHECK-PWR7-NEXT: std r3, 88(r1)
-; CHECK-PWR7-NEXT: sldi r9, r9, 56
-; CHECK-PWR7-NEXT: std r9, 288(r1)
-; CHECK-PWR7-NEXT: std r9, 296(r1)
-; CHECK-PWR7-NEXT: srawi r9, r20, 31
-; CHECK-PWR7-NEXT: xor r11, r20, r9
-; CHECK-PWR7-NEXT: ld r20, 352(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: sub r4, r11, r9
-; CHECK-PWR7-NEXT: sldi r3, r4, 56
+; CHECK-PWR7-NEXT: std r11, 128(r1)
+; CHECK-PWR7-NEXT: ld r17, 392(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: std r11, 136(r1)
+; CHECK-PWR7-NEXT: std r9, 112(r1)
; CHECK-PWR7-NEXT: std r3, 64(r1)
; CHECK-PWR7-NEXT: std r3, 72(r1)
-; CHECK-PWR7-NEXT: sub r3, r8, r7
-; CHECK-PWR7-NEXT: sldi r3, r3, 56
-; CHECK-PWR7-NEXT: std r3, 48(r1)
-; CHECK-PWR7-NEXT: std r3, 56(r1)
-; CHECK-PWR7-NEXT: addi r3, r1, 288
+; CHECK-PWR7-NEXT: addi r3, r1, 304
+; CHECK-PWR7-NEXT: std r9, 120(r1)
+; CHECK-PWR7-NEXT: ld r15, 376(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: std r7, 96(r1)
+; CHECK-PWR7-NEXT: std r7, 104(r1)
+; CHECK-PWR7-NEXT: std r5, 80(r1)
+; CHECK-PWR7-NEXT: std r5, 88(r1)
; CHECK-PWR7-NEXT: lxvw4x v2, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 272
+; CHECK-PWR7-NEXT: addi r3, r1, 288
; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 256
+; CHECK-PWR7-NEXT: addi r3, r1, 272
+; CHECK-PWR7-NEXT: ld r14, 368(r1) # 8-byte Folded Reload
; CHECK-PWR7-NEXT: vmrghb v2, v3, v2
; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 240
+; CHECK-PWR7-NEXT: addi r3, r1, 256
; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 224
+; CHECK-PWR7-NEXT: addi r3, r1, 240
; CHECK-PWR7-NEXT: vmrghb v3, v4, v3
; CHECK-PWR7-NEXT: vmrghh v2, v3, v2
; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 208
+; CHECK-PWR7-NEXT: addi r3, r1, 224
; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 192
+; CHECK-PWR7-NEXT: addi r3, r1, 208
; CHECK-PWR7-NEXT: vmrghb v3, v4, v3
; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 176
+; CHECK-PWR7-NEXT: addi r3, r1, 192
; CHECK-PWR7-NEXT: lxvw4x v5, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 160
+; CHECK-PWR7-NEXT: addi r3, r1, 176
; CHECK-PWR7-NEXT: vmrghb v4, v5, v4
; CHECK-PWR7-NEXT: vmrghh v3, v4, v3
; CHECK-PWR7-NEXT: xxmrghw vs0, v3, v2
; CHECK-PWR7-NEXT: lxvw4x v2, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 144
+; CHECK-PWR7-NEXT: addi r3, r1, 160
; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 128
+; CHECK-PWR7-NEXT: addi r3, r1, 144
; CHECK-PWR7-NEXT: vmrghb v2, v3, v2
; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 112
+; CHECK-PWR7-NEXT: addi r3, r1, 128
; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 96
; CHECK-PWR7-NEXT: vmrghb v3, v4, v3
+; CHECK-PWR7-NEXT: addi r3, r1, 112
; CHECK-PWR7-NEXT: vmrghh v2, v3, v2
; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 80
+; CHECK-PWR7-NEXT: addi r3, r1, 96
; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 64
+; CHECK-PWR7-NEXT: addi r3, r1, 80
; CHECK-PWR7-NEXT: vmrghb v3, v4, v3
; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 48
+; CHECK-PWR7-NEXT: addi r3, r1, 64
; CHECK-PWR7-NEXT: lxvw4x v5, 0, r3
; CHECK-PWR7-NEXT: vmrghb v4, v5, v4
; CHECK-PWR7-NEXT: vmrghh v3, v4, v3
; CHECK-PWR7-NEXT: xxmrghw vs1, v3, v2
; CHECK-PWR7-NEXT: xxmrghd v2, vs1, vs0
-; CHECK-PWR7-NEXT: addi r1, r1, 448
+; CHECK-PWR7-NEXT: addi r1, r1, 512
; CHECK-PWR7-NEXT: blr
entry:
%vecext = extractelement <16 x i8> %a, i32 0
diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
index 246e6a6..117e3e4 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
@@ -3292,30 +3292,30 @@ define i64 @ustest_f64i64_mm(double %x) {
; RV32IF-NEXT: mv a1, a0
; RV32IF-NEXT: addi a0, sp, 8
; RV32IF-NEXT: call __fixdfti
-; RV32IF-NEXT: lw a0, 8(sp)
-; RV32IF-NEXT: lw a1, 12(sp)
-; RV32IF-NEXT: lw a2, 20(sp)
+; RV32IF-NEXT: lw a0, 20(sp)
+; RV32IF-NEXT: lw a1, 8(sp)
+; RV32IF-NEXT: lw a2, 12(sp)
; RV32IF-NEXT: lw a3, 16(sp)
-; RV32IF-NEXT: beqz a2, .LBB47_2
+; RV32IF-NEXT: beqz a0, .LBB47_2
; RV32IF-NEXT: # %bb.1: # %entry
-; RV32IF-NEXT: slti a4, a2, 0
+; RV32IF-NEXT: slti a4, a0, 0
; RV32IF-NEXT: j .LBB47_3
; RV32IF-NEXT: .LBB47_2:
; RV32IF-NEXT: seqz a4, a3
; RV32IF-NEXT: .LBB47_3: # %entry
; RV32IF-NEXT: xori a3, a3, 1
-; RV32IF-NEXT: or a3, a3, a2
+; RV32IF-NEXT: or a3, a3, a0
; RV32IF-NEXT: seqz a3, a3
; RV32IF-NEXT: addi a3, a3, -1
; RV32IF-NEXT: and a3, a3, a4
; RV32IF-NEXT: neg a3, a3
+; RV32IF-NEXT: and a2, a3, a2
; RV32IF-NEXT: and a1, a3, a1
; RV32IF-NEXT: and a0, a3, a0
-; RV32IF-NEXT: and a2, a3, a2
-; RV32IF-NEXT: slti a2, a2, 0
-; RV32IF-NEXT: addi a2, a2, -1
-; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: and a1, a2, a1
+; RV32IF-NEXT: slti a0, a0, 0
+; RV32IF-NEXT: addi a3, a0, -1
+; RV32IF-NEXT: and a0, a3, a1
+; RV32IF-NEXT: and a1, a3, a2
; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 32
@@ -3354,30 +3354,30 @@ define i64 @ustest_f64i64_mm(double %x) {
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: addi a0, sp, 8
; RV32IFD-NEXT: call __fixdfti
-; RV32IFD-NEXT: lw a0, 8(sp)
-; RV32IFD-NEXT: lw a1, 12(sp)
-; RV32IFD-NEXT: lw a2, 20(sp)
+; RV32IFD-NEXT: lw a0, 20(sp)
+; RV32IFD-NEXT: lw a1, 8(sp)
+; RV32IFD-NEXT: lw a2, 12(sp)
; RV32IFD-NEXT: lw a3, 16(sp)
-; RV32IFD-NEXT: beqz a2, .LBB47_2
+; RV32IFD-NEXT: beqz a0, .LBB47_2
; RV32IFD-NEXT: # %bb.1: # %entry
-; RV32IFD-NEXT: slti a4, a2, 0
+; RV32IFD-NEXT: slti a4, a0, 0
; RV32IFD-NEXT: j .LBB47_3
; RV32IFD-NEXT: .LBB47_2:
; RV32IFD-NEXT: seqz a4, a3
; RV32IFD-NEXT: .LBB47_3: # %entry
; RV32IFD-NEXT: xori a3, a3, 1
-; RV32IFD-NEXT: or a3, a3, a2
+; RV32IFD-NEXT: or a3, a3, a0
; RV32IFD-NEXT: seqz a3, a3
; RV32IFD-NEXT: addi a3, a3, -1
; RV32IFD-NEXT: and a3, a3, a4
; RV32IFD-NEXT: neg a3, a3
+; RV32IFD-NEXT: and a2, a3, a2
; RV32IFD-NEXT: and a1, a3, a1
; RV32IFD-NEXT: and a0, a3, a0
-; RV32IFD-NEXT: and a2, a3, a2
-; RV32IFD-NEXT: slti a2, a2, 0
-; RV32IFD-NEXT: addi a2, a2, -1
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: and a1, a2, a1
+; RV32IFD-NEXT: slti a0, a0, 0
+; RV32IFD-NEXT: addi a3, a0, -1
+; RV32IFD-NEXT: and a0, a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: addi sp, sp, 32
@@ -3530,30 +3530,30 @@ define i64 @ustest_f32i64_mm(float %x) {
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixsfti
-; RV32-NEXT: lw a0, 8(sp)
-; RV32-NEXT: lw a1, 12(sp)
-; RV32-NEXT: lw a2, 20(sp)
+; RV32-NEXT: lw a0, 20(sp)
+; RV32-NEXT: lw a1, 8(sp)
+; RV32-NEXT: lw a2, 12(sp)
; RV32-NEXT: lw a3, 16(sp)
-; RV32-NEXT: beqz a2, .LBB50_2
+; RV32-NEXT: beqz a0, .LBB50_2
; RV32-NEXT: # %bb.1: # %entry
-; RV32-NEXT: slti a4, a2, 0
+; RV32-NEXT: slti a4, a0, 0
; RV32-NEXT: j .LBB50_3
; RV32-NEXT: .LBB50_2:
; RV32-NEXT: seqz a4, a3
; RV32-NEXT: .LBB50_3: # %entry
; RV32-NEXT: xori a3, a3, 1
-; RV32-NEXT: or a3, a3, a2
+; RV32-NEXT: or a3, a3, a0
; RV32-NEXT: seqz a3, a3
; RV32-NEXT: addi a3, a3, -1
; RV32-NEXT: and a3, a3, a4
; RV32-NEXT: neg a3, a3
+; RV32-NEXT: and a2, a3, a2
; RV32-NEXT: and a1, a3, a1
; RV32-NEXT: and a0, a3, a0
-; RV32-NEXT: and a2, a3, a2
-; RV32-NEXT: slti a2, a2, 0
-; RV32-NEXT: addi a2, a2, -1
-; RV32-NEXT: and a0, a2, a0
-; RV32-NEXT: and a1, a2, a1
+; RV32-NEXT: slti a0, a0, 0
+; RV32-NEXT: addi a3, a0, -1
+; RV32-NEXT: and a0, a3, a1
+; RV32-NEXT: and a1, a3, a2
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
@@ -3767,30 +3767,30 @@ define i64 @ustest_f16i64_mm(half %x) {
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixsfti
-; RV32-NEXT: lw a0, 8(sp)
-; RV32-NEXT: lw a1, 12(sp)
-; RV32-NEXT: lw a2, 20(sp)
+; RV32-NEXT: lw a0, 20(sp)
+; RV32-NEXT: lw a1, 8(sp)
+; RV32-NEXT: lw a2, 12(sp)
; RV32-NEXT: lw a3, 16(sp)
-; RV32-NEXT: beqz a2, .LBB53_2
+; RV32-NEXT: beqz a0, .LBB53_2
; RV32-NEXT: # %bb.1: # %entry
-; RV32-NEXT: slti a4, a2, 0
+; RV32-NEXT: slti a4, a0, 0
; RV32-NEXT: j .LBB53_3
; RV32-NEXT: .LBB53_2:
; RV32-NEXT: seqz a4, a3
; RV32-NEXT: .LBB53_3: # %entry
; RV32-NEXT: xori a3, a3, 1
-; RV32-NEXT: or a3, a3, a2
+; RV32-NEXT: or a3, a3, a0
; RV32-NEXT: seqz a3, a3
; RV32-NEXT: addi a3, a3, -1
; RV32-NEXT: and a3, a3, a4
; RV32-NEXT: neg a3, a3
+; RV32-NEXT: and a2, a3, a2
; RV32-NEXT: and a1, a3, a1
; RV32-NEXT: and a0, a3, a0
-; RV32-NEXT: and a2, a3, a2
-; RV32-NEXT: slti a2, a2, 0
-; RV32-NEXT: addi a2, a2, -1
-; RV32-NEXT: and a0, a2, a0
-; RV32-NEXT: and a1, a2, a1
+; RV32-NEXT: slti a0, a0, 0
+; RV32-NEXT: addi a3, a0, -1
+; RV32-NEXT: and a0, a3, a1
+; RV32-NEXT: and a1, a3, a2
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
index 87c8343..a06c750 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
@@ -7,18 +7,18 @@
define i32 @ctz_nxv4i32(<vscale x 4 x i32> %a) #0 {
; RV32-LABEL: ctz_nxv4i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; RV32-NEXT: vid.v v10
-; RV32-NEXT: vmv.v.i v11, -1
; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; RV32-NEXT: vid.v v10
+; RV32-NEXT: li a1, -1
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT: vmsne.vi v0, v8, 0
; RV32-NEXT: srli a0, a0, 1
; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vmacc.vv v8, v10, v11
-; RV32-NEXT: vmv.v.i v9, 0
-; RV32-NEXT: vmerge.vvm v8, v9, v8, v0
+; RV32-NEXT: vmadd.vx v10, a1, v8
+; RV32-NEXT: vmv.v.i v8, 0
+; RV32-NEXT: vmerge.vvm v8, v8, v10, v0
; RV32-NEXT: vredmaxu.vs v8, v8, v8
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: sub a0, a0, a1
@@ -28,18 +28,18 @@ define i32 @ctz_nxv4i32(<vscale x 4 x i32> %a) #0 {
;
; RV64-LABEL: ctz_nxv4i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; RV64-NEXT: vid.v v10
-; RV64-NEXT: vmv.v.i v11, -1
; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; RV64-NEXT: vid.v v10
+; RV64-NEXT: li a1, -1
; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT: vmsne.vi v0, v8, 0
; RV64-NEXT: srli a0, a0, 1
; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vmacc.vv v8, v10, v11
-; RV64-NEXT: vmv.v.i v9, 0
-; RV64-NEXT: vmerge.vvm v8, v9, v8, v0
+; RV64-NEXT: vmadd.vx v10, a1, v8
+; RV64-NEXT: vmv.v.i v8, 0
+; RV64-NEXT: vmerge.vvm v8, v8, v10, v0
; RV64-NEXT: vredmaxu.vs v8, v8, v8
; RV64-NEXT: vmv.x.s a1, v8
; RV64-NEXT: sub a0, a0, a1
@@ -109,17 +109,17 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) {
;
; RV64-LABEL: ctz_nxv8i1_no_range:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; RV64-NEXT: vid.v v16
-; RV64-NEXT: vmv.v.i v24, -1
; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT: vid.v v16
+; RV64-NEXT: li a1, -1
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV64-NEXT: vmsne.vi v0, v8, 0
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vmacc.vv v8, v16, v24
-; RV64-NEXT: vmv.v.i v16, 0
-; RV64-NEXT: vmerge.vvm v8, v16, v8, v0
+; RV64-NEXT: vmadd.vx v16, a1, v8
+; RV64-NEXT: vmv.v.i v8, 0
+; RV64-NEXT: vmerge.vvm v8, v8, v16, v0
; RV64-NEXT: vredmaxu.vs v8, v8, v8
; RV64-NEXT: vmv.x.s a1, v8
; RV64-NEXT: sub a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/pr148084.ll b/llvm/test/CodeGen/RISCV/pr148084.ll
new file mode 100644
index 0000000..9fa26c7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/pr148084.ll
@@ -0,0 +1,279 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s | FileCheck %s
+
+source_filename = "external/libaom/av1/encoder/tx_search.c"
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+target triple = "riscv64-unknown-linux-android10000"
+
+define fastcc void @search_tx_type() #0 {
+; CHECK-LABEL: search_tx_type:
+; CHECK: # %bb.0: # %._crit_edge.i
+; CHECK-NEXT: # %bb.1: # %bb
+; CHECK-NEXT: lbu a1, 0(zero)
+; CHECK-NEXT: lw a0, 0(zero)
+; CHECK-NEXT: lh a2, 0(zero)
+; CHECK-NEXT: seqz a1, a1
+; CHECK-NEXT: srai a3, a0, 63
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: and a1, a1, a2
+; CHECK-NEXT: andi a2, a1, 1
+; CHECK-NEXT: addi a2, a2, -1
+; CHECK-NEXT: or a3, a3, a0
+; CHECK-NEXT: or a2, a2, a3
+; CHECK-NEXT: bgez a2, .LBB0_3
+; CHECK-NEXT: # %bb.2:
+; CHECK-NEXT: bexti a3, a1, 1
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a2, a3
+; CHECK-NEXT: .LBB0_3: # %bb
+; CHECK-NEXT: andi a4, a1, 4
+; CHECK-NEXT: mv a3, a2
+; CHECK-NEXT: beqz a4, .LBB0_5
+; CHECK-NEXT: # %bb.4: # %bb
+; CHECK-NEXT: mv a3, a0
+; CHECK-NEXT: .LBB0_5: # %bb
+; CHECK-NEXT: blt a2, a0, .LBB0_7
+; CHECK-NEXT: # %bb.6: # %bb
+; CHECK-NEXT: mv a3, a2
+; CHECK-NEXT: .LBB0_7: # %bb
+; CHECK-NEXT: andi a5, a1, 8
+; CHECK-NEXT: sext.w a4, a3
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: beqz a5, .LBB0_9
+; CHECK-NEXT: # %bb.8: # %bb
+; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: .LBB0_9: # %bb
+; CHECK-NEXT: blt a4, a0, .LBB0_11
+; CHECK-NEXT: # %bb.10: # %bb
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: .LBB0_11: # %bb
+; CHECK-NEXT: andi a5, a1, 16
+; CHECK-NEXT: sext.w a4, a2
+; CHECK-NEXT: mv a3, a2
+; CHECK-NEXT: beqz a5, .LBB0_13
+; CHECK-NEXT: # %bb.12: # %bb
+; CHECK-NEXT: mv a3, a0
+; CHECK-NEXT: .LBB0_13: # %bb
+; CHECK-NEXT: blt a4, a0, .LBB0_15
+; CHECK-NEXT: # %bb.14: # %bb
+; CHECK-NEXT: mv a3, a2
+; CHECK-NEXT: .LBB0_15: # %bb
+; CHECK-NEXT: andi a5, a1, 32
+; CHECK-NEXT: sext.w a4, a3
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: beqz a5, .LBB0_17
+; CHECK-NEXT: # %bb.16: # %bb
+; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: .LBB0_17: # %bb
+; CHECK-NEXT: blt a4, a0, .LBB0_19
+; CHECK-NEXT: # %bb.18: # %bb
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: .LBB0_19: # %bb
+; CHECK-NEXT: andi a5, a1, 64
+; CHECK-NEXT: sext.w a4, a2
+; CHECK-NEXT: mv a3, a2
+; CHECK-NEXT: beqz a5, .LBB0_21
+; CHECK-NEXT: # %bb.20: # %bb
+; CHECK-NEXT: mv a3, a0
+; CHECK-NEXT: .LBB0_21: # %bb
+; CHECK-NEXT: blt a4, a0, .LBB0_23
+; CHECK-NEXT: # %bb.22: # %bb
+; CHECK-NEXT: mv a3, a2
+; CHECK-NEXT: .LBB0_23: # %bb
+; CHECK-NEXT: andi a5, a1, 128
+; CHECK-NEXT: sext.w a4, a3
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: beqz a5, .LBB0_25
+; CHECK-NEXT: # %bb.24: # %bb
+; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: .LBB0_25: # %bb
+; CHECK-NEXT: blt a4, a0, .LBB0_27
+; CHECK-NEXT: # %bb.26: # %bb
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: .LBB0_27: # %bb
+; CHECK-NEXT: andi a5, a1, 256
+; CHECK-NEXT: sext.w a4, a2
+; CHECK-NEXT: mv a3, a2
+; CHECK-NEXT: beqz a5, .LBB0_29
+; CHECK-NEXT: # %bb.28: # %bb
+; CHECK-NEXT: mv a3, a0
+; CHECK-NEXT: .LBB0_29: # %bb
+; CHECK-NEXT: blt a4, a0, .LBB0_31
+; CHECK-NEXT: # %bb.30: # %bb
+; CHECK-NEXT: mv a3, a2
+; CHECK-NEXT: .LBB0_31: # %bb
+; CHECK-NEXT: andi a5, a1, 512
+; CHECK-NEXT: sext.w a4, a3
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: beqz a5, .LBB0_33
+; CHECK-NEXT: # %bb.32: # %bb
+; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: .LBB0_33: # %bb
+; CHECK-NEXT: blt a4, a0, .LBB0_35
+; CHECK-NEXT: # %bb.34: # %bb
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: .LBB0_35: # %bb
+; CHECK-NEXT: andi a5, a1, 1024
+; CHECK-NEXT: sext.w a4, a2
+; CHECK-NEXT: mv a3, a2
+; CHECK-NEXT: beqz a5, .LBB0_37
+; CHECK-NEXT: # %bb.36: # %bb
+; CHECK-NEXT: mv a3, a0
+; CHECK-NEXT: .LBB0_37: # %bb
+; CHECK-NEXT: blt a4, a0, .LBB0_39
+; CHECK-NEXT: # %bb.38: # %bb
+; CHECK-NEXT: mv a3, a2
+; CHECK-NEXT: .LBB0_39: # %bb
+; CHECK-NEXT: slli a5, a1, 52
+; CHECK-NEXT: sext.w a4, a3
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: bgez a5, .LBB0_41
+; CHECK-NEXT: # %bb.40: # %bb
+; CHECK-NEXT: mv a2, a0
+; CHECK-NEXT: .LBB0_41: # %bb
+; CHECK-NEXT: blt a4, a0, .LBB0_43
+; CHECK-NEXT: # %bb.42: # %bb
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: .LBB0_43: # %bb
+; CHECK-NEXT: slli a4, a1, 51
+; CHECK-NEXT: sext.w a3, a2
+; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: bltz a4, .LBB0_49
+; CHECK-NEXT: # %bb.44: # %bb
+; CHECK-NEXT: bge a3, a0, .LBB0_50
+; CHECK-NEXT: .LBB0_45: # %bb
+; CHECK-NEXT: sext.w a2, a1
+; CHECK-NEXT: blt a2, a0, .LBB0_47
+; CHECK-NEXT: .LBB0_46: # %bb
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB0_47: # %bb
+; CHECK-NEXT: sext.w a0, a0
+; CHECK-NEXT: # %bb.48: # %get_tx_mask.exit
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB0_49: # %bb
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: blt a3, a0, .LBB0_45
+; CHECK-NEXT: .LBB0_50: # %bb
+; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: sext.w a2, a2
+; CHECK-NEXT: bge a2, a0, .LBB0_46
+; CHECK-NEXT: j .LBB0_47
+._crit_edge.i:
+ %.in196.i = load i16, ptr null, align 2
+ %i2 = load i16, ptr null, align 2
+ %i3 = and i16 %i2, %.in196.i
+ %i9 = trunc nuw i8 0 to i1
+ br i1 %i9, label %get_tx_mask.exit, label %bb
+
+bb: ; preds = %._crit_edge.i
+ %i13 = load i8, ptr null, align 1
+ %i14 = icmp eq i8 %i13, 0
+ %spec.select211.i = select i1 %i14, i16 0, i16 %i3
+ %i19 = load i32, ptr null, align 4
+ %i20 = zext i16 %spec.select211.i to i32
+ %i21 = load i32, ptr null, align 4
+ %i22 = icmp sgt i32 %i21, -1
+ %i23 = and i32 %i20, 1
+ %.not203.i = icmp eq i32 %i23, 0
+ %spec.select212.i = select i1 %.not203.i, i32 -1, i32 %i21
+ %.1174.i = select i1 %i22, i32 %spec.select212.i, i32 -1
+ %i28 = icmp sgt i32 0, %.1174.i
+ %i29 = and i32 %i20, 2
+ %.not203.1.not.i = icmp eq i32 %i29, 0
+ %spec.select212.1.i = select i1 %.not203.1.not.i, i32 %.1174.i, i32 0
+ %.1174.1.i = select i1 %i28, i32 %spec.select212.1.i, i32 %.1174.i
+ %i30 = load i32, ptr null, align 4
+ %i31 = icmp sgt i32 %i30, %.1174.1.i
+ %i32 = and i32 %i20, 4
+ %.not203.2.i = icmp eq i32 %i32, 0
+ %spec.select212.2.i = select i1 %.not203.2.i, i32 %.1174.1.i, i32 %i30
+ %.1174.2.i = select i1 %i31, i32 %spec.select212.2.i, i32 %.1174.1.i
+ %i36 = load i32, ptr null, align 4
+ %i37 = icmp sgt i32 %i36, %.1174.2.i
+ %i38 = and i32 %i20, 8
+ %.not203.3.i = icmp eq i32 %i38, 0
+ %spec.select212.3.i = select i1 %.not203.3.i, i32 %.1174.2.i, i32 %i36
+ %.1174.3.i = select i1 %i37, i32 %spec.select212.3.i, i32 %.1174.2.i
+ %i42 = load i32, ptr null, align 4
+ %i43 = icmp sgt i32 %i42, %.1174.3.i
+ %i44 = and i32 %i20, 16
+ %.not203.4.i = icmp eq i32 %i44, 0
+ %spec.select212.4.i = select i1 %.not203.4.i, i32 %.1174.3.i, i32 %i42
+ %.1174.4.i = select i1 %i43, i32 %spec.select212.4.i, i32 %.1174.3.i
+ %i48 = load i32, ptr null, align 4
+ %i49 = icmp sgt i32 %i48, %.1174.4.i
+ %i50 = and i32 %i20, 32
+ %.not203.5.i = icmp eq i32 %i50, 0
+ %spec.select212.5.i = select i1 %.not203.5.i, i32 %.1174.4.i, i32 %i48
+ %.1174.5.i = select i1 %i49, i32 %spec.select212.5.i, i32 %.1174.4.i
+ %i51 = load i32, ptr null, align 4
+ %i52 = icmp sgt i32 %i51, %.1174.5.i
+ %i53 = and i32 %i20, 64
+ %.not203.6.i = icmp eq i32 %i53, 0
+ %spec.select212.6.i = select i1 %.not203.6.i, i32 %.1174.5.i, i32 %i51
+ %.1174.6.i = select i1 %i52, i32 %spec.select212.6.i, i32 %.1174.5.i
+ %i56 = load i32, ptr null, align 4
+ %i57 = icmp sgt i32 %i56, %.1174.6.i
+ %i58 = and i32 %i20, 128
+ %.not203.7.i = icmp eq i32 %i58, 0
+ %spec.select212.7.i = select i1 %.not203.7.i, i32 %.1174.6.i, i32 %i56
+ %.1174.7.i = select i1 %i57, i32 %spec.select212.7.i, i32 %.1174.6.i
+ %i60 = load i32, ptr null, align 4
+ %i61 = icmp sgt i32 %i60, %.1174.7.i
+ %i62 = and i32 %i20, 256
+ %.not203.8.i = icmp eq i32 %i62, 0
+ %spec.select212.8.i = select i1 %.not203.8.i, i32 %.1174.7.i, i32 %i60
+ %.1174.8.i = select i1 %i61, i32 %spec.select212.8.i, i32 %.1174.7.i
+ %i63 = load i32, ptr null, align 4
+ %i64 = icmp sgt i32 %i63, %.1174.8.i
+ %i65 = and i32 %i20, 512
+ %.not203.9.i = icmp eq i32 %i65, 0
+ %spec.select212.9.i = select i1 %.not203.9.i, i32 %.1174.8.i, i32 %i63
+ %.1174.9.i = select i1 %i64, i32 %spec.select212.9.i, i32 %.1174.8.i
+ %i67 = load i32, ptr null, align 4
+ %i68 = icmp sgt i32 %i67, %.1174.9.i
+ %i69 = and i32 %i20, 1024
+ %.not203.10.i = icmp eq i32 %i69, 0
+ %spec.select212.10.i = select i1 %.not203.10.i, i32 %.1174.9.i, i32 %i67
+ %.1174.10.i = select i1 %i68, i32 %spec.select212.10.i, i32 %.1174.9.i
+ %i70 = load i32, ptr null, align 4
+ %i71 = icmp sgt i32 %i70, %.1174.10.i
+ %i72 = and i32 %i20, 2048
+ %.not203.11.i = icmp eq i32 %i72, 0
+ %spec.select212.11.i = select i1 %.not203.11.i, i32 %.1174.10.i, i32 %i70
+ %.1174.11.i = select i1 %i71, i32 %spec.select212.11.i, i32 %.1174.10.i
+ %i75 = load i32, ptr null, align 4
+ %i76 = icmp sgt i32 %i75, %.1174.11.i
+ %i77 = and i32 %i20, 4096
+ %.not203.12.i = icmp eq i32 %i77, 0
+ %spec.select212.12.i = select i1 %.not203.12.i, i32 %.1174.11.i, i32 %i75
+ %.1174.12.i = select i1 %i76, i32 %spec.select212.12.i, i32 %.1174.11.i
+ %i80 = load i32, ptr null, align 4
+ %i81 = icmp sgt i32 %i80, %.1174.12.i
+ %spec.select212.13.i = select i1 false, i32 %.1174.12.i, i32 %i80
+ %.1174.13.i = select i1 %i81, i32 %spec.select212.13.i, i32 %.1174.12.i
+ %.1172.13.i = select i1 %i81, i32 13, i32 0
+ %i84 = icmp sgt i32 0, %.1174.13.i
+ %.1172.14.i = select i1 %i84, i32 14, i32 %.1172.13.i
+ %i88 = icmp slt i32 0, %i19
+ %i89 = select i1 %i88, i16 -32768, i16 0
+ %i90 = zext i16 %i89 to i32
+ %i91 = shl nuw nsw i32 1, %.1172.14.i
+ %i92 = and i32 %i91, %i90
+ %.not200.i = icmp eq i32 %i92, 0
+ %i93 = trunc nuw i32 %i91 to i16
+ %i94 = xor i16 %i93, -1
+ %i95 = select i1 %.not200.i, i16 -1, i16 %i94
+ %.2177.i = and i16 %i95, %i89
+ %i96 = xor i16 %.2177.i, -1
+ %i97 = and i16 %spec.select211.i, %i96
+ br label %get_tx_mask.exit
+
+get_tx_mask.exit: ; preds = %._crit_edge.i, %bb
+ %.1261.i = phi i16 [ %i97, %bb ], [ 0, %._crit_edge.i ]
+ %i99 = icmp eq i16 %.1261.i, 0
+ %.2262.i = select i1 %i99, i16 0, i16 %.1261.i
+ ret void
+}
+
+attributes #0 = { noimplicitfloat nounwind sspstrong uwtable vscale_range(2,1024) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic-rv64" "target-features"="+64bit,+a,+b,+c,+d,+f,+m,+relax,+unaligned-scalar-mem,+unaligned-vector-mem,+v,+zaamo,+zalrsc,+zba,+zbb,+zbs,+zca,+zcd,+zicsr,+zifencei,+zmmul,+zve32f,+zve32x,+zve64d,+zve64f,+zve64x,+zvl128b,+zvl32b,+zvl64b,-e,-experimental-p,-experimental-smctr,-experimental-ssctr,-experimental-svukte,-experimental-xqccmp,-experimental-xqcia,-experimental-xqciac,-experimental-xqcibi,-experimental-xqcibm,-experimental-xqcicli,-experimental-xqcicm,-experimental-xqcics,-experimental-xqcicsr,-experimental-xqciint,-experimental-xqciio,-experimental-xqcilb,-experimental-xqcili,-experimental-xqcilia,-experimental-xqcilo,-experimental-xqcilsm,-experimental-xqcisim,-experimental-xqcisls,-experimental-xqcisync,-experimental-xrivosvisni,-experimental-xrivosvizip,-experimental-xsfmclic,-experimental-xsfsclic,-experimental-zalasr,-experimental-zicfilp,-experimental-zicfiss,-experimental-zvbc32e,-experimental-zvkgs,-experimental-zvqdotq,-h,-q,-sdext,-sdtrig,-sha,-shcounterenw,-shgatpa,-shlcofideleg,-shtvala,-shvsatpa,-shvstvala,-shvstvecd,-smaia,-smcdeleg,-smcntrpmf,-smcsrind,-smdbltrp,-smepmp,-smmpm,-smnpm,-smrnmi,-smstateen,-ssaia,-ssccfg,-ssccptr,-sscofpmf,-sscounterenw,-sscsrind,-ssdbltrp,-ssnpm,-sspm,-ssqosid,-ssstateen,-ssstrict,-sstc,-sstvala,-sstvecd,-ssu64xl,-supm,-svade,-svadu,-svbare,-svinval,-svnapot,-svpbmt,-svvptc,-xandesperf,-xandesvbfhcvt,-xandesvdot,-xandesvpackfph,-xcvalu,-xcvbi,-xcvbitmanip,-xcvelw,-xcvmac,-xcvmem,-xcvsimd,-xmipscmov,-xmipslsp,-xsfcease,-xsfmm128t,-xsfmm16t,-xsfmm32a16f,-xsfmm32a32f,-xsfmm32a8f,-xsfmm32a8i,-xsfmm32t,-xsfmm64a64f,-xsfmm64t,-xsfmmbase,-xsfvcp,-xsfvfnrclipxfqf,-xsfvfwmaccqqq,-xsfvqmaccdod,-xsfvqmaccqoq,-xsifivecdiscarddlone,-xsifivecflushdlone,-xtheadba,-xtheadbb,-xtheadbs,-xtheadcmo,-xtheadcondmov,-xtheadfmemidx,-xtheadmac,-xtheadmemidx,-xtheadmempair,-xtheadsync,-xtheadvdot,-xventanacondops,-xwchc,-za128rs,-za64rs,-zabha,-zacas,-zama16b,-zawrs,-zbc,-zbkb,-zbkc,-zbkx,-zcb,-zce,-zcf,-zclsd,-zcmop,-zcmp,-zcmt,-zdinx,-zfa,-zfbfmin,-zfh,-zfhmin,-zfinx,-zhinx,-zhinxmin,-zic64b,-zicbom,-zicbop,-zicboz,-ziccamoa,-ziccamoc,-ziccif,-zicclsm,-ziccrse,-zicntr,-zicond,-zihintntl,-zihintpause,-zihpm,-zilsd,-zimop,-zk,-zkn,-zknd,-zkne,-zknh,-zkr,-zks,-zksed,-zksh,-zkt,-ztso,-zvbb,-zvbc,-zvfbfmin,-zvfbfwma,-zvfh,-zvfhmin,-zvkb,-zvkg,-zvkn,-zvknc,-zvkned,-zvkng,-zvknha,-zvknhb,-zvks,-zvksc,-zvksed,-zvksg,-zvksh,-zvkt,-zvl1024b,-zvl16384b,-zvl2048b,-zvl256b,-zvl32768b,-zvl4096b,-zvl512b,-zvl65536b,-zvl8192b" }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index bdf344d..7274e1b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -190,6 +190,20 @@ define {<4 x i32>, <4 x i32>} @vpload_factor2(ptr %ptr) {
ret {<4 x i32>, <4 x i32>} %res1
}

+define {<4 x i32>, <4 x i32>} @vpload_factor2_interleaved_mask_intrinsic(ptr %ptr, <4 x i1> %m) {
+; CHECK-LABEL: vpload_factor2_interleaved_mask_intrinsic:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vlseg2e32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+ %interleaved.mask = call <8 x i1> @llvm.vector.interleave2(<4 x i1> %m, <4 x i1> %m)
+ %interleaved.vec = tail call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %ptr, <8 x i1> %interleaved.mask, i32 8)
+ %v0 = shufflevector <8 x i32> %interleaved.vec, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %v1 = shufflevector <8 x i32> %interleaved.vec, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %res0 = insertvalue {<4 x i32>, <4 x i32>} undef, <4 x i32> %v0, 0
+ %res1 = insertvalue {<4 x i32>, <4 x i32>} %res0, <4 x i32> %v1, 1
+ ret {<4 x i32>, <4 x i32>} %res1
+}

define {<4 x i32>, <4 x i32>, <4 x i32>} @vpload_factor3(ptr %ptr) {
; CHECK-LABEL: vpload_factor3:
@@ -423,8 +437,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: li a2, 32
; RV32-NEXT: lui a3, 12
; RV32-NEXT: lui a6, 12291
-; RV32-NEXT: lui a7, %hi(.LCPI20_0)
-; RV32-NEXT: addi a7, a7, %lo(.LCPI20_0)
+; RV32-NEXT: lui a7, %hi(.LCPI21_0)
+; RV32-NEXT: addi a7, a7, %lo(.LCPI21_0)
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vle32.v v24, (a5)
; RV32-NEXT: vmv.s.x v0, a3
@@ -509,12 +523,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
; RV32-NEXT: lui a7, 49164
-; RV32-NEXT: lui a1, %hi(.LCPI20_1)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI20_1)
+; RV32-NEXT: lui a1, %hi(.LCPI21_1)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI21_1)
; RV32-NEXT: lui t2, 3
; RV32-NEXT: lui t1, 196656
-; RV32-NEXT: lui a4, %hi(.LCPI20_3)
-; RV32-NEXT: addi a4, a4, %lo(.LCPI20_3)
+; RV32-NEXT: lui a4, %hi(.LCPI21_3)
+; RV32-NEXT: addi a4, a4, %lo(.LCPI21_3)
; RV32-NEXT: lui t0, 786624
; RV32-NEXT: li a5, 48
; RV32-NEXT: lui a6, 768
@@ -693,8 +707,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vrgatherei16.vv v24, v8, v2
-; RV32-NEXT: lui a1, %hi(.LCPI20_2)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI20_2)
+; RV32-NEXT: lui a1, %hi(.LCPI21_2)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI21_2)
; RV32-NEXT: lui a3, 3073
; RV32-NEXT: addi a3, a3, -1024
; RV32-NEXT: vmv.s.x v0, a3
@@ -758,16 +772,16 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vrgatherei16.vv v28, v8, v3
; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
; RV32-NEXT: vmv.v.v v28, v24
-; RV32-NEXT: lui a1, %hi(.LCPI20_4)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI20_4)
-; RV32-NEXT: lui a2, %hi(.LCPI20_5)
-; RV32-NEXT: addi a2, a2, %lo(.LCPI20_5)
+; RV32-NEXT: lui a1, %hi(.LCPI21_4)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI21_4)
+; RV32-NEXT: lui a2, %hi(.LCPI21_5)
+; RV32-NEXT: addi a2, a2, %lo(.LCPI21_5)
; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV32-NEXT: vle16.v v24, (a2)
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: lui a1, %hi(.LCPI20_7)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI20_7)
+; RV32-NEXT: lui a1, %hi(.LCPI21_7)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI21_7)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vle16.v v10, (a1)
; RV32-NEXT: csrr a1, vlenb
@@ -795,14 +809,14 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vl8r.v v0, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vrgatherei16.vv v16, v0, v10
-; RV32-NEXT: lui a1, %hi(.LCPI20_6)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI20_6)
-; RV32-NEXT: lui a2, %hi(.LCPI20_8)
-; RV32-NEXT: addi a2, a2, %lo(.LCPI20_8)
+; RV32-NEXT: lui a1, %hi(.LCPI21_6)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI21_6)
+; RV32-NEXT: lui a2, %hi(.LCPI21_8)
+; RV32-NEXT: addi a2, a2, %lo(.LCPI21_8)
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT: vle16.v v4, (a1)
-; RV32-NEXT: lui a1, %hi(.LCPI20_9)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI20_9)
+; RV32-NEXT: lui a1, %hi(.LCPI21_9)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI21_9)
; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV32-NEXT: vle16.v v6, (a1)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
@@ -889,8 +903,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: li a4, 128
; RV64-NEXT: lui a1, 1
; RV64-NEXT: vle64.v v8, (a3)
-; RV64-NEXT: lui a3, %hi(.LCPI20_0)
-; RV64-NEXT: addi a3, a3, %lo(.LCPI20_0)
+; RV64-NEXT: lui a3, %hi(.LCPI21_0)
+; RV64-NEXT: addi a3, a3, %lo(.LCPI21_0)
; RV64-NEXT: vmv.s.x v0, a4
; RV64-NEXT: csrr a4, vlenb
; RV64-NEXT: li a5, 61
@@ -1078,8 +1092,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vslideup.vi v12, v16, 1, v0.t
-; RV64-NEXT: lui a2, %hi(.LCPI20_1)
-; RV64-NEXT: addi a2, a2, %lo(.LCPI20_1)
+; RV64-NEXT: lui a2, %hi(.LCPI21_1)
+; RV64-NEXT: addi a2, a2, %lo(.LCPI21_1)
; RV64-NEXT: li a3, 192
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV64-NEXT: vle16.v v6, (a2)
@@ -1113,8 +1127,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: vrgatherei16.vv v24, v16, v6
; RV64-NEXT: addi a2, sp, 16
; RV64-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; RV64-NEXT: lui a2, %hi(.LCPI20_2)
-; RV64-NEXT: addi a2, a2, %lo(.LCPI20_2)
+; RV64-NEXT: lui a2, %hi(.LCPI21_2)
+; RV64-NEXT: addi a2, a2, %lo(.LCPI21_2)
; RV64-NEXT: li a3, 1040
; RV64-NEXT: vmv.s.x v0, a3
; RV64-NEXT: addi a1, a1, -2016
@@ -1198,12 +1212,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
-; RV64-NEXT: lui a1, %hi(.LCPI20_3)
-; RV64-NEXT: addi a1, a1, %lo(.LCPI20_3)
+; RV64-NEXT: lui a1, %hi(.LCPI21_3)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI21_3)
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV64-NEXT: vle16.v v20, (a1)
-; RV64-NEXT: lui a1, %hi(.LCPI20_4)
-; RV64-NEXT: addi a1, a1, %lo(.LCPI20_4)
+; RV64-NEXT: lui a1, %hi(.LCPI21_4)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI21_4)
; RV64-NEXT: vle16.v v8, (a1)
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a2, 77
@@ -1254,8 +1268,8 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: vl2r.v v8, (a1) # vscale x 16-byte Folded Reload
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vrgatherei16.vv v0, v16, v8
-; RV64-NEXT: lui a1, %hi(.LCPI20_5)
-; RV64-NEXT: addi a1, a1, %lo(.LCPI20_5)
+; RV64-NEXT: lui a1, %hi(.LCPI21_5)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI21_5)
; RV64-NEXT: vle16.v v20, (a1)
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a2, 61
@@ -1472,6 +1486,19 @@ define void @vpstore_factor2(ptr %ptr, <4 x i32> %v0, <4 x i32> %v1) {
ret void
}

+define void @vpstore_factor2_interleaved_mask_intrinsic(ptr %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i1> %m) {
+; CHECK-LABEL: vpstore_factor2_interleaved_mask_intrinsic:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+ %interleaved.mask = call <8 x i1> @llvm.vector.interleave2(<4 x i1> %m, <4 x i1> %m)
+ %interleaved.vec = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+ tail call void @llvm.vp.store.v8i32.p0(<8 x i32> %interleaved.vec, ptr %ptr, <8 x i1> %interleaved.mask, i32 8)
+ ret void
+}
+
+
define void @vpstore_factor3(ptr %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
; CHECK-LABEL: vpstore_factor3:
; CHECK: # %bb.0:
@@ -1757,8 +1784,9 @@ define void @store_factor4_one_active(ptr %ptr, <4 x i32> %v) {
define void @vpstore_factor4_one_active(ptr %ptr, <4 x i32> %v) {
; CHECK-LABEL: vpstore_factor4_one_active:
; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 16
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vsseg4e32.v v8, (a0)
+; CHECK-NEXT: vsse32.v v8, (a0), a1
; CHECK-NEXT: ret
%v0 = shufflevector <4 x i32> %v, <4 x i32> poison, <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 undef, i32 2, i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 undef>
tail call void @llvm.vp.store.v16i32.p0(<16 x i32> %v0, ptr %ptr, <16 x i1> splat (i1 true), i32 16)
@@ -1782,7 +1810,7 @@ define void @store_factor4_one_active_fullwidth(ptr %ptr, <16 x i32> %v) {
; CHECK-LABEL: store_factor4_one_active_fullwidth:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vsse32.v v8, (a0), a1
; CHECK-NEXT: ret
%v0 = shufflevector <16 x i32> %v, <16 x i32> poison, <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 undef, i32 2, i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 undef>
@@ -1839,8 +1867,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @invalid_vp_mask(ptr %ptr) {
; RV32-NEXT: vle32.v v12, (a0), v0.t
; RV32-NEXT: li a0, 36
; RV32-NEXT: vmv.s.x v20, a1
-; RV32-NEXT: lui a1, %hi(.LCPI54_0)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI54_0)
+; RV32-NEXT: lui a1, %hi(.LCPI56_0)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI56_0)
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vle16.v v21, (a1)
; RV32-NEXT: vcompress.vm v8, v12, v11
@@ -1915,8 +1943,8 @@ define {<4 x i32>, <4 x i32>, <4 x i32>} @invalid_vp_evl(ptr %ptr) {
; RV32-NEXT: vmv.s.x v10, a0
; RV32-NEXT: li a0, 146
; RV32-NEXT: vmv.s.x v11, a0
-; RV32-NEXT: lui a0, %hi(.LCPI55_0)
-; RV32-NEXT: addi a0, a0, %lo(.LCPI55_0)
+; RV32-NEXT: lui a0, %hi(.LCPI57_0)
+; RV32-NEXT: addi a0, a0, %lo(.LCPI57_0)
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vle16.v v20, (a0)
; RV32-NEXT: li a0, 36
diff --git a/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll b/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll
index 32753ca..cd7f30d 100644
--- a/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll
+++ b/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll
@@ -716,92 +716,101 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV32I-NEXT: slli a4, a4, 8
; RV32I-NEXT: slli a5, a5, 16
; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: lbu a5, 8(a0)
+; RV32I-NEXT: lbu a6, 9(a0)
+; RV32I-NEXT: lbu t3, 10(a0)
+; RV32I-NEXT: lbu t4, 11(a0)
; RV32I-NEXT: slli t0, t0, 8
-; RV32I-NEXT: or a4, a4, a3
-; RV32I-NEXT: or a5, a6, a5
-; RV32I-NEXT: or a3, t0, a7
-; RV32I-NEXT: lbu a6, 8(a0)
-; RV32I-NEXT: lbu a7, 9(a0)
-; RV32I-NEXT: lbu t0, 10(a0)
-; RV32I-NEXT: lbu t3, 11(a0)
; RV32I-NEXT: slli t1, t1, 16
; RV32I-NEXT: slli t2, t2, 24
-; RV32I-NEXT: slli a7, a7, 8
-; RV32I-NEXT: slli t0, t0, 16
-; RV32I-NEXT: slli t3, t3, 24
-; RV32I-NEXT: or t1, t2, t1
-; RV32I-NEXT: or a6, a7, a6
-; RV32I-NEXT: or a7, t3, t0
-; RV32I-NEXT: lbu t0, 12(a0)
-; RV32I-NEXT: lbu t2, 13(a0)
-; RV32I-NEXT: lbu t3, 14(a0)
-; RV32I-NEXT: lbu t4, 15(a0)
-; RV32I-NEXT: lbu a0, 0(a1)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a7, t0, a7
+; RV32I-NEXT: or t0, t2, t1
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 12(a0)
+; RV32I-NEXT: lbu t1, 13(a0)
+; RV32I-NEXT: lbu t2, 14(a0)
+; RV32I-NEXT: lbu a0, 15(a0)
+; RV32I-NEXT: slli t3, t3, 16
+; RV32I-NEXT: slli t4, t4, 24
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or t3, t4, t3
+; RV32I-NEXT: or a6, t1, a6
+; RV32I-NEXT: or a0, a0, t2
+; RV32I-NEXT: lbu t1, 1(a1)
+; RV32I-NEXT: lbu t2, 0(a1)
+; RV32I-NEXT: lbu t4, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: or t1, t1, t2
; RV32I-NEXT: sw zero, 16(sp)
; RV32I-NEXT: sw zero, 20(sp)
; RV32I-NEXT: sw zero, 24(sp)
; RV32I-NEXT: sw zero, 28(sp)
-; RV32I-NEXT: slli t2, t2, 8
-; RV32I-NEXT: or a1, t2, t0
-; RV32I-NEXT: mv t0, sp
-; RV32I-NEXT: slli t3, t3, 16
-; RV32I-NEXT: slli t4, t4, 24
-; RV32I-NEXT: or t2, t4, t3
-; RV32I-NEXT: srli t3, a0, 3
-; RV32I-NEXT: or a4, a5, a4
-; RV32I-NEXT: andi a5, a0, 31
-; RV32I-NEXT: andi t3, t3, 12
-; RV32I-NEXT: xori a5, a5, 31
-; RV32I-NEXT: or a3, t1, a3
-; RV32I-NEXT: or a6, a7, a6
-; RV32I-NEXT: or a1, t2, a1
-; RV32I-NEXT: add t0, t0, t3
-; RV32I-NEXT: sw a4, 0(sp)
-; RV32I-NEXT: sw a3, 4(sp)
-; RV32I-NEXT: sw a6, 8(sp)
-; RV32I-NEXT: sw a1, 12(sp)
-; RV32I-NEXT: lw a1, 4(t0)
-; RV32I-NEXT: lw a3, 8(t0)
-; RV32I-NEXT: lw a4, 0(t0)
-; RV32I-NEXT: lw a6, 12(t0)
-; RV32I-NEXT: srl a7, a1, a0
-; RV32I-NEXT: slli t0, a3, 1
-; RV32I-NEXT: srl a4, a4, a0
-; RV32I-NEXT: slli a1, a1, 1
-; RV32I-NEXT: srl a3, a3, a0
-; RV32I-NEXT: slli t1, a6, 1
-; RV32I-NEXT: srl a0, a6, a0
-; RV32I-NEXT: sll a6, t0, a5
-; RV32I-NEXT: sll a1, a1, a5
-; RV32I-NEXT: sll a5, t1, a5
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t4
+; RV32I-NEXT: mv t2, sp
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: or a4, t0, a7
+; RV32I-NEXT: or a5, t3, a5
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: or a1, a1, t1
+; RV32I-NEXT: sw a3, 0(sp)
+; RV32I-NEXT: sw a4, 4(sp)
+; RV32I-NEXT: sw a5, 8(sp)
+; RV32I-NEXT: sw a0, 12(sp)
+; RV32I-NEXT: srli a0, a1, 3
+; RV32I-NEXT: andi a3, a1, 31
+; RV32I-NEXT: andi a0, a0, 12
+; RV32I-NEXT: xori a3, a3, 31
+; RV32I-NEXT: add a0, t2, a0
+; RV32I-NEXT: lw a4, 4(a0)
+; RV32I-NEXT: lw a5, 8(a0)
+; RV32I-NEXT: lw a6, 0(a0)
+; RV32I-NEXT: lw a0, 12(a0)
+; RV32I-NEXT: srl a7, a4, a1
+; RV32I-NEXT: slli t0, a5, 1
+; RV32I-NEXT: srl a6, a6, a1
+; RV32I-NEXT: slli a4, a4, 1
+; RV32I-NEXT: srl a5, a5, a1
+; RV32I-NEXT: slli t1, a0, 1
+; RV32I-NEXT: srl a0, a0, a1
+; RV32I-NEXT: sll a1, t0, a3
+; RV32I-NEXT: sll a4, a4, a3
+; RV32I-NEXT: sll a3, t1, a3
; RV32I-NEXT: srli t0, a0, 16
; RV32I-NEXT: srli t1, a0, 24
; RV32I-NEXT: srli t2, a0, 8
-; RV32I-NEXT: or a6, a7, a6
-; RV32I-NEXT: or a1, a4, a1
-; RV32I-NEXT: or a3, a3, a5
+; RV32I-NEXT: or a1, a7, a1
+; RV32I-NEXT: or a4, a6, a4
+; RV32I-NEXT: or a3, a5, a3
; RV32I-NEXT: sb a0, 12(a2)
; RV32I-NEXT: sb t2, 13(a2)
; RV32I-NEXT: sb t0, 14(a2)
; RV32I-NEXT: sb t1, 15(a2)
; RV32I-NEXT: srli a0, a3, 16
-; RV32I-NEXT: srli a4, a3, 24
-; RV32I-NEXT: srli a5, a3, 8
-; RV32I-NEXT: srli a7, a1, 16
-; RV32I-NEXT: srli t0, a1, 24
-; RV32I-NEXT: srli t1, a1, 8
-; RV32I-NEXT: srli t2, a6, 16
-; RV32I-NEXT: srli t3, a6, 24
+; RV32I-NEXT: srli a5, a3, 24
+; RV32I-NEXT: srli a6, a3, 8
+; RV32I-NEXT: srli a7, a4, 16
+; RV32I-NEXT: srli t0, a4, 24
+; RV32I-NEXT: srli t1, a4, 8
+; RV32I-NEXT: srli t2, a1, 16
+; RV32I-NEXT: srli t3, a1, 24
; RV32I-NEXT: sb a3, 8(a2)
-; RV32I-NEXT: sb a5, 9(a2)
+; RV32I-NEXT: sb a6, 9(a2)
; RV32I-NEXT: sb a0, 10(a2)
-; RV32I-NEXT: sb a4, 11(a2)
-; RV32I-NEXT: srli a0, a6, 8
-; RV32I-NEXT: sb a1, 0(a2)
+; RV32I-NEXT: sb a5, 11(a2)
+; RV32I-NEXT: srli a0, a1, 8
+; RV32I-NEXT: sb a4, 0(a2)
; RV32I-NEXT: sb t1, 1(a2)
; RV32I-NEXT: sb a7, 2(a2)
; RV32I-NEXT: sb t0, 3(a2)
-; RV32I-NEXT: sb a6, 4(a2)
+; RV32I-NEXT: sb a1, 4(a2)
; RV32I-NEXT: sb a0, 5(a2)
; RV32I-NEXT: sb t2, 6(a2)
; RV32I-NEXT: sb t3, 7(a2)
@@ -943,93 +952,102 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV32I-NEXT: slli a4, a4, 8
; RV32I-NEXT: slli a5, a5, 16
; RV32I-NEXT: slli a6, a6, 24
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: or a4, a6, a5
+; RV32I-NEXT: lbu a5, 8(a0)
+; RV32I-NEXT: lbu a6, 9(a0)
+; RV32I-NEXT: lbu t3, 10(a0)
+; RV32I-NEXT: lbu t4, 11(a0)
; RV32I-NEXT: slli t0, t0, 8
-; RV32I-NEXT: or a4, a4, a3
-; RV32I-NEXT: or a5, a6, a5
-; RV32I-NEXT: or a3, t0, a7
-; RV32I-NEXT: lbu a6, 8(a0)
-; RV32I-NEXT: lbu a7, 9(a0)
-; RV32I-NEXT: lbu t0, 10(a0)
-; RV32I-NEXT: lbu t3, 11(a0)
; RV32I-NEXT: slli t1, t1, 16
; RV32I-NEXT: slli t2, t2, 24
-; RV32I-NEXT: slli a7, a7, 8
-; RV32I-NEXT: slli t0, t0, 16
-; RV32I-NEXT: slli t3, t3, 24
-; RV32I-NEXT: or t1, t2, t1
-; RV32I-NEXT: or a6, a7, a6
-; RV32I-NEXT: or a7, t3, t0
-; RV32I-NEXT: lbu t0, 12(a0)
-; RV32I-NEXT: lbu t2, 13(a0)
-; RV32I-NEXT: lbu t3, 14(a0)
-; RV32I-NEXT: lbu t4, 15(a0)
-; RV32I-NEXT: lbu a0, 0(a1)
+; RV32I-NEXT: slli a6, a6, 8
+; RV32I-NEXT: or a7, t0, a7
+; RV32I-NEXT: or t0, t2, t1
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: lbu a6, 12(a0)
+; RV32I-NEXT: lbu t1, 13(a0)
+; RV32I-NEXT: lbu t2, 14(a0)
+; RV32I-NEXT: lbu a0, 15(a0)
+; RV32I-NEXT: slli t3, t3, 16
+; RV32I-NEXT: slli t4, t4, 24
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or t3, t4, t3
+; RV32I-NEXT: or a6, t1, a6
+; RV32I-NEXT: or a0, a0, t2
+; RV32I-NEXT: lbu t1, 1(a1)
+; RV32I-NEXT: lbu t2, 0(a1)
+; RV32I-NEXT: lbu t4, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: or t1, t1, t2
; RV32I-NEXT: sw zero, 0(sp)
; RV32I-NEXT: sw zero, 4(sp)
; RV32I-NEXT: sw zero, 8(sp)
; RV32I-NEXT: sw zero, 12(sp)
-; RV32I-NEXT: slli t2, t2, 8
-; RV32I-NEXT: or a1, t2, t0
-; RV32I-NEXT: addi t0, sp, 16
-; RV32I-NEXT: slli t3, t3, 16
-; RV32I-NEXT: slli t4, t4, 24
-; RV32I-NEXT: or t2, t4, t3
-; RV32I-NEXT: srli t3, a0, 3
-; RV32I-NEXT: or a4, a5, a4
-; RV32I-NEXT: andi a5, a0, 31
-; RV32I-NEXT: andi t3, t3, 12
-; RV32I-NEXT: or a3, t1, a3
-; RV32I-NEXT: or a6, a7, a6
-; RV32I-NEXT: or a1, t2, a1
-; RV32I-NEXT: sub a7, t0, t3
-; RV32I-NEXT: sw a4, 16(sp)
-; RV32I-NEXT: sw a3, 20(sp)
-; RV32I-NEXT: sw a6, 24(sp)
-; RV32I-NEXT: sw a1, 28(sp)
-; RV32I-NEXT: lw a1, 0(a7)
-; RV32I-NEXT: lw a3, 4(a7)
-; RV32I-NEXT: lw a4, 8(a7)
-; RV32I-NEXT: lw a6, 12(a7)
-; RV32I-NEXT: xori a5, a5, 31
-; RV32I-NEXT: sll a7, a3, a0
-; RV32I-NEXT: srli t0, a1, 1
-; RV32I-NEXT: sll a6, a6, a0
-; RV32I-NEXT: srli t1, a4, 1
-; RV32I-NEXT: sll a4, a4, a0
-; RV32I-NEXT: srli a3, a3, 1
-; RV32I-NEXT: sll a0, a1, a0
-; RV32I-NEXT: srl a1, t0, a5
-; RV32I-NEXT: srl t0, t1, a5
-; RV32I-NEXT: srl a3, a3, a5
-; RV32I-NEXT: srli a5, a0, 16
-; RV32I-NEXT: srli t1, a0, 24
-; RV32I-NEXT: srli t2, a0, 8
-; RV32I-NEXT: or a1, a7, a1
-; RV32I-NEXT: or a6, a6, t0
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t4
+; RV32I-NEXT: addi t2, sp, 16
; RV32I-NEXT: or a3, a4, a3
-; RV32I-NEXT: sb a0, 0(a2)
+; RV32I-NEXT: or a4, t0, a7
+; RV32I-NEXT: or a5, t3, a5
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: or a1, a1, t1
+; RV32I-NEXT: sw a3, 16(sp)
+; RV32I-NEXT: sw a4, 20(sp)
+; RV32I-NEXT: sw a5, 24(sp)
+; RV32I-NEXT: sw a0, 28(sp)
+; RV32I-NEXT: srli a0, a1, 3
+; RV32I-NEXT: andi a3, a1, 31
+; RV32I-NEXT: andi a0, a0, 12
+; RV32I-NEXT: sub a0, t2, a0
+; RV32I-NEXT: lw a4, 0(a0)
+; RV32I-NEXT: lw a5, 4(a0)
+; RV32I-NEXT: lw a6, 8(a0)
+; RV32I-NEXT: lw a0, 12(a0)
+; RV32I-NEXT: xori a3, a3, 31
+; RV32I-NEXT: sll a7, a5, a1
+; RV32I-NEXT: srli t0, a4, 1
+; RV32I-NEXT: sll a0, a0, a1
+; RV32I-NEXT: srli t1, a6, 1
+; RV32I-NEXT: sll a6, a6, a1
+; RV32I-NEXT: srli a5, a5, 1
+; RV32I-NEXT: sll a1, a4, a1
+; RV32I-NEXT: srl a4, t0, a3
+; RV32I-NEXT: srl t0, t1, a3
+; RV32I-NEXT: srl a3, a5, a3
+; RV32I-NEXT: srli a5, a1, 16
+; RV32I-NEXT: srli t1, a1, 24
+; RV32I-NEXT: srli t2, a1, 8
+; RV32I-NEXT: or a4, a7, a4
+; RV32I-NEXT: or a0, a0, t0
+; RV32I-NEXT: or a3, a6, a3
+; RV32I-NEXT: sb a1, 0(a2)
; RV32I-NEXT: sb t2, 1(a2)
; RV32I-NEXT: sb a5, 2(a2)
; RV32I-NEXT: sb t1, 3(a2)
-; RV32I-NEXT: srli a0, a3, 16
-; RV32I-NEXT: srli a4, a3, 24
-; RV32I-NEXT: srli a5, a3, 8
-; RV32I-NEXT: srli a7, a6, 16
-; RV32I-NEXT: srli t0, a6, 24
-; RV32I-NEXT: srli t1, a6, 8
-; RV32I-NEXT: srli t2, a1, 16
-; RV32I-NEXT: srli t3, a1, 24
+; RV32I-NEXT: srli a1, a3, 16
+; RV32I-NEXT: srli a5, a3, 24
+; RV32I-NEXT: srli a6, a3, 8
+; RV32I-NEXT: srli a7, a0, 16
+; RV32I-NEXT: srli t0, a0, 24
+; RV32I-NEXT: srli t1, a0, 8
+; RV32I-NEXT: srli t2, a4, 16
+; RV32I-NEXT: srli t3, a4, 24
; RV32I-NEXT: sb a3, 8(a2)
-; RV32I-NEXT: sb a5, 9(a2)
-; RV32I-NEXT: sb a0, 10(a2)
-; RV32I-NEXT: sb a4, 11(a2)
-; RV32I-NEXT: srli a0, a1, 8
-; RV32I-NEXT: sb a6, 12(a2)
+; RV32I-NEXT: sb a6, 9(a2)
+; RV32I-NEXT: sb a1, 10(a2)
+; RV32I-NEXT: sb a5, 11(a2)
+; RV32I-NEXT: srli a1, a4, 8
+; RV32I-NEXT: sb a0, 12(a2)
; RV32I-NEXT: sb t1, 13(a2)
; RV32I-NEXT: sb a7, 14(a2)
; RV32I-NEXT: sb t0, 15(a2)
-; RV32I-NEXT: sb a1, 4(a2)
-; RV32I-NEXT: sb a0, 5(a2)
+; RV32I-NEXT: sb a4, 4(a2)
+; RV32I-NEXT: sb a1, 5(a2)
; RV32I-NEXT: sb t2, 6(a2)
; RV32I-NEXT: sb t3, 7(a2)
; RV32I-NEXT: addi sp, sp, 32
@@ -1168,73 +1186,82 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV32I-NEXT: lbu t1, 6(a0)
; RV32I-NEXT: lbu t2, 7(a0)
; RV32I-NEXT: slli a4, a4, 8
+; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: lbu a4, 8(a0)
+; RV32I-NEXT: lbu t3, 9(a0)
+; RV32I-NEXT: lbu t4, 10(a0)
+; RV32I-NEXT: lbu t5, 11(a0)
; RV32I-NEXT: slli a5, a5, 16
; RV32I-NEXT: slli a6, a6, 24
; RV32I-NEXT: slli t0, t0, 8
-; RV32I-NEXT: or a3, a4, a3
-; RV32I-NEXT: or a4, a6, a5
-; RV32I-NEXT: or a5, t0, a7
-; RV32I-NEXT: lbu a6, 8(a0)
-; RV32I-NEXT: lbu a7, 9(a0)
-; RV32I-NEXT: lbu t0, 10(a0)
-; RV32I-NEXT: lbu t3, 11(a0)
; RV32I-NEXT: slli t1, t1, 16
; RV32I-NEXT: slli t2, t2, 24
-; RV32I-NEXT: slli a7, a7, 8
-; RV32I-NEXT: slli t0, t0, 16
-; RV32I-NEXT: slli t3, t3, 24
-; RV32I-NEXT: or t1, t2, t1
-; RV32I-NEXT: or a6, a7, a6
-; RV32I-NEXT: or a7, t3, t0
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a7, t2, t1
; RV32I-NEXT: lbu t0, 12(a0)
-; RV32I-NEXT: lbu t2, 13(a0)
-; RV32I-NEXT: lbu t3, 14(a0)
-; RV32I-NEXT: lbu t4, 15(a0)
-; RV32I-NEXT: lbu a0, 0(a1)
-; RV32I-NEXT: slli t2, t2, 8
-; RV32I-NEXT: or a1, t2, t0
-; RV32I-NEXT: mv t0, sp
-; RV32I-NEXT: slli t3, t3, 16
-; RV32I-NEXT: slli t4, t4, 24
-; RV32I-NEXT: or a3, a4, a3
-; RV32I-NEXT: srli a4, a0, 3
-; RV32I-NEXT: or a5, t1, a5
-; RV32I-NEXT: andi t1, a0, 31
-; RV32I-NEXT: or t2, t4, t3
-; RV32I-NEXT: srai t3, t4, 31
-; RV32I-NEXT: andi a4, a4, 12
-; RV32I-NEXT: xori t1, t1, 31
+; RV32I-NEXT: lbu t1, 13(a0)
+; RV32I-NEXT: lbu t2, 14(a0)
+; RV32I-NEXT: lbu a0, 15(a0)
+; RV32I-NEXT: slli t3, t3, 8
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli t5, t5, 24
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: or a4, t3, a4
+; RV32I-NEXT: or t3, t5, t4
+; RV32I-NEXT: or t0, t1, t0
+; RV32I-NEXT: lbu t1, 1(a1)
+; RV32I-NEXT: lbu t4, 0(a1)
+; RV32I-NEXT: lbu t5, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli t1, t1, 8
+; RV32I-NEXT: or t1, t1, t4
+; RV32I-NEXT: slli t5, t5, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, t5
+; RV32I-NEXT: or a3, a5, a3
+; RV32I-NEXT: mv a5, sp
+; RV32I-NEXT: slli t2, t2, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or t2, a0, t2
+; RV32I-NEXT: srai a0, a0, 31
; RV32I-NEXT: or a6, a7, a6
-; RV32I-NEXT: or a1, t2, a1
-; RV32I-NEXT: sw t3, 16(sp)
-; RV32I-NEXT: sw t3, 20(sp)
-; RV32I-NEXT: sw t3, 24(sp)
-; RV32I-NEXT: sw t3, 28(sp)
-; RV32I-NEXT: add a4, t0, a4
+; RV32I-NEXT: or a4, t3, a4
+; RV32I-NEXT: or a7, t2, t0
+; RV32I-NEXT: or a1, a1, t1
+; RV32I-NEXT: sw a0, 16(sp)
+; RV32I-NEXT: sw a0, 20(sp)
+; RV32I-NEXT: sw a0, 24(sp)
+; RV32I-NEXT: sw a0, 28(sp)
; RV32I-NEXT: sw a3, 0(sp)
-; RV32I-NEXT: sw a5, 4(sp)
-; RV32I-NEXT: sw a6, 8(sp)
-; RV32I-NEXT: sw a1, 12(sp)
-; RV32I-NEXT: lw a1, 4(a4)
-; RV32I-NEXT: lw a3, 8(a4)
-; RV32I-NEXT: lw a5, 0(a4)
-; RV32I-NEXT: lw a4, 12(a4)
-; RV32I-NEXT: srl a6, a1, a0
-; RV32I-NEXT: slli a7, a3, 1
-; RV32I-NEXT: srl a5, a5, a0
-; RV32I-NEXT: slli a1, a1, 1
-; RV32I-NEXT: srl a3, a3, a0
-; RV32I-NEXT: slli t0, a4, 1
-; RV32I-NEXT: sra a0, a4, a0
-; RV32I-NEXT: sll a4, a7, t1
-; RV32I-NEXT: sll a1, a1, t1
-; RV32I-NEXT: sll a7, t0, t1
+; RV32I-NEXT: sw a6, 4(sp)
+; RV32I-NEXT: sw a4, 8(sp)
+; RV32I-NEXT: sw a7, 12(sp)
+; RV32I-NEXT: srli a0, a1, 3
+; RV32I-NEXT: andi a3, a1, 31
+; RV32I-NEXT: andi a0, a0, 12
+; RV32I-NEXT: xori a3, a3, 31
+; RV32I-NEXT: add a0, a5, a0
+; RV32I-NEXT: lw a4, 4(a0)
+; RV32I-NEXT: lw a5, 8(a0)
+; RV32I-NEXT: lw a6, 0(a0)
+; RV32I-NEXT: lw a0, 12(a0)
+; RV32I-NEXT: srl a7, a4, a1
+; RV32I-NEXT: slli t0, a5, 1
+; RV32I-NEXT: srl a6, a6, a1
+; RV32I-NEXT: slli a4, a4, 1
+; RV32I-NEXT: srl a5, a5, a1
+; RV32I-NEXT: slli t1, a0, 1
+; RV32I-NEXT: sra a0, a0, a1
+; RV32I-NEXT: sll a1, t0, a3
+; RV32I-NEXT: sll a4, a4, a3
+; RV32I-NEXT: sll a3, t1, a3
; RV32I-NEXT: srli t0, a0, 16
; RV32I-NEXT: srli t1, a0, 24
; RV32I-NEXT: srli t2, a0, 8
+; RV32I-NEXT: or a1, a7, a1
; RV32I-NEXT: or a4, a6, a4
-; RV32I-NEXT: or a1, a5, a1
-; RV32I-NEXT: or a3, a3, a7
+; RV32I-NEXT: or a3, a5, a3
; RV32I-NEXT: sb a0, 12(a2)
; RV32I-NEXT: sb t2, 13(a2)
; RV32I-NEXT: sb t0, 14(a2)
@@ -1242,21 +1269,21 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV32I-NEXT: srli a0, a3, 16
; RV32I-NEXT: srli a5, a3, 24
; RV32I-NEXT: srli a6, a3, 8
-; RV32I-NEXT: srli a7, a1, 16
-; RV32I-NEXT: srli t0, a1, 24
-; RV32I-NEXT: srli t1, a1, 8
-; RV32I-NEXT: srli t2, a4, 16
-; RV32I-NEXT: srli t3, a4, 24
+; RV32I-NEXT: srli a7, a4, 16
+; RV32I-NEXT: srli t0, a4, 24
+; RV32I-NEXT: srli t1, a4, 8
+; RV32I-NEXT: srli t2, a1, 16
+; RV32I-NEXT: srli t3, a1, 24
; RV32I-NEXT: sb a3, 8(a2)
; RV32I-NEXT: sb a6, 9(a2)
; RV32I-NEXT: sb a0, 10(a2)
; RV32I-NEXT: sb a5, 11(a2)
-; RV32I-NEXT: srli a0, a4, 8
-; RV32I-NEXT: sb a1, 0(a2)
+; RV32I-NEXT: srli a0, a1, 8
+; RV32I-NEXT: sb a4, 0(a2)
; RV32I-NEXT: sb t1, 1(a2)
; RV32I-NEXT: sb a7, 2(a2)
; RV32I-NEXT: sb t0, 3(a2)
-; RV32I-NEXT: sb a4, 4(a2)
+; RV32I-NEXT: sb a1, 4(a2)
; RV32I-NEXT: sb a0, 5(a2)
; RV32I-NEXT: sb t2, 6(a2)
; RV32I-NEXT: sb t3, 7(a2)
@@ -1272,17 +1299,19 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-LABEL: lshr_32bytes:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -144
-; RV64I-NEXT: sd s0, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 120(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 112(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 104(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 96(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s6, 88(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s7, 80(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s8, 72(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s9, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT: addi sp, sp, -160
+; RV64I-NEXT: sd s0, 152(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 144(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 136(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s3, 128(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s4, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s5, 112(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s6, 104(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s7, 96(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s8, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s9, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s10, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s11, 64(sp) # 8-byte Folded Spill
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: lbu a4, 1(a0)
; RV64I-NEXT: lbu a5, 2(a0)
@@ -1299,122 +1328,143 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: lbu s1, 13(a0)
; RV64I-NEXT: lbu s2, 14(a0)
; RV64I-NEXT: lbu s3, 15(a0)
-; RV64I-NEXT: slli a4, a4, 8
-; RV64I-NEXT: slli a5, a5, 16
-; RV64I-NEXT: slli a6, a6, 24
-; RV64I-NEXT: or a3, a4, a3
-; RV64I-NEXT: or a4, a6, a5
; RV64I-NEXT: lbu s4, 16(a0)
; RV64I-NEXT: lbu s5, 17(a0)
; RV64I-NEXT: lbu s6, 18(a0)
; RV64I-NEXT: lbu s7, 19(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: slli s8, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
; RV64I-NEXT: slli t0, t0, 8
; RV64I-NEXT: slli t1, t1, 16
; RV64I-NEXT: slli t2, t2, 24
+; RV64I-NEXT: or a5, a4, a3
+; RV64I-NEXT: or a6, a6, s8
+; RV64I-NEXT: or a3, t0, a7
+; RV64I-NEXT: or a4, t2, t1
+; RV64I-NEXT: lbu s8, 20(a0)
+; RV64I-NEXT: lbu s9, 21(a0)
+; RV64I-NEXT: lbu s10, 22(a0)
+; RV64I-NEXT: lbu s11, 23(a0)
; RV64I-NEXT: slli t4, t4, 8
; RV64I-NEXT: slli t5, t5, 16
; RV64I-NEXT: slli t6, t6, 24
-; RV64I-NEXT: or a5, t0, a7
-; RV64I-NEXT: or a6, t2, t1
-; RV64I-NEXT: or a7, t4, t3
-; RV64I-NEXT: or t0, t6, t5
-; RV64I-NEXT: lbu t5, 20(a0)
-; RV64I-NEXT: lbu t6, 21(a0)
-; RV64I-NEXT: lbu s8, 22(a0)
-; RV64I-NEXT: lbu s9, 23(a0)
; RV64I-NEXT: slli s1, s1, 8
; RV64I-NEXT: slli s2, s2, 16
; RV64I-NEXT: slli s3, s3, 24
+; RV64I-NEXT: or a7, t4, t3
+; RV64I-NEXT: or t0, t6, t5
+; RV64I-NEXT: or t1, s1, s0
+; RV64I-NEXT: or t2, s3, s2
+; RV64I-NEXT: lbu t6, 24(a0)
+; RV64I-NEXT: lbu s0, 25(a0)
+; RV64I-NEXT: lbu s1, 26(a0)
+; RV64I-NEXT: lbu s2, 27(a0)
; RV64I-NEXT: slli s5, s5, 8
; RV64I-NEXT: slli s6, s6, 16
; RV64I-NEXT: slli s7, s7, 24
-; RV64I-NEXT: or t1, s1, s0
-; RV64I-NEXT: or t2, s3, s2
+; RV64I-NEXT: slli s9, s9, 8
; RV64I-NEXT: or t3, s5, s4
; RV64I-NEXT: or t4, s7, s6
-; RV64I-NEXT: lbu s0, 24(a0)
-; RV64I-NEXT: lbu s1, 25(a0)
-; RV64I-NEXT: lbu s2, 26(a0)
-; RV64I-NEXT: lbu s3, 27(a0)
-; RV64I-NEXT: slli t6, t6, 8
-; RV64I-NEXT: slli s8, s8, 16
-; RV64I-NEXT: slli s9, s9, 24
-; RV64I-NEXT: slli s1, s1, 8
-; RV64I-NEXT: or t5, t6, t5
-; RV64I-NEXT: or t6, s9, s8
-; RV64I-NEXT: or s0, s1, s0
-; RV64I-NEXT: lbu s1, 28(a0)
+; RV64I-NEXT: or t5, s9, s8
+; RV64I-NEXT: lbu s3, 28(a0)
; RV64I-NEXT: lbu s4, 29(a0)
; RV64I-NEXT: lbu s5, 30(a0)
; RV64I-NEXT: lbu s6, 31(a0)
-; RV64I-NEXT: lbu a0, 0(a1)
+; RV64I-NEXT: slli s10, s10, 16
+; RV64I-NEXT: slli s11, s11, 24
+; RV64I-NEXT: slli s0, s0, 8
+; RV64I-NEXT: slli s1, s1, 16
+; RV64I-NEXT: slli s2, s2, 24
+; RV64I-NEXT: slli s4, s4, 8
+; RV64I-NEXT: or a0, s11, s10
+; RV64I-NEXT: or t6, s0, t6
+; RV64I-NEXT: or s0, s2, s1
+; RV64I-NEXT: or s1, s4, s3
+; RV64I-NEXT: lbu s2, 0(a1)
+; RV64I-NEXT: lbu s3, 1(a1)
+; RV64I-NEXT: lbu s4, 2(a1)
+; RV64I-NEXT: lbu s7, 3(a1)
+; RV64I-NEXT: slli s5, s5, 16
+; RV64I-NEXT: slli s6, s6, 24
+; RV64I-NEXT: slli s3, s3, 8
+; RV64I-NEXT: slli s4, s4, 16
+; RV64I-NEXT: slli s7, s7, 24
+; RV64I-NEXT: or s5, s6, s5
+; RV64I-NEXT: or s2, s3, s2
+; RV64I-NEXT: or s3, s7, s4
+; RV64I-NEXT: lbu s4, 5(a1)
+; RV64I-NEXT: lbu s6, 4(a1)
+; RV64I-NEXT: lbu s7, 6(a1)
+; RV64I-NEXT: lbu a1, 7(a1)
+; RV64I-NEXT: slli s4, s4, 8
+; RV64I-NEXT: or s4, s4, s6
+; RV64I-NEXT: slli s7, s7, 16
+; RV64I-NEXT: slli a1, a1, 24
+; RV64I-NEXT: or a1, a1, s7
; RV64I-NEXT: sd zero, 32(sp)
; RV64I-NEXT: sd zero, 40(sp)
; RV64I-NEXT: sd zero, 48(sp)
; RV64I-NEXT: sd zero, 56(sp)
-; RV64I-NEXT: slli s2, s2, 16
-; RV64I-NEXT: slli s3, s3, 24
-; RV64I-NEXT: or a1, s3, s2
-; RV64I-NEXT: mv s2, sp
-; RV64I-NEXT: slli s4, s4, 8
-; RV64I-NEXT: slli s5, s5, 16
-; RV64I-NEXT: slli s6, s6, 24
-; RV64I-NEXT: or s1, s4, s1
-; RV64I-NEXT: srli s3, a0, 3
-; RV64I-NEXT: or s4, s6, s5
-; RV64I-NEXT: andi s5, a0, 63
-; RV64I-NEXT: andi s3, s3, 24
-; RV64I-NEXT: xori s5, s5, 63
-; RV64I-NEXT: or a3, a4, a3
-; RV64I-NEXT: or a4, a6, a5
-; RV64I-NEXT: or a5, t0, a7
-; RV64I-NEXT: or a6, t2, t1
-; RV64I-NEXT: or a7, t4, t3
-; RV64I-NEXT: or t0, t6, t5
-; RV64I-NEXT: or a1, a1, s0
-; RV64I-NEXT: or t1, s4, s1
-; RV64I-NEXT: add s2, s2, s3
-; RV64I-NEXT: slli a4, a4, 32
-; RV64I-NEXT: slli a6, a6, 32
-; RV64I-NEXT: slli t0, t0, 32
-; RV64I-NEXT: slli t1, t1, 32
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: mv a6, sp
; RV64I-NEXT: or a3, a4, a3
-; RV64I-NEXT: or a4, a6, a5
-; RV64I-NEXT: or a5, t0, a7
-; RV64I-NEXT: or a1, t1, a1
+; RV64I-NEXT: or a4, t0, a7
+; RV64I-NEXT: or a7, t2, t1
+; RV64I-NEXT: or t0, t4, t3
+; RV64I-NEXT: or a0, a0, t5
+; RV64I-NEXT: or t1, s0, t6
+; RV64I-NEXT: or t2, s5, s1
+; RV64I-NEXT: or t3, s3, s2
+; RV64I-NEXT: or a1, a1, s4
+; RV64I-NEXT: slli a3, a3, 32
+; RV64I-NEXT: slli a7, a7, 32
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: slli t2, t2, 32
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: or a3, a3, a5
+; RV64I-NEXT: or a4, a7, a4
+; RV64I-NEXT: or a0, a0, t0
+; RV64I-NEXT: or a5, t2, t1
+; RV64I-NEXT: or a1, a1, t3
; RV64I-NEXT: sd a3, 0(sp)
; RV64I-NEXT: sd a4, 8(sp)
-; RV64I-NEXT: sd a5, 16(sp)
-; RV64I-NEXT: sd a1, 24(sp)
-; RV64I-NEXT: ld a1, 8(s2)
-; RV64I-NEXT: ld a3, 16(s2)
-; RV64I-NEXT: ld a4, 0(s2)
-; RV64I-NEXT: ld a5, 24(s2)
-; RV64I-NEXT: srl a6, a1, a0
-; RV64I-NEXT: slli a7, a3, 1
-; RV64I-NEXT: srl a4, a4, a0
-; RV64I-NEXT: slli a1, a1, 1
-; RV64I-NEXT: srl a3, a3, a0
+; RV64I-NEXT: sd a0, 16(sp)
+; RV64I-NEXT: sd a5, 24(sp)
+; RV64I-NEXT: srli a0, a1, 3
+; RV64I-NEXT: andi a3, a1, 63
+; RV64I-NEXT: andi a0, a0, 24
+; RV64I-NEXT: xori a3, a3, 63
+; RV64I-NEXT: add a0, a6, a0
+; RV64I-NEXT: ld a4, 8(a0)
+; RV64I-NEXT: ld a5, 16(a0)
+; RV64I-NEXT: ld a6, 0(a0)
+; RV64I-NEXT: ld a0, 24(a0)
+; RV64I-NEXT: srl a7, a4, a1
; RV64I-NEXT: slli t0, a5, 1
-; RV64I-NEXT: srl a5, a5, a0
-; RV64I-NEXT: sll a0, a7, s5
-; RV64I-NEXT: sll a1, a1, s5
-; RV64I-NEXT: sll a7, t0, s5
-; RV64I-NEXT: srli t0, a5, 56
-; RV64I-NEXT: srli t1, a5, 48
-; RV64I-NEXT: srli t2, a5, 40
-; RV64I-NEXT: srli t3, a5, 32
-; RV64I-NEXT: srli t4, a5, 24
-; RV64I-NEXT: srli t5, a5, 16
-; RV64I-NEXT: srli t6, a5, 8
-; RV64I-NEXT: or a0, a6, a0
-; RV64I-NEXT: or a1, a4, a1
-; RV64I-NEXT: or a3, a3, a7
+; RV64I-NEXT: srl a6, a6, a1
+; RV64I-NEXT: slli a4, a4, 1
+; RV64I-NEXT: srl a5, a5, a1
+; RV64I-NEXT: slli t1, a0, 1
+; RV64I-NEXT: srl t2, a0, a1
+; RV64I-NEXT: sll a0, t0, a3
+; RV64I-NEXT: sll a1, a4, a3
+; RV64I-NEXT: sll a3, t1, a3
+; RV64I-NEXT: srli a4, t2, 56
+; RV64I-NEXT: srli t0, t2, 48
+; RV64I-NEXT: srli t1, t2, 40
+; RV64I-NEXT: srli t3, t2, 32
+; RV64I-NEXT: srli t4, t2, 24
+; RV64I-NEXT: srli t5, t2, 16
+; RV64I-NEXT: srli t6, t2, 8
+; RV64I-NEXT: or a0, a7, a0
+; RV64I-NEXT: or a1, a6, a1
+; RV64I-NEXT: or a3, a5, a3
; RV64I-NEXT: sb t3, 28(a2)
-; RV64I-NEXT: sb t2, 29(a2)
-; RV64I-NEXT: sb t1, 30(a2)
-; RV64I-NEXT: sb t0, 31(a2)
-; RV64I-NEXT: sb a5, 24(a2)
+; RV64I-NEXT: sb t1, 29(a2)
+; RV64I-NEXT: sb t0, 30(a2)
+; RV64I-NEXT: sb a4, 31(a2)
+; RV64I-NEXT: sb t2, 24(a2)
; RV64I-NEXT: sb t6, 25(a2)
; RV64I-NEXT: sb t5, 26(a2)
; RV64I-NEXT: sb t4, 27(a2)
@@ -1463,17 +1513,19 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: sb a1, 9(a2)
; RV64I-NEXT: sb a5, 10(a2)
; RV64I-NEXT: sb a3, 11(a2)
-; RV64I-NEXT: ld s0, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 120(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 112(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 104(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 96(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s6, 88(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s7, 80(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s8, 72(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s9, 64(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 144
+; RV64I-NEXT: ld s0, 152(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 144(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 136(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s3, 128(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s4, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s5, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s6, 104(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s7, 96(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s8, 88(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s9, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s10, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s11, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 160
; RV64I-NEXT: ret
;
; RV32I-LABEL: lshr_32bytes:
@@ -1498,55 +1550,67 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV32I-NEXT: lbu a7, 3(a0)
; RV32I-NEXT: lbu a5, 4(a0)
; RV32I-NEXT: lbu t0, 5(a0)
-; RV32I-NEXT: lbu t3, 6(a0)
-; RV32I-NEXT: lbu t6, 7(a0)
-; RV32I-NEXT: lbu s2, 8(a0)
-; RV32I-NEXT: lbu s3, 9(a0)
-; RV32I-NEXT: lbu s4, 10(a0)
-; RV32I-NEXT: lbu s5, 11(a0)
-; RV32I-NEXT: lbu s7, 12(a0)
-; RV32I-NEXT: lbu s8, 13(a0)
-; RV32I-NEXT: lbu s9, 14(a0)
-; RV32I-NEXT: lbu s10, 15(a0)
-; RV32I-NEXT: lbu s11, 16(a0)
-; RV32I-NEXT: lbu ra, 17(a0)
-; RV32I-NEXT: lbu t4, 18(a0)
-; RV32I-NEXT: lbu s0, 19(a0)
+; RV32I-NEXT: lbu t1, 6(a0)
+; RV32I-NEXT: lbu t2, 7(a0)
+; RV32I-NEXT: lbu t3, 8(a0)
+; RV32I-NEXT: lbu t4, 9(a0)
+; RV32I-NEXT: lbu t5, 10(a0)
+; RV32I-NEXT: lbu t6, 11(a0)
+; RV32I-NEXT: lbu s0, 12(a0)
+; RV32I-NEXT: lbu s2, 13(a0)
+; RV32I-NEXT: lbu s4, 14(a0)
+; RV32I-NEXT: lbu s5, 15(a0)
+; RV32I-NEXT: lbu s6, 16(a0)
+; RV32I-NEXT: lbu s7, 17(a0)
+; RV32I-NEXT: lbu s8, 18(a0)
+; RV32I-NEXT: lbu s9, 19(a0)
; RV32I-NEXT: slli a4, a4, 8
; RV32I-NEXT: slli a6, a6, 16
; RV32I-NEXT: slli a7, a7, 24
; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: sw a3, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: or a4, a7, a6
-; RV32I-NEXT: lbu t1, 20(a0)
-; RV32I-NEXT: lbu t2, 21(a0)
-; RV32I-NEXT: lbu t5, 22(a0)
-; RV32I-NEXT: lbu s1, 23(a0)
+; RV32I-NEXT: lbu s10, 20(a0)
+; RV32I-NEXT: lbu s11, 21(a0)
+; RV32I-NEXT: lbu ra, 22(a0)
+; RV32I-NEXT: lbu a3, 23(a0)
; RV32I-NEXT: slli t0, t0, 8
-; RV32I-NEXT: slli t3, t3, 16
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli t2, t2, 24
+; RV32I-NEXT: slli t4, t4, 8
+; RV32I-NEXT: slli t5, t5, 16
; RV32I-NEXT: slli t6, t6, 24
-; RV32I-NEXT: slli s3, s3, 8
+; RV32I-NEXT: or a5, t0, a5
+; RV32I-NEXT: or a6, t2, t1
+; RV32I-NEXT: or a7, t4, t3
+; RV32I-NEXT: or t0, t6, t5
+; RV32I-NEXT: lbu s1, 24(a0)
+; RV32I-NEXT: lbu s3, 25(a0)
+; RV32I-NEXT: lbu t4, 26(a0)
+; RV32I-NEXT: lbu t5, 27(a0)
+; RV32I-NEXT: slli s2, s2, 8
; RV32I-NEXT: slli s4, s4, 16
; RV32I-NEXT: slli s5, s5, 24
-; RV32I-NEXT: or a5, t0, a5
-; RV32I-NEXT: or a6, t6, t3
-; RV32I-NEXT: or a7, s3, s2
-; RV32I-NEXT: or t0, s5, s4
-; RV32I-NEXT: lbu t3, 24(a0)
-; RV32I-NEXT: lbu s5, 25(a0)
-; RV32I-NEXT: lbu s6, 26(a0)
-; RV32I-NEXT: lbu t6, 27(a0)
-; RV32I-NEXT: slli s8, s8, 8
-; RV32I-NEXT: slli s9, s9, 16
-; RV32I-NEXT: slli s10, s10, 24
-; RV32I-NEXT: slli ra, ra, 8
-; RV32I-NEXT: or s7, s8, s7
-; RV32I-NEXT: or s2, s10, s9
-; RV32I-NEXT: or s3, ra, s11
-; RV32I-NEXT: lbu s4, 28(a0)
-; RV32I-NEXT: lbu s8, 29(a0)
-; RV32I-NEXT: lbu s9, 30(a0)
-; RV32I-NEXT: lbu s10, 31(a0)
-; RV32I-NEXT: lbu a0, 0(a1)
+; RV32I-NEXT: slli s7, s7, 8
+; RV32I-NEXT: or t1, s2, s0
+; RV32I-NEXT: or t2, s5, s4
+; RV32I-NEXT: or t3, s7, s6
+; RV32I-NEXT: lbu t6, 28(a0)
+; RV32I-NEXT: lbu s4, 29(a0)
+; RV32I-NEXT: lbu s5, 30(a0)
+; RV32I-NEXT: lbu s6, 31(a0)
+; RV32I-NEXT: slli s8, s8, 16
+; RV32I-NEXT: slli s9, s9, 24
+; RV32I-NEXT: slli s11, s11, 8
+; RV32I-NEXT: slli ra, ra, 16
+; RV32I-NEXT: slli a3, a3, 24
+; RV32I-NEXT: or a0, s9, s8
+; RV32I-NEXT: or s0, s11, s10
+; RV32I-NEXT: or s2, a3, ra
+; RV32I-NEXT: lbu a3, 0(a1)
+; RV32I-NEXT: lbu s7, 1(a1)
+; RV32I-NEXT: lbu s8, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
; RV32I-NEXT: sw zero, 56(sp)
; RV32I-NEXT: sw zero, 60(sp)
; RV32I-NEXT: sw zero, 64(sp)
@@ -1555,90 +1619,89 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV32I-NEXT: sw zero, 44(sp)
; RV32I-NEXT: sw zero, 48(sp)
; RV32I-NEXT: sw zero, 52(sp)
+; RV32I-NEXT: slli s3, s3, 8
+; RV32I-NEXT: or s1, s3, s1
+; RV32I-NEXT: addi s3, sp, 8
; RV32I-NEXT: slli t4, t4, 16
-; RV32I-NEXT: slli s0, s0, 24
-; RV32I-NEXT: or t4, s0, t4
-; RV32I-NEXT: addi s0, sp, 8
-; RV32I-NEXT: slli t2, t2, 8
-; RV32I-NEXT: slli t5, t5, 16
-; RV32I-NEXT: slli s1, s1, 24
-; RV32I-NEXT: slli s5, s5, 8
-; RV32I-NEXT: slli s6, s6, 16
-; RV32I-NEXT: slli t6, t6, 24
-; RV32I-NEXT: slli s8, s8, 8
-; RV32I-NEXT: slli s9, s9, 16
-; RV32I-NEXT: slli s10, s10, 24
-; RV32I-NEXT: or t1, t2, t1
+; RV32I-NEXT: slli t5, t5, 24
+; RV32I-NEXT: slli s4, s4, 8
+; RV32I-NEXT: slli s5, s5, 16
+; RV32I-NEXT: slli s6, s6, 24
+; RV32I-NEXT: slli s7, s7, 8
+; RV32I-NEXT: slli s8, s8, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or t4, t5, t4
+; RV32I-NEXT: or t5, s4, t6
+; RV32I-NEXT: or t6, s6, s5
+; RV32I-NEXT: or a3, s7, a3
+; RV32I-NEXT: or a1, a1, s8
+; RV32I-NEXT: lw s4, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: or a4, a4, s4
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a7, t2, t1
+; RV32I-NEXT: or t0, a0, t3
+; RV32I-NEXT: or t1, s2, s0
+; RV32I-NEXT: or t2, t4, s1
+; RV32I-NEXT: or t3, t6, t5
+; RV32I-NEXT: or a0, a1, a3
+; RV32I-NEXT: sw t0, 24(sp)
+; RV32I-NEXT: sw t1, 28(sp)
+; RV32I-NEXT: sw t2, 32(sp)
+; RV32I-NEXT: sw t3, 36(sp)
+; RV32I-NEXT: sw a4, 8(sp)
+; RV32I-NEXT: sw a5, 12(sp)
+; RV32I-NEXT: sw a6, 16(sp)
+; RV32I-NEXT: sw a7, 20(sp)
; RV32I-NEXT: srli a1, a0, 3
-; RV32I-NEXT: or t2, s1, t5
-; RV32I-NEXT: andi t5, a0, 31
-; RV32I-NEXT: or t3, s5, t3
-; RV32I-NEXT: or t6, t6, s6
-; RV32I-NEXT: or s1, s8, s4
-; RV32I-NEXT: or s4, s10, s9
-; RV32I-NEXT: andi s5, a1, 28
-; RV32I-NEXT: xori a1, t5, 31
-; RV32I-NEXT: or a3, a4, a3
-; RV32I-NEXT: or a4, a6, a5
-; RV32I-NEXT: or a5, t0, a7
-; RV32I-NEXT: or a6, s2, s7
-; RV32I-NEXT: or a7, t4, s3
-; RV32I-NEXT: or t0, t2, t1
-; RV32I-NEXT: or t1, t6, t3
-; RV32I-NEXT: or t2, s4, s1
-; RV32I-NEXT: add s0, s0, s5
-; RV32I-NEXT: sw a7, 24(sp)
-; RV32I-NEXT: sw t0, 28(sp)
-; RV32I-NEXT: sw t1, 32(sp)
-; RV32I-NEXT: sw t2, 36(sp)
-; RV32I-NEXT: sw a3, 8(sp)
-; RV32I-NEXT: sw a4, 12(sp)
-; RV32I-NEXT: sw a5, 16(sp)
-; RV32I-NEXT: sw a6, 20(sp)
-; RV32I-NEXT: lw a3, 0(s0)
-; RV32I-NEXT: lw a4, 4(s0)
-; RV32I-NEXT: lw a5, 8(s0)
-; RV32I-NEXT: lw a6, 12(s0)
-; RV32I-NEXT: lw a7, 16(s0)
-; RV32I-NEXT: lw t0, 20(s0)
-; RV32I-NEXT: lw t1, 24(s0)
-; RV32I-NEXT: lw t2, 28(s0)
-; RV32I-NEXT: srl t3, a4, a0
-; RV32I-NEXT: slli t4, a5, 1
+; RV32I-NEXT: andi a3, a0, 31
+; RV32I-NEXT: andi a4, a1, 28
+; RV32I-NEXT: xori a1, a3, 31
+; RV32I-NEXT: add a4, s3, a4
+; RV32I-NEXT: lw a3, 0(a4)
+; RV32I-NEXT: lw a5, 4(a4)
+; RV32I-NEXT: lw a6, 8(a4)
+; RV32I-NEXT: lw a7, 12(a4)
+; RV32I-NEXT: lw t0, 16(a4)
+; RV32I-NEXT: lw t1, 20(a4)
+; RV32I-NEXT: lw t2, 24(a4)
+; RV32I-NEXT: lw a4, 28(a4)
+; RV32I-NEXT: srl t3, a5, a0
+; RV32I-NEXT: slli t4, a6, 1
; RV32I-NEXT: srl a3, a3, a0
-; RV32I-NEXT: slli a4, a4, 1
-; RV32I-NEXT: srl t5, a6, a0
-; RV32I-NEXT: slli t6, a7, 1
-; RV32I-NEXT: srl a5, a5, a0
-; RV32I-NEXT: slli a6, a6, 1
-; RV32I-NEXT: srl s0, t0, a0
-; RV32I-NEXT: slli s1, t1, 1
-; RV32I-NEXT: srl a7, a7, a0
-; RV32I-NEXT: slli t0, t0, 1
-; RV32I-NEXT: srl t1, t1, a0
-; RV32I-NEXT: slli s2, t2, 1
+; RV32I-NEXT: slli a5, a5, 1
+; RV32I-NEXT: srl t5, a7, a0
+; RV32I-NEXT: slli t6, t0, 1
+; RV32I-NEXT: srl a6, a6, a0
+; RV32I-NEXT: slli a7, a7, 1
+; RV32I-NEXT: srl s0, t1, a0
+; RV32I-NEXT: slli s1, t2, 1
+; RV32I-NEXT: srl t0, t0, a0
+; RV32I-NEXT: slli t1, t1, 1
; RV32I-NEXT: srl t2, t2, a0
+; RV32I-NEXT: slli s2, a4, 1
+; RV32I-NEXT: srl s3, a4, a0
; RV32I-NEXT: sll a0, t4, a1
-; RV32I-NEXT: sll a4, a4, a1
-; RV32I-NEXT: sll t4, t6, a1
-; RV32I-NEXT: sll a6, a6, a1
-; RV32I-NEXT: sll t6, s1, a1
-; RV32I-NEXT: sll t0, t0, a1
-; RV32I-NEXT: sll s1, s2, a1
-; RV32I-NEXT: srli s2, t2, 24
-; RV32I-NEXT: srli s3, t2, 16
-; RV32I-NEXT: srli s4, t2, 8
+; RV32I-NEXT: sll a4, a5, a1
+; RV32I-NEXT: sll a5, t6, a1
+; RV32I-NEXT: sll a7, a7, a1
+; RV32I-NEXT: sll t4, s1, a1
+; RV32I-NEXT: sll t1, t1, a1
+; RV32I-NEXT: sll t6, s2, a1
+; RV32I-NEXT: srli s1, s3, 24
+; RV32I-NEXT: srli s2, s3, 16
+; RV32I-NEXT: srli s4, s3, 8
; RV32I-NEXT: or a0, t3, a0
; RV32I-NEXT: or a1, a3, a4
-; RV32I-NEXT: or a3, t5, t4
-; RV32I-NEXT: or a4, a5, a6
-; RV32I-NEXT: or a5, s0, t6
-; RV32I-NEXT: or a6, a7, t0
-; RV32I-NEXT: or a7, t1, s1
-; RV32I-NEXT: sb t2, 28(a2)
+; RV32I-NEXT: or a3, t5, a5
+; RV32I-NEXT: or a4, a6, a7
+; RV32I-NEXT: or a5, s0, t4
+; RV32I-NEXT: or a6, t0, t1
+; RV32I-NEXT: or a7, t2, t6
+; RV32I-NEXT: sb s3, 28(a2)
; RV32I-NEXT: sb s4, 29(a2)
-; RV32I-NEXT: sb s3, 30(a2)
-; RV32I-NEXT: sb s2, 31(a2)
+; RV32I-NEXT: sb s2, 30(a2)
+; RV32I-NEXT: sb s1, 31(a2)
; RV32I-NEXT: srli t0, a7, 24
; RV32I-NEXT: srli t1, a7, 16
; RV32I-NEXT: srli t2, a7, 8
@@ -1712,17 +1775,19 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-LABEL: shl_32bytes:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -144
-; RV64I-NEXT: sd s0, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 120(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 112(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 104(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 96(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s6, 88(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s7, 80(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s8, 72(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s9, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT: addi sp, sp, -160
+; RV64I-NEXT: sd s0, 152(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 144(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 136(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s3, 128(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s4, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s5, 112(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s6, 104(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s7, 96(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s8, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s9, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s10, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s11, 64(sp) # 8-byte Folded Spill
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: lbu a4, 1(a0)
; RV64I-NEXT: lbu a5, 2(a0)
@@ -1739,125 +1804,146 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: lbu s1, 13(a0)
; RV64I-NEXT: lbu s2, 14(a0)
; RV64I-NEXT: lbu s3, 15(a0)
-; RV64I-NEXT: slli a4, a4, 8
-; RV64I-NEXT: slli a5, a5, 16
-; RV64I-NEXT: slli a6, a6, 24
-; RV64I-NEXT: or a3, a4, a3
-; RV64I-NEXT: or a4, a6, a5
; RV64I-NEXT: lbu s4, 16(a0)
; RV64I-NEXT: lbu s5, 17(a0)
; RV64I-NEXT: lbu s6, 18(a0)
; RV64I-NEXT: lbu s7, 19(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: slli s8, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
; RV64I-NEXT: slli t0, t0, 8
; RV64I-NEXT: slli t1, t1, 16
; RV64I-NEXT: slli t2, t2, 24
+; RV64I-NEXT: or a5, a4, a3
+; RV64I-NEXT: or a6, a6, s8
+; RV64I-NEXT: or a3, t0, a7
+; RV64I-NEXT: or a4, t2, t1
+; RV64I-NEXT: lbu s8, 20(a0)
+; RV64I-NEXT: lbu s9, 21(a0)
+; RV64I-NEXT: lbu s10, 22(a0)
+; RV64I-NEXT: lbu s11, 23(a0)
; RV64I-NEXT: slli t4, t4, 8
; RV64I-NEXT: slli t5, t5, 16
; RV64I-NEXT: slli t6, t6, 24
-; RV64I-NEXT: or a5, t0, a7
-; RV64I-NEXT: or a6, t2, t1
-; RV64I-NEXT: or a7, t4, t3
-; RV64I-NEXT: or t0, t6, t5
-; RV64I-NEXT: lbu t5, 20(a0)
-; RV64I-NEXT: lbu t6, 21(a0)
-; RV64I-NEXT: lbu s8, 22(a0)
-; RV64I-NEXT: lbu s9, 23(a0)
; RV64I-NEXT: slli s1, s1, 8
; RV64I-NEXT: slli s2, s2, 16
; RV64I-NEXT: slli s3, s3, 24
+; RV64I-NEXT: or a7, t4, t3
+; RV64I-NEXT: or t0, t6, t5
+; RV64I-NEXT: or t1, s1, s0
+; RV64I-NEXT: or t2, s3, s2
+; RV64I-NEXT: lbu t6, 24(a0)
+; RV64I-NEXT: lbu s0, 25(a0)
+; RV64I-NEXT: lbu s1, 26(a0)
+; RV64I-NEXT: lbu s2, 27(a0)
; RV64I-NEXT: slli s5, s5, 8
; RV64I-NEXT: slli s6, s6, 16
; RV64I-NEXT: slli s7, s7, 24
-; RV64I-NEXT: or t1, s1, s0
-; RV64I-NEXT: or t2, s3, s2
+; RV64I-NEXT: slli s9, s9, 8
; RV64I-NEXT: or t3, s5, s4
; RV64I-NEXT: or t4, s7, s6
-; RV64I-NEXT: lbu s0, 24(a0)
-; RV64I-NEXT: lbu s1, 25(a0)
-; RV64I-NEXT: lbu s2, 26(a0)
-; RV64I-NEXT: lbu s3, 27(a0)
-; RV64I-NEXT: slli t6, t6, 8
-; RV64I-NEXT: slli s8, s8, 16
-; RV64I-NEXT: slli s9, s9, 24
-; RV64I-NEXT: slli s1, s1, 8
-; RV64I-NEXT: or t5, t6, t5
-; RV64I-NEXT: or t6, s9, s8
-; RV64I-NEXT: or s0, s1, s0
-; RV64I-NEXT: lbu s1, 28(a0)
+; RV64I-NEXT: or t5, s9, s8
+; RV64I-NEXT: lbu s3, 28(a0)
; RV64I-NEXT: lbu s4, 29(a0)
; RV64I-NEXT: lbu s5, 30(a0)
; RV64I-NEXT: lbu s6, 31(a0)
-; RV64I-NEXT: lbu a0, 0(a1)
+; RV64I-NEXT: slli s10, s10, 16
+; RV64I-NEXT: slli s11, s11, 24
+; RV64I-NEXT: slli s0, s0, 8
+; RV64I-NEXT: slli s1, s1, 16
+; RV64I-NEXT: slli s2, s2, 24
+; RV64I-NEXT: slli s4, s4, 8
+; RV64I-NEXT: or a0, s11, s10
+; RV64I-NEXT: or t6, s0, t6
+; RV64I-NEXT: or s0, s2, s1
+; RV64I-NEXT: or s1, s4, s3
+; RV64I-NEXT: lbu s2, 0(a1)
+; RV64I-NEXT: lbu s3, 1(a1)
+; RV64I-NEXT: lbu s4, 2(a1)
+; RV64I-NEXT: lbu s7, 3(a1)
+; RV64I-NEXT: slli s5, s5, 16
+; RV64I-NEXT: slli s6, s6, 24
+; RV64I-NEXT: slli s3, s3, 8
+; RV64I-NEXT: slli s4, s4, 16
+; RV64I-NEXT: slli s7, s7, 24
+; RV64I-NEXT: or s5, s6, s5
+; RV64I-NEXT: or s2, s3, s2
+; RV64I-NEXT: or s3, s7, s4
+; RV64I-NEXT: lbu s4, 5(a1)
+; RV64I-NEXT: lbu s6, 4(a1)
+; RV64I-NEXT: lbu s7, 6(a1)
+; RV64I-NEXT: lbu a1, 7(a1)
+; RV64I-NEXT: slli s4, s4, 8
+; RV64I-NEXT: or s4, s4, s6
+; RV64I-NEXT: slli s7, s7, 16
+; RV64I-NEXT: slli a1, a1, 24
+; RV64I-NEXT: or a1, a1, s7
; RV64I-NEXT: sd zero, 0(sp)
; RV64I-NEXT: sd zero, 8(sp)
; RV64I-NEXT: sd zero, 16(sp)
; RV64I-NEXT: sd zero, 24(sp)
-; RV64I-NEXT: slli s2, s2, 16
-; RV64I-NEXT: slli s3, s3, 24
-; RV64I-NEXT: or a1, s3, s2
-; RV64I-NEXT: addi s2, sp, 32
-; RV64I-NEXT: slli s4, s4, 8
-; RV64I-NEXT: slli s5, s5, 16
-; RV64I-NEXT: slli s6, s6, 24
-; RV64I-NEXT: or s1, s4, s1
-; RV64I-NEXT: srli s3, a0, 3
-; RV64I-NEXT: or s4, s6, s5
-; RV64I-NEXT: andi s5, a0, 63
-; RV64I-NEXT: andi s3, s3, 24
-; RV64I-NEXT: or a3, a4, a3
-; RV64I-NEXT: or a4, a6, a5
-; RV64I-NEXT: or a5, t0, a7
-; RV64I-NEXT: or a6, t2, t1
-; RV64I-NEXT: or a7, t4, t3
-; RV64I-NEXT: or t0, t6, t5
-; RV64I-NEXT: or a1, a1, s0
-; RV64I-NEXT: or t1, s4, s1
-; RV64I-NEXT: sub t2, s2, s3
-; RV64I-NEXT: slli a4, a4, 32
-; RV64I-NEXT: slli a6, a6, 32
-; RV64I-NEXT: slli t0, t0, 32
-; RV64I-NEXT: slli t1, t1, 32
+; RV64I-NEXT: or a5, a6, a5
+; RV64I-NEXT: addi a6, sp, 32
; RV64I-NEXT: or a3, a4, a3
-; RV64I-NEXT: or a4, a6, a5
-; RV64I-NEXT: or a5, t0, a7
-; RV64I-NEXT: or a1, t1, a1
+; RV64I-NEXT: or a4, t0, a7
+; RV64I-NEXT: or a7, t2, t1
+; RV64I-NEXT: or t0, t4, t3
+; RV64I-NEXT: or a0, a0, t5
+; RV64I-NEXT: or t1, s0, t6
+; RV64I-NEXT: or t2, s5, s1
+; RV64I-NEXT: or t3, s3, s2
+; RV64I-NEXT: or a1, a1, s4
+; RV64I-NEXT: slli a3, a3, 32
+; RV64I-NEXT: slli a7, a7, 32
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: slli t2, t2, 32
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: or a3, a3, a5
+; RV64I-NEXT: or a4, a7, a4
+; RV64I-NEXT: or a0, a0, t0
+; RV64I-NEXT: or a5, t2, t1
+; RV64I-NEXT: or a1, a1, t3
; RV64I-NEXT: sd a3, 32(sp)
; RV64I-NEXT: sd a4, 40(sp)
-; RV64I-NEXT: sd a5, 48(sp)
-; RV64I-NEXT: sd a1, 56(sp)
-; RV64I-NEXT: ld a1, 0(t2)
-; RV64I-NEXT: ld a3, 8(t2)
-; RV64I-NEXT: ld a4, 16(t2)
-; RV64I-NEXT: ld a5, 24(t2)
-; RV64I-NEXT: xori a6, s5, 63
-; RV64I-NEXT: sll a7, a3, a0
-; RV64I-NEXT: srli t0, a1, 1
-; RV64I-NEXT: sll a5, a5, a0
-; RV64I-NEXT: srli t1, a4, 1
-; RV64I-NEXT: sll a4, a4, a0
-; RV64I-NEXT: srli a3, a3, 1
-; RV64I-NEXT: sll t2, a1, a0
-; RV64I-NEXT: srl a0, t0, a6
-; RV64I-NEXT: srl a1, t1, a6
-; RV64I-NEXT: srl a3, a3, a6
-; RV64I-NEXT: srli a6, t2, 56
-; RV64I-NEXT: srli t0, t2, 48
-; RV64I-NEXT: srli t1, t2, 40
-; RV64I-NEXT: srli t3, t2, 32
-; RV64I-NEXT: srli t4, t2, 24
-; RV64I-NEXT: srli t5, t2, 16
-; RV64I-NEXT: srli t6, t2, 8
-; RV64I-NEXT: or a0, a7, a0
-; RV64I-NEXT: or a1, a5, a1
-; RV64I-NEXT: or a3, a4, a3
-; RV64I-NEXT: sb t3, 4(a2)
-; RV64I-NEXT: sb t1, 5(a2)
-; RV64I-NEXT: sb t0, 6(a2)
-; RV64I-NEXT: sb a6, 7(a2)
-; RV64I-NEXT: sb t2, 0(a2)
-; RV64I-NEXT: sb t6, 1(a2)
-; RV64I-NEXT: sb t5, 2(a2)
-; RV64I-NEXT: sb t4, 3(a2)
+; RV64I-NEXT: sd a0, 48(sp)
+; RV64I-NEXT: sd a5, 56(sp)
+; RV64I-NEXT: srli a0, a1, 3
+; RV64I-NEXT: andi a3, a1, 63
+; RV64I-NEXT: andi a0, a0, 24
+; RV64I-NEXT: sub a0, a6, a0
+; RV64I-NEXT: ld a4, 0(a0)
+; RV64I-NEXT: ld a5, 8(a0)
+; RV64I-NEXT: ld a6, 16(a0)
+; RV64I-NEXT: ld a0, 24(a0)
+; RV64I-NEXT: xori a3, a3, 63
+; RV64I-NEXT: sll a7, a5, a1
+; RV64I-NEXT: srli t0, a4, 1
+; RV64I-NEXT: sll t1, a0, a1
+; RV64I-NEXT: srli a0, a6, 1
+; RV64I-NEXT: sll a6, a6, a1
+; RV64I-NEXT: srli a5, a5, 1
+; RV64I-NEXT: sll a4, a4, a1
+; RV64I-NEXT: srl a1, t0, a3
+; RV64I-NEXT: srl t0, a0, a3
+; RV64I-NEXT: srl a3, a5, a3
+; RV64I-NEXT: srli a5, a4, 56
+; RV64I-NEXT: srli t2, a4, 48
+; RV64I-NEXT: srli t3, a4, 40
+; RV64I-NEXT: srli t4, a4, 32
+; RV64I-NEXT: srli t5, a4, 24
+; RV64I-NEXT: srli t6, a4, 16
+; RV64I-NEXT: srli s0, a4, 8
+; RV64I-NEXT: or a0, a7, a1
+; RV64I-NEXT: or a1, t1, t0
+; RV64I-NEXT: or a3, a6, a3
+; RV64I-NEXT: sb t4, 4(a2)
+; RV64I-NEXT: sb t3, 5(a2)
+; RV64I-NEXT: sb t2, 6(a2)
+; RV64I-NEXT: sb a5, 7(a2)
+; RV64I-NEXT: sb a4, 0(a2)
+; RV64I-NEXT: sb s0, 1(a2)
+; RV64I-NEXT: sb t6, 2(a2)
+; RV64I-NEXT: sb t5, 3(a2)
; RV64I-NEXT: srli a4, a3, 56
; RV64I-NEXT: srli a5, a3, 48
; RV64I-NEXT: srli a6, a3, 40
@@ -1903,17 +1989,19 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: sb a1, 9(a2)
; RV64I-NEXT: sb a5, 10(a2)
; RV64I-NEXT: sb a3, 11(a2)
-; RV64I-NEXT: ld s0, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 120(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 112(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 104(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 96(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s6, 88(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s7, 80(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s8, 72(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s9, 64(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 144
+; RV64I-NEXT: ld s0, 152(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 144(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 136(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s3, 128(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s4, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s5, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s6, 104(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s7, 96(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s8, 88(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s9, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s10, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s11, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 160
; RV64I-NEXT: ret
;
; RV32I-LABEL: shl_32bytes:
@@ -1938,55 +2026,67 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV32I-NEXT: lbu a7, 3(a0)
; RV32I-NEXT: lbu a5, 4(a0)
; RV32I-NEXT: lbu t0, 5(a0)
-; RV32I-NEXT: lbu t3, 6(a0)
-; RV32I-NEXT: lbu t6, 7(a0)
-; RV32I-NEXT: lbu s2, 8(a0)
-; RV32I-NEXT: lbu s3, 9(a0)
-; RV32I-NEXT: lbu s4, 10(a0)
-; RV32I-NEXT: lbu s5, 11(a0)
-; RV32I-NEXT: lbu s7, 12(a0)
-; RV32I-NEXT: lbu s8, 13(a0)
-; RV32I-NEXT: lbu s9, 14(a0)
-; RV32I-NEXT: lbu s10, 15(a0)
-; RV32I-NEXT: lbu s11, 16(a0)
-; RV32I-NEXT: lbu ra, 17(a0)
-; RV32I-NEXT: lbu t4, 18(a0)
-; RV32I-NEXT: lbu s0, 19(a0)
+; RV32I-NEXT: lbu t1, 6(a0)
+; RV32I-NEXT: lbu t2, 7(a0)
+; RV32I-NEXT: lbu t3, 8(a0)
+; RV32I-NEXT: lbu t4, 9(a0)
+; RV32I-NEXT: lbu t5, 10(a0)
+; RV32I-NEXT: lbu t6, 11(a0)
+; RV32I-NEXT: lbu s0, 12(a0)
+; RV32I-NEXT: lbu s2, 13(a0)
+; RV32I-NEXT: lbu s4, 14(a0)
+; RV32I-NEXT: lbu s5, 15(a0)
+; RV32I-NEXT: lbu s6, 16(a0)
+; RV32I-NEXT: lbu s7, 17(a0)
+; RV32I-NEXT: lbu s8, 18(a0)
+; RV32I-NEXT: lbu s9, 19(a0)
; RV32I-NEXT: slli a4, a4, 8
; RV32I-NEXT: slli a6, a6, 16
; RV32I-NEXT: slli a7, a7, 24
; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: sw a3, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: or a4, a7, a6
-; RV32I-NEXT: lbu t1, 20(a0)
-; RV32I-NEXT: lbu t2, 21(a0)
-; RV32I-NEXT: lbu t5, 22(a0)
-; RV32I-NEXT: lbu s1, 23(a0)
+; RV32I-NEXT: lbu s10, 20(a0)
+; RV32I-NEXT: lbu s11, 21(a0)
+; RV32I-NEXT: lbu ra, 22(a0)
+; RV32I-NEXT: lbu a3, 23(a0)
; RV32I-NEXT: slli t0, t0, 8
-; RV32I-NEXT: slli t3, t3, 16
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli t2, t2, 24
+; RV32I-NEXT: slli t4, t4, 8
+; RV32I-NEXT: slli t5, t5, 16
; RV32I-NEXT: slli t6, t6, 24
-; RV32I-NEXT: slli s3, s3, 8
+; RV32I-NEXT: or a5, t0, a5
+; RV32I-NEXT: or a6, t2, t1
+; RV32I-NEXT: or a7, t4, t3
+; RV32I-NEXT: or t0, t6, t5
+; RV32I-NEXT: lbu s1, 24(a0)
+; RV32I-NEXT: lbu s3, 25(a0)
+; RV32I-NEXT: lbu t4, 26(a0)
+; RV32I-NEXT: lbu t5, 27(a0)
+; RV32I-NEXT: slli s2, s2, 8
; RV32I-NEXT: slli s4, s4, 16
; RV32I-NEXT: slli s5, s5, 24
-; RV32I-NEXT: or a5, t0, a5
-; RV32I-NEXT: or a6, t6, t3
-; RV32I-NEXT: or a7, s3, s2
-; RV32I-NEXT: or t0, s5, s4
-; RV32I-NEXT: lbu t3, 24(a0)
-; RV32I-NEXT: lbu s5, 25(a0)
-; RV32I-NEXT: lbu s6, 26(a0)
-; RV32I-NEXT: lbu t6, 27(a0)
-; RV32I-NEXT: slli s8, s8, 8
-; RV32I-NEXT: slli s9, s9, 16
-; RV32I-NEXT: slli s10, s10, 24
-; RV32I-NEXT: slli ra, ra, 8
-; RV32I-NEXT: or s7, s8, s7
-; RV32I-NEXT: or s2, s10, s9
-; RV32I-NEXT: or s3, ra, s11
-; RV32I-NEXT: lbu s4, 28(a0)
-; RV32I-NEXT: lbu s8, 29(a0)
-; RV32I-NEXT: lbu s9, 30(a0)
-; RV32I-NEXT: lbu s10, 31(a0)
-; RV32I-NEXT: lbu a0, 0(a1)
+; RV32I-NEXT: slli s7, s7, 8
+; RV32I-NEXT: or t1, s2, s0
+; RV32I-NEXT: or t2, s5, s4
+; RV32I-NEXT: or t3, s7, s6
+; RV32I-NEXT: lbu t6, 28(a0)
+; RV32I-NEXT: lbu s4, 29(a0)
+; RV32I-NEXT: lbu s5, 30(a0)
+; RV32I-NEXT: lbu s6, 31(a0)
+; RV32I-NEXT: slli s8, s8, 16
+; RV32I-NEXT: slli s9, s9, 24
+; RV32I-NEXT: slli s11, s11, 8
+; RV32I-NEXT: slli ra, ra, 16
+; RV32I-NEXT: slli a3, a3, 24
+; RV32I-NEXT: or a0, s9, s8
+; RV32I-NEXT: or s0, s11, s10
+; RV32I-NEXT: or s2, a3, ra
+; RV32I-NEXT: lbu a3, 0(a1)
+; RV32I-NEXT: lbu s7, 1(a1)
+; RV32I-NEXT: lbu s8, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
; RV32I-NEXT: sw zero, 24(sp)
; RV32I-NEXT: sw zero, 28(sp)
; RV32I-NEXT: sw zero, 32(sp)
@@ -1995,89 +2095,88 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV32I-NEXT: sw zero, 12(sp)
; RV32I-NEXT: sw zero, 16(sp)
; RV32I-NEXT: sw zero, 20(sp)
+; RV32I-NEXT: slli s3, s3, 8
+; RV32I-NEXT: or s1, s3, s1
+; RV32I-NEXT: addi s3, sp, 40
; RV32I-NEXT: slli t4, t4, 16
-; RV32I-NEXT: slli s0, s0, 24
-; RV32I-NEXT: or t4, s0, t4
-; RV32I-NEXT: addi s0, sp, 40
-; RV32I-NEXT: slli t2, t2, 8
-; RV32I-NEXT: slli t5, t5, 16
-; RV32I-NEXT: slli s1, s1, 24
-; RV32I-NEXT: slli s5, s5, 8
-; RV32I-NEXT: slli s6, s6, 16
-; RV32I-NEXT: slli t6, t6, 24
-; RV32I-NEXT: slli s8, s8, 8
-; RV32I-NEXT: slli s9, s9, 16
-; RV32I-NEXT: slli s10, s10, 24
-; RV32I-NEXT: or t1, t2, t1
+; RV32I-NEXT: slli t5, t5, 24
+; RV32I-NEXT: slli s4, s4, 8
+; RV32I-NEXT: slli s5, s5, 16
+; RV32I-NEXT: slli s6, s6, 24
+; RV32I-NEXT: slli s7, s7, 8
+; RV32I-NEXT: slli s8, s8, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or t4, t5, t4
+; RV32I-NEXT: or t5, s4, t6
+; RV32I-NEXT: or t6, s6, s5
+; RV32I-NEXT: or a3, s7, a3
+; RV32I-NEXT: or a1, a1, s8
+; RV32I-NEXT: lw s4, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: or a4, a4, s4
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a7, t2, t1
+; RV32I-NEXT: or t0, a0, t3
+; RV32I-NEXT: or t1, s2, s0
+; RV32I-NEXT: or t2, t4, s1
+; RV32I-NEXT: or t3, t6, t5
+; RV32I-NEXT: or a0, a1, a3
+; RV32I-NEXT: sw t0, 56(sp)
+; RV32I-NEXT: sw t1, 60(sp)
+; RV32I-NEXT: sw t2, 64(sp)
+; RV32I-NEXT: sw t3, 68(sp)
+; RV32I-NEXT: sw a4, 40(sp)
+; RV32I-NEXT: sw a5, 44(sp)
+; RV32I-NEXT: sw a6, 48(sp)
+; RV32I-NEXT: sw a7, 52(sp)
; RV32I-NEXT: srli a1, a0, 3
-; RV32I-NEXT: or t2, s1, t5
-; RV32I-NEXT: andi t5, a0, 31
-; RV32I-NEXT: or t3, s5, t3
-; RV32I-NEXT: or t6, t6, s6
-; RV32I-NEXT: or s1, s8, s4
-; RV32I-NEXT: or s4, s10, s9
-; RV32I-NEXT: andi s5, a1, 28
-; RV32I-NEXT: xori a1, t5, 31
-; RV32I-NEXT: or a3, a4, a3
-; RV32I-NEXT: or a4, a6, a5
-; RV32I-NEXT: or a5, t0, a7
-; RV32I-NEXT: or a6, s2, s7
-; RV32I-NEXT: or a7, t4, s3
-; RV32I-NEXT: or t0, t2, t1
-; RV32I-NEXT: or t1, t6, t3
-; RV32I-NEXT: or t2, s4, s1
-; RV32I-NEXT: sub t3, s0, s5
-; RV32I-NEXT: sw a7, 56(sp)
-; RV32I-NEXT: sw t0, 60(sp)
-; RV32I-NEXT: sw t1, 64(sp)
-; RV32I-NEXT: sw t2, 68(sp)
-; RV32I-NEXT: sw a3, 40(sp)
-; RV32I-NEXT: sw a4, 44(sp)
-; RV32I-NEXT: sw a5, 48(sp)
-; RV32I-NEXT: sw a6, 52(sp)
-; RV32I-NEXT: lw a3, 0(t3)
-; RV32I-NEXT: lw a4, 4(t3)
-; RV32I-NEXT: lw a5, 8(t3)
-; RV32I-NEXT: lw a6, 12(t3)
-; RV32I-NEXT: lw a7, 16(t3)
-; RV32I-NEXT: lw t0, 20(t3)
-; RV32I-NEXT: lw t1, 24(t3)
-; RV32I-NEXT: lw t2, 28(t3)
-; RV32I-NEXT: sll t3, a4, a0
-; RV32I-NEXT: srli t4, a3, 1
-; RV32I-NEXT: sll t5, a6, a0
-; RV32I-NEXT: srli t6, a5, 1
-; RV32I-NEXT: sll a5, a5, a0
-; RV32I-NEXT: srli a4, a4, 1
-; RV32I-NEXT: sll s0, t0, a0
-; RV32I-NEXT: srli s1, a7, 1
-; RV32I-NEXT: sll a7, a7, a0
-; RV32I-NEXT: srli a6, a6, 1
+; RV32I-NEXT: andi a3, a0, 31
+; RV32I-NEXT: andi a4, a1, 28
+; RV32I-NEXT: xori a1, a3, 31
+; RV32I-NEXT: sub a3, s3, a4
+; RV32I-NEXT: lw a4, 0(a3)
+; RV32I-NEXT: lw a5, 4(a3)
+; RV32I-NEXT: lw a6, 8(a3)
+; RV32I-NEXT: lw a7, 12(a3)
+; RV32I-NEXT: lw t0, 16(a3)
+; RV32I-NEXT: lw t1, 20(a3)
+; RV32I-NEXT: lw t2, 24(a3)
+; RV32I-NEXT: lw a3, 28(a3)
+; RV32I-NEXT: sll t3, a5, a0
+; RV32I-NEXT: srli t4, a4, 1
+; RV32I-NEXT: sll t5, a7, a0
+; RV32I-NEXT: srli t6, a6, 1
+; RV32I-NEXT: sll a6, a6, a0
+; RV32I-NEXT: srli a5, a5, 1
+; RV32I-NEXT: sll s0, t1, a0
+; RV32I-NEXT: srli s1, t0, 1
+; RV32I-NEXT: sll t0, t0, a0
+; RV32I-NEXT: srli a7, a7, 1
+; RV32I-NEXT: sll s2, a3, a0
+; RV32I-NEXT: srli a3, t2, 1
; RV32I-NEXT: sll t2, t2, a0
-; RV32I-NEXT: srli s2, t1, 1
-; RV32I-NEXT: sll t1, t1, a0
-; RV32I-NEXT: srli t0, t0, 1
-; RV32I-NEXT: sll s3, a3, a0
+; RV32I-NEXT: srli t1, t1, 1
+; RV32I-NEXT: sll s3, a4, a0
; RV32I-NEXT: srl a0, t4, a1
-; RV32I-NEXT: srl a3, t6, a1
-; RV32I-NEXT: srl a4, a4, a1
+; RV32I-NEXT: srl a4, t6, a1
+; RV32I-NEXT: srl a5, a5, a1
; RV32I-NEXT: srl t4, s1, a1
-; RV32I-NEXT: srl a6, a6, a1
-; RV32I-NEXT: srl t6, s2, a1
-; RV32I-NEXT: srl t0, t0, a1
+; RV32I-NEXT: srl a7, a7, a1
+; RV32I-NEXT: srl t6, a3, a1
+; RV32I-NEXT: srl t1, t1, a1
; RV32I-NEXT: srli s1, s3, 24
-; RV32I-NEXT: srli s2, s3, 16
-; RV32I-NEXT: srli s4, s3, 8
+; RV32I-NEXT: srli s4, s3, 16
+; RV32I-NEXT: srli s5, s3, 8
; RV32I-NEXT: or a0, t3, a0
-; RV32I-NEXT: or a1, t5, a3
-; RV32I-NEXT: or a3, a5, a4
+; RV32I-NEXT: or a1, t5, a4
+; RV32I-NEXT: or a3, a6, a5
; RV32I-NEXT: or a4, s0, t4
-; RV32I-NEXT: or a5, a7, a6
-; RV32I-NEXT: or a6, t2, t6
-; RV32I-NEXT: or a7, t1, t0
+; RV32I-NEXT: or a5, t0, a7
+; RV32I-NEXT: or a6, s2, t6
+; RV32I-NEXT: or a7, t2, t1
; RV32I-NEXT: sb s3, 0(a2)
-; RV32I-NEXT: sb s4, 1(a2)
-; RV32I-NEXT: sb s2, 2(a2)
+; RV32I-NEXT: sb s5, 1(a2)
+; RV32I-NEXT: sb s4, 2(a2)
; RV32I-NEXT: sb s1, 3(a2)
; RV32I-NEXT: srli t0, a7, 24
; RV32I-NEXT: srli t1, a7, 16
@@ -2152,17 +2251,19 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-LABEL: ashr_32bytes:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -144
-; RV64I-NEXT: sd s0, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 120(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 112(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 104(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 96(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s6, 88(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s7, 80(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s8, 72(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s9, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT: addi sp, sp, -160
+; RV64I-NEXT: sd s0, 152(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s1, 144(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 136(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s3, 128(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s4, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s5, 112(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s6, 104(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s7, 96(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s8, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s9, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s10, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s11, 64(sp) # 8-byte Folded Spill
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: lbu a4, 1(a0)
; RV64I-NEXT: lbu a5, 2(a0)
@@ -2179,123 +2280,144 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: lbu s1, 13(a0)
; RV64I-NEXT: lbu s2, 14(a0)
; RV64I-NEXT: lbu s3, 15(a0)
-; RV64I-NEXT: slli a4, a4, 8
-; RV64I-NEXT: slli a5, a5, 16
-; RV64I-NEXT: slli a6, a6, 24
-; RV64I-NEXT: or a3, a4, a3
-; RV64I-NEXT: or a4, a6, a5
; RV64I-NEXT: lbu s4, 16(a0)
; RV64I-NEXT: lbu s5, 17(a0)
; RV64I-NEXT: lbu s6, 18(a0)
; RV64I-NEXT: lbu s7, 19(a0)
+; RV64I-NEXT: slli a4, a4, 8
+; RV64I-NEXT: slli a5, a5, 16
+; RV64I-NEXT: slli a6, a6, 24
; RV64I-NEXT: slli t0, t0, 8
; RV64I-NEXT: slli t1, t1, 16
; RV64I-NEXT: slli t2, t2, 24
+; RV64I-NEXT: or a3, a4, a3
+; RV64I-NEXT: or a4, a6, a5
+; RV64I-NEXT: or a5, t0, a7
+; RV64I-NEXT: or a6, t2, t1
+; RV64I-NEXT: lbu s8, 20(a0)
+; RV64I-NEXT: lbu s9, 21(a0)
+; RV64I-NEXT: lbu s10, 22(a0)
+; RV64I-NEXT: lbu s11, 23(a0)
; RV64I-NEXT: slli t4, t4, 8
; RV64I-NEXT: slli t5, t5, 16
; RV64I-NEXT: slli t6, t6, 24
-; RV64I-NEXT: or a5, t0, a7
-; RV64I-NEXT: or a6, t2, t1
-; RV64I-NEXT: or a7, t4, t3
-; RV64I-NEXT: or t0, t6, t5
-; RV64I-NEXT: lbu t5, 20(a0)
-; RV64I-NEXT: lbu t6, 21(a0)
-; RV64I-NEXT: lbu s8, 22(a0)
-; RV64I-NEXT: lbu s9, 23(a0)
; RV64I-NEXT: slli s1, s1, 8
; RV64I-NEXT: slli s2, s2, 16
; RV64I-NEXT: slli s3, s3, 24
+; RV64I-NEXT: or a7, t4, t3
+; RV64I-NEXT: or t0, t6, t5
+; RV64I-NEXT: or t1, s1, s0
+; RV64I-NEXT: or t2, s3, s2
+; RV64I-NEXT: lbu t6, 24(a0)
+; RV64I-NEXT: lbu s0, 25(a0)
+; RV64I-NEXT: lbu s1, 26(a0)
+; RV64I-NEXT: lbu s2, 27(a0)
; RV64I-NEXT: slli s5, s5, 8
; RV64I-NEXT: slli s6, s6, 16
; RV64I-NEXT: slli s7, s7, 24
-; RV64I-NEXT: or t1, s1, s0
-; RV64I-NEXT: or t2, s3, s2
+; RV64I-NEXT: slli s9, s9, 8
; RV64I-NEXT: or t3, s5, s4
; RV64I-NEXT: or t4, s7, s6
-; RV64I-NEXT: lbu s0, 24(a0)
-; RV64I-NEXT: lbu s1, 25(a0)
-; RV64I-NEXT: lbu s2, 26(a0)
-; RV64I-NEXT: lbu s3, 27(a0)
-; RV64I-NEXT: slli t6, t6, 8
-; RV64I-NEXT: slli s8, s8, 16
-; RV64I-NEXT: slli s9, s9, 24
-; RV64I-NEXT: slli s1, s1, 8
-; RV64I-NEXT: or t5, t6, t5
-; RV64I-NEXT: or t6, s9, s8
-; RV64I-NEXT: or s0, s1, s0
-; RV64I-NEXT: lbu s1, 28(a0)
+; RV64I-NEXT: or t5, s9, s8
+; RV64I-NEXT: lbu s3, 28(a0)
; RV64I-NEXT: lbu s4, 29(a0)
; RV64I-NEXT: lbu s5, 30(a0)
; RV64I-NEXT: lbu s6, 31(a0)
-; RV64I-NEXT: lbu a0, 0(a1)
-; RV64I-NEXT: slli s2, s2, 16
-; RV64I-NEXT: slli s3, s3, 24
-; RV64I-NEXT: or a1, s3, s2
-; RV64I-NEXT: mv s2, sp
+; RV64I-NEXT: slli s10, s10, 16
+; RV64I-NEXT: slli s11, s11, 24
+; RV64I-NEXT: slli s0, s0, 8
+; RV64I-NEXT: slli s1, s1, 16
+; RV64I-NEXT: slli s2, s2, 24
; RV64I-NEXT: slli s4, s4, 8
+; RV64I-NEXT: or a0, s11, s10
+; RV64I-NEXT: or t6, s0, t6
+; RV64I-NEXT: or s0, s2, s1
+; RV64I-NEXT: or s1, s4, s3
+; RV64I-NEXT: lbu s2, 0(a1)
+; RV64I-NEXT: lbu s3, 1(a1)
+; RV64I-NEXT: lbu s4, 2(a1)
+; RV64I-NEXT: lbu s7, 3(a1)
; RV64I-NEXT: slli s5, s5, 16
; RV64I-NEXT: slli s6, s6, 24
-; RV64I-NEXT: or s1, s4, s1
-; RV64I-NEXT: srli s3, a0, 3
-; RV64I-NEXT: or s4, s6, s5
-; RV64I-NEXT: andi s5, a0, 63
-; RV64I-NEXT: andi s3, s3, 24
-; RV64I-NEXT: xori s5, s5, 63
+; RV64I-NEXT: slli s3, s3, 8
+; RV64I-NEXT: slli s4, s4, 16
+; RV64I-NEXT: slli s7, s7, 24
+; RV64I-NEXT: or s5, s6, s5
+; RV64I-NEXT: or s2, s3, s2
+; RV64I-NEXT: or s3, s7, s4
+; RV64I-NEXT: lbu s4, 5(a1)
+; RV64I-NEXT: lbu s6, 4(a1)
+; RV64I-NEXT: lbu s7, 6(a1)
+; RV64I-NEXT: lbu a1, 7(a1)
+; RV64I-NEXT: slli s4, s4, 8
+; RV64I-NEXT: or s4, s4, s6
+; RV64I-NEXT: slli s7, s7, 16
+; RV64I-NEXT: slli a1, a1, 24
+; RV64I-NEXT: or a1, a1, s7
+; RV64I-NEXT: mv s6, sp
; RV64I-NEXT: or a3, a4, a3
; RV64I-NEXT: or a4, a6, a5
; RV64I-NEXT: or a5, t0, a7
; RV64I-NEXT: or a6, t2, t1
; RV64I-NEXT: or a7, t4, t3
-; RV64I-NEXT: or t0, t6, t5
-; RV64I-NEXT: or a1, a1, s0
-; RV64I-NEXT: or t1, s4, s1
-; RV64I-NEXT: add s2, s2, s3
+; RV64I-NEXT: or a0, a0, t5
+; RV64I-NEXT: or t0, s0, t6
+; RV64I-NEXT: or t1, s5, s1
+; RV64I-NEXT: or t2, s3, s2
+; RV64I-NEXT: or a1, a1, s4
; RV64I-NEXT: slli a4, a4, 32
; RV64I-NEXT: slli a6, a6, 32
-; RV64I-NEXT: slli t0, t0, 32
-; RV64I-NEXT: slli t2, t1, 32
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: slli t3, t1, 32
+; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: sraiw t1, t1, 31
; RV64I-NEXT: or a3, a4, a3
; RV64I-NEXT: or a4, a6, a5
-; RV64I-NEXT: or a5, t0, a7
-; RV64I-NEXT: or a1, t2, a1
+; RV64I-NEXT: or a0, a0, a7
+; RV64I-NEXT: or a5, t3, t0
+; RV64I-NEXT: or a1, a1, t2
; RV64I-NEXT: sd t1, 32(sp)
; RV64I-NEXT: sd t1, 40(sp)
; RV64I-NEXT: sd t1, 48(sp)
; RV64I-NEXT: sd t1, 56(sp)
; RV64I-NEXT: sd a3, 0(sp)
; RV64I-NEXT: sd a4, 8(sp)
-; RV64I-NEXT: sd a5, 16(sp)
-; RV64I-NEXT: sd a1, 24(sp)
-; RV64I-NEXT: ld a1, 8(s2)
-; RV64I-NEXT: ld a3, 16(s2)
-; RV64I-NEXT: ld a4, 0(s2)
-; RV64I-NEXT: ld a5, 24(s2)
-; RV64I-NEXT: srl a6, a1, a0
-; RV64I-NEXT: slli a7, a3, 1
-; RV64I-NEXT: srl a4, a4, a0
-; RV64I-NEXT: slli a1, a1, 1
-; RV64I-NEXT: srl a3, a3, a0
+; RV64I-NEXT: sd a0, 16(sp)
+; RV64I-NEXT: sd a5, 24(sp)
+; RV64I-NEXT: srli a0, a1, 3
+; RV64I-NEXT: andi a3, a1, 63
+; RV64I-NEXT: andi a0, a0, 24
+; RV64I-NEXT: xori a3, a3, 63
+; RV64I-NEXT: add a0, s6, a0
+; RV64I-NEXT: ld a4, 8(a0)
+; RV64I-NEXT: ld a5, 16(a0)
+; RV64I-NEXT: ld a6, 0(a0)
+; RV64I-NEXT: ld a0, 24(a0)
+; RV64I-NEXT: srl a7, a4, a1
; RV64I-NEXT: slli t0, a5, 1
-; RV64I-NEXT: sra a5, a5, a0
-; RV64I-NEXT: sll a0, a7, s5
-; RV64I-NEXT: sll a1, a1, s5
-; RV64I-NEXT: sll a7, t0, s5
-; RV64I-NEXT: srli t0, a5, 56
-; RV64I-NEXT: srli t1, a5, 48
-; RV64I-NEXT: srli t2, a5, 40
-; RV64I-NEXT: srli t3, a5, 32
-; RV64I-NEXT: srli t4, a5, 24
-; RV64I-NEXT: srli t5, a5, 16
-; RV64I-NEXT: srli t6, a5, 8
-; RV64I-NEXT: or a0, a6, a0
-; RV64I-NEXT: or a1, a4, a1
-; RV64I-NEXT: or a3, a3, a7
+; RV64I-NEXT: srl a6, a6, a1
+; RV64I-NEXT: slli a4, a4, 1
+; RV64I-NEXT: srl a5, a5, a1
+; RV64I-NEXT: slli t1, a0, 1
+; RV64I-NEXT: sra t2, a0, a1
+; RV64I-NEXT: sll a0, t0, a3
+; RV64I-NEXT: sll a1, a4, a3
+; RV64I-NEXT: sll a3, t1, a3
+; RV64I-NEXT: srli a4, t2, 56
+; RV64I-NEXT: srli t0, t2, 48
+; RV64I-NEXT: srli t1, t2, 40
+; RV64I-NEXT: srli t3, t2, 32
+; RV64I-NEXT: srli t4, t2, 24
+; RV64I-NEXT: srli t5, t2, 16
+; RV64I-NEXT: srli t6, t2, 8
+; RV64I-NEXT: or a0, a7, a0
+; RV64I-NEXT: or a1, a6, a1
+; RV64I-NEXT: or a3, a5, a3
; RV64I-NEXT: sb t3, 28(a2)
-; RV64I-NEXT: sb t2, 29(a2)
-; RV64I-NEXT: sb t1, 30(a2)
-; RV64I-NEXT: sb t0, 31(a2)
-; RV64I-NEXT: sb a5, 24(a2)
+; RV64I-NEXT: sb t1, 29(a2)
+; RV64I-NEXT: sb t0, 30(a2)
+; RV64I-NEXT: sb a4, 31(a2)
+; RV64I-NEXT: sb t2, 24(a2)
; RV64I-NEXT: sb t6, 25(a2)
; RV64I-NEXT: sb t5, 26(a2)
; RV64I-NEXT: sb t4, 27(a2)
@@ -2316,45 +2438,47 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV64I-NEXT: srli s3, a0, 56
; RV64I-NEXT: srli s4, a0, 48
; RV64I-NEXT: srli s5, a0, 40
+; RV64I-NEXT: srli s6, a0, 32
; RV64I-NEXT: sb a7, 20(a2)
; RV64I-NEXT: sb a6, 21(a2)
; RV64I-NEXT: sb a5, 22(a2)
; RV64I-NEXT: sb a4, 23(a2)
-; RV64I-NEXT: srli a4, a0, 32
+; RV64I-NEXT: srli a4, a0, 24
; RV64I-NEXT: sb a3, 16(a2)
; RV64I-NEXT: sb t2, 17(a2)
; RV64I-NEXT: sb t1, 18(a2)
; RV64I-NEXT: sb t0, 19(a2)
-; RV64I-NEXT: srli a3, a0, 24
+; RV64I-NEXT: srli a3, a0, 16
; RV64I-NEXT: sb t6, 4(a2)
; RV64I-NEXT: sb t5, 5(a2)
; RV64I-NEXT: sb t4, 6(a2)
; RV64I-NEXT: sb t3, 7(a2)
-; RV64I-NEXT: srli a5, a0, 16
+; RV64I-NEXT: srli a5, a0, 8
; RV64I-NEXT: sb a1, 0(a2)
; RV64I-NEXT: sb s2, 1(a2)
; RV64I-NEXT: sb s1, 2(a2)
; RV64I-NEXT: sb s0, 3(a2)
-; RV64I-NEXT: srli a1, a0, 8
-; RV64I-NEXT: sb a4, 12(a2)
+; RV64I-NEXT: sb s6, 12(a2)
; RV64I-NEXT: sb s5, 13(a2)
; RV64I-NEXT: sb s4, 14(a2)
; RV64I-NEXT: sb s3, 15(a2)
; RV64I-NEXT: sb a0, 8(a2)
-; RV64I-NEXT: sb a1, 9(a2)
-; RV64I-NEXT: sb a5, 10(a2)
-; RV64I-NEXT: sb a3, 11(a2)
-; RV64I-NEXT: ld s0, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 120(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 112(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 104(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 96(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s6, 88(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s7, 80(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s8, 72(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s9, 64(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 144
+; RV64I-NEXT: sb a5, 9(a2)
+; RV64I-NEXT: sb a3, 10(a2)
+; RV64I-NEXT: sb a4, 11(a2)
+; RV64I-NEXT: ld s0, 152(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s1, 144(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 136(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s3, 128(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s4, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s5, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s6, 104(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s7, 96(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s8, 88(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s9, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s10, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s11, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 160
; RV64I-NEXT: ret
;
; RV32I-LABEL: ashr_32bytes:
@@ -2379,148 +2503,159 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; RV32I-NEXT: lbu a7, 3(a0)
; RV32I-NEXT: lbu a5, 4(a0)
; RV32I-NEXT: lbu t0, 5(a0)
-; RV32I-NEXT: lbu t3, 6(a0)
-; RV32I-NEXT: lbu t4, 7(a0)
-; RV32I-NEXT: lbu t6, 8(a0)
-; RV32I-NEXT: lbu s0, 9(a0)
-; RV32I-NEXT: lbu s4, 10(a0)
-; RV32I-NEXT: lbu s5, 11(a0)
-; RV32I-NEXT: lbu s6, 12(a0)
-; RV32I-NEXT: lbu s7, 13(a0)
-; RV32I-NEXT: lbu s8, 14(a0)
-; RV32I-NEXT: lbu s9, 15(a0)
-; RV32I-NEXT: lbu s10, 16(a0)
-; RV32I-NEXT: lbu s11, 17(a0)
-; RV32I-NEXT: lbu s2, 18(a0)
-; RV32I-NEXT: lbu s3, 19(a0)
+; RV32I-NEXT: lbu t1, 6(a0)
+; RV32I-NEXT: lbu t2, 7(a0)
+; RV32I-NEXT: lbu t3, 8(a0)
+; RV32I-NEXT: lbu t4, 9(a0)
+; RV32I-NEXT: lbu t5, 10(a0)
+; RV32I-NEXT: lbu t6, 11(a0)
+; RV32I-NEXT: lbu s0, 12(a0)
+; RV32I-NEXT: lbu s1, 13(a0)
+; RV32I-NEXT: lbu s2, 14(a0)
+; RV32I-NEXT: lbu s3, 15(a0)
+; RV32I-NEXT: lbu s4, 16(a0)
+; RV32I-NEXT: lbu s5, 17(a0)
+; RV32I-NEXT: lbu s6, 18(a0)
+; RV32I-NEXT: lbu s7, 19(a0)
; RV32I-NEXT: slli a4, a4, 8
; RV32I-NEXT: slli a6, a6, 16
; RV32I-NEXT: slli a7, a7, 24
; RV32I-NEXT: or a3, a4, a3
+; RV32I-NEXT: sw a3, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: or a4, a7, a6
-; RV32I-NEXT: lbu t1, 20(a0)
-; RV32I-NEXT: lbu t2, 21(a0)
-; RV32I-NEXT: lbu t5, 22(a0)
-; RV32I-NEXT: lbu s1, 23(a0)
+; RV32I-NEXT: lbu s8, 20(a0)
+; RV32I-NEXT: lbu s9, 21(a0)
+; RV32I-NEXT: lbu s10, 22(a0)
+; RV32I-NEXT: lbu s11, 23(a0)
; RV32I-NEXT: slli t0, t0, 8
-; RV32I-NEXT: slli t3, t3, 16
-; RV32I-NEXT: slli t4, t4, 24
-; RV32I-NEXT: slli s0, s0, 8
-; RV32I-NEXT: slli s4, s4, 16
-; RV32I-NEXT: slli s5, s5, 24
+; RV32I-NEXT: slli t1, t1, 16
+; RV32I-NEXT: slli t2, t2, 24
+; RV32I-NEXT: slli t4, t4, 8
+; RV32I-NEXT: slli t5, t5, 16
+; RV32I-NEXT: slli t6, t6, 24
; RV32I-NEXT: or a5, t0, a5
-; RV32I-NEXT: or a6, t4, t3
-; RV32I-NEXT: or a7, s0, t6
-; RV32I-NEXT: or t0, s5, s4
-; RV32I-NEXT: lbu t3, 24(a0)
-; RV32I-NEXT: lbu s4, 25(a0)
-; RV32I-NEXT: lbu s5, 26(a0)
-; RV32I-NEXT: lbu ra, 27(a0)
-; RV32I-NEXT: slli s7, s7, 8
-; RV32I-NEXT: slli s8, s8, 16
-; RV32I-NEXT: slli s9, s9, 24
-; RV32I-NEXT: slli s11, s11, 8
-; RV32I-NEXT: or t4, s7, s6
-; RV32I-NEXT: or t6, s9, s8
-; RV32I-NEXT: or s0, s11, s10
-; RV32I-NEXT: lbu s6, 28(a0)
-; RV32I-NEXT: lbu s7, 29(a0)
-; RV32I-NEXT: lbu s8, 30(a0)
-; RV32I-NEXT: lbu s9, 31(a0)
-; RV32I-NEXT: lbu a0, 0(a1)
+; RV32I-NEXT: or a6, t2, t1
+; RV32I-NEXT: or a7, t4, t3
+; RV32I-NEXT: or t0, t6, t5
+; RV32I-NEXT: lbu ra, 24(a0)
+; RV32I-NEXT: lbu a3, 25(a0)
+; RV32I-NEXT: lbu t4, 26(a0)
+; RV32I-NEXT: lbu t5, 27(a0)
+; RV32I-NEXT: slli s1, s1, 8
; RV32I-NEXT: slli s2, s2, 16
; RV32I-NEXT: slli s3, s3, 24
-; RV32I-NEXT: or s2, s3, s2
-; RV32I-NEXT: addi s3, sp, 8
-; RV32I-NEXT: slli t2, t2, 8
-; RV32I-NEXT: slli t5, t5, 16
-; RV32I-NEXT: slli s1, s1, 24
-; RV32I-NEXT: slli s4, s4, 8
-; RV32I-NEXT: slli s5, s5, 16
-; RV32I-NEXT: slli ra, ra, 24
-; RV32I-NEXT: slli s7, s7, 8
-; RV32I-NEXT: slli s8, s8, 16
-; RV32I-NEXT: slli s9, s9, 24
-; RV32I-NEXT: or t1, t2, t1
-; RV32I-NEXT: srli a1, a0, 3
+; RV32I-NEXT: slli s5, s5, 8
+; RV32I-NEXT: or t1, s1, s0
+; RV32I-NEXT: or t2, s3, s2
+; RV32I-NEXT: or t3, s5, s4
+; RV32I-NEXT: lbu t6, 28(a0)
+; RV32I-NEXT: lbu s0, 29(a0)
+; RV32I-NEXT: lbu s1, 30(a0)
+; RV32I-NEXT: lbu a0, 31(a0)
+; RV32I-NEXT: slli s6, s6, 16
+; RV32I-NEXT: slli s7, s7, 24
+; RV32I-NEXT: slli s9, s9, 8
+; RV32I-NEXT: slli s10, s10, 16
+; RV32I-NEXT: slli s11, s11, 24
+; RV32I-NEXT: or s2, s7, s6
+; RV32I-NEXT: or s3, s9, s8
+; RV32I-NEXT: or s4, s11, s10
+; RV32I-NEXT: lbu s5, 0(a1)
+; RV32I-NEXT: lbu s6, 1(a1)
+; RV32I-NEXT: lbu s7, 2(a1)
+; RV32I-NEXT: lbu a1, 3(a1)
+; RV32I-NEXT: slli a3, a3, 8
+; RV32I-NEXT: or a3, a3, ra
+; RV32I-NEXT: addi s8, sp, 8
+; RV32I-NEXT: slli t4, t4, 16
+; RV32I-NEXT: slli t5, t5, 24
+; RV32I-NEXT: slli s0, s0, 8
+; RV32I-NEXT: slli s1, s1, 16
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: slli s6, s6, 8
+; RV32I-NEXT: slli s7, s7, 16
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or t4, t5, t4
+; RV32I-NEXT: or t5, s0, t6
+; RV32I-NEXT: or s1, a0, s1
+; RV32I-NEXT: or t6, s6, s5
+; RV32I-NEXT: or a1, a1, s7
+; RV32I-NEXT: srai s0, a0, 31
+; RV32I-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: or a4, a4, a0
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: or a6, t0, a7
+; RV32I-NEXT: or a7, t2, t1
+; RV32I-NEXT: or t0, s2, t3
+; RV32I-NEXT: or t1, s4, s3
+; RV32I-NEXT: or a3, t4, a3
; RV32I-NEXT: or t2, s1, t5
-; RV32I-NEXT: andi t5, a0, 31
-; RV32I-NEXT: or t3, s4, t3
-; RV32I-NEXT: or s1, ra, s5
-; RV32I-NEXT: or s4, s7, s6
-; RV32I-NEXT: or s5, s9, s8
-; RV32I-NEXT: srai s6, s9, 31
-; RV32I-NEXT: andi s7, a1, 28
-; RV32I-NEXT: xori a1, t5, 31
-; RV32I-NEXT: or a3, a4, a3
-; RV32I-NEXT: or a4, a6, a5
-; RV32I-NEXT: or a5, t0, a7
-; RV32I-NEXT: or a6, t6, t4
-; RV32I-NEXT: or a7, s2, s0
-; RV32I-NEXT: or t0, t2, t1
-; RV32I-NEXT: or t1, s1, t3
-; RV32I-NEXT: or t2, s5, s4
-; RV32I-NEXT: sw s6, 56(sp)
-; RV32I-NEXT: sw s6, 60(sp)
-; RV32I-NEXT: sw s6, 64(sp)
-; RV32I-NEXT: sw s6, 68(sp)
-; RV32I-NEXT: sw s6, 40(sp)
-; RV32I-NEXT: sw s6, 44(sp)
-; RV32I-NEXT: sw s6, 48(sp)
-; RV32I-NEXT: sw s6, 52(sp)
-; RV32I-NEXT: add s3, s3, s7
-; RV32I-NEXT: sw a7, 24(sp)
-; RV32I-NEXT: sw t0, 28(sp)
-; RV32I-NEXT: sw t1, 32(sp)
+; RV32I-NEXT: or a0, a1, t6
+; RV32I-NEXT: sw s0, 56(sp)
+; RV32I-NEXT: sw s0, 60(sp)
+; RV32I-NEXT: sw s0, 64(sp)
+; RV32I-NEXT: sw s0, 68(sp)
+; RV32I-NEXT: sw s0, 40(sp)
+; RV32I-NEXT: sw s0, 44(sp)
+; RV32I-NEXT: sw s0, 48(sp)
+; RV32I-NEXT: sw s0, 52(sp)
+; RV32I-NEXT: sw t0, 24(sp)
+; RV32I-NEXT: sw t1, 28(sp)
+; RV32I-NEXT: sw a3, 32(sp)
; RV32I-NEXT: sw t2, 36(sp)
-; RV32I-NEXT: sw a3, 8(sp)
-; RV32I-NEXT: sw a4, 12(sp)
-; RV32I-NEXT: sw a5, 16(sp)
-; RV32I-NEXT: sw a6, 20(sp)
-; RV32I-NEXT: lw a3, 0(s3)
-; RV32I-NEXT: lw a4, 4(s3)
-; RV32I-NEXT: lw a5, 8(s3)
-; RV32I-NEXT: lw a6, 12(s3)
-; RV32I-NEXT: lw a7, 16(s3)
-; RV32I-NEXT: lw t0, 20(s3)
-; RV32I-NEXT: lw t1, 24(s3)
-; RV32I-NEXT: lw t2, 28(s3)
-; RV32I-NEXT: srl t3, a4, a0
-; RV32I-NEXT: slli t4, a5, 1
+; RV32I-NEXT: sw a4, 8(sp)
+; RV32I-NEXT: sw a5, 12(sp)
+; RV32I-NEXT: sw a6, 16(sp)
+; RV32I-NEXT: sw a7, 20(sp)
+; RV32I-NEXT: srli a1, a0, 3
+; RV32I-NEXT: andi a3, a0, 31
+; RV32I-NEXT: andi a4, a1, 28
+; RV32I-NEXT: xori a1, a3, 31
+; RV32I-NEXT: add a4, s8, a4
+; RV32I-NEXT: lw a3, 0(a4)
+; RV32I-NEXT: lw a5, 4(a4)
+; RV32I-NEXT: lw a6, 8(a4)
+; RV32I-NEXT: lw a7, 12(a4)
+; RV32I-NEXT: lw t0, 16(a4)
+; RV32I-NEXT: lw t1, 20(a4)
+; RV32I-NEXT: lw t2, 24(a4)
+; RV32I-NEXT: lw a4, 28(a4)
+; RV32I-NEXT: srl t3, a5, a0
+; RV32I-NEXT: slli t4, a6, 1
; RV32I-NEXT: srl a3, a3, a0
-; RV32I-NEXT: slli a4, a4, 1
-; RV32I-NEXT: srl t5, a6, a0
-; RV32I-NEXT: slli t6, a7, 1
-; RV32I-NEXT: srl a5, a5, a0
-; RV32I-NEXT: slli a6, a6, 1
-; RV32I-NEXT: srl s0, t0, a0
-; RV32I-NEXT: slli s1, t1, 1
-; RV32I-NEXT: srl a7, a7, a0
-; RV32I-NEXT: slli t0, t0, 1
-; RV32I-NEXT: srl t1, t1, a0
-; RV32I-NEXT: slli s2, t2, 1
-; RV32I-NEXT: sra t2, t2, a0
+; RV32I-NEXT: slli a5, a5, 1
+; RV32I-NEXT: srl t5, a7, a0
+; RV32I-NEXT: slli t6, t0, 1
+; RV32I-NEXT: srl a6, a6, a0
+; RV32I-NEXT: slli a7, a7, 1
+; RV32I-NEXT: srl s0, t1, a0
+; RV32I-NEXT: slli s1, t2, 1
+; RV32I-NEXT: srl t0, t0, a0
+; RV32I-NEXT: slli t1, t1, 1
+; RV32I-NEXT: srl t2, t2, a0
+; RV32I-NEXT: slli s2, a4, 1
+; RV32I-NEXT: sra s3, a4, a0
; RV32I-NEXT: sll a0, t4, a1
-; RV32I-NEXT: sll a4, a4, a1
-; RV32I-NEXT: sll t4, t6, a1
-; RV32I-NEXT: sll a6, a6, a1
-; RV32I-NEXT: sll t6, s1, a1
-; RV32I-NEXT: sll t0, t0, a1
-; RV32I-NEXT: sll s1, s2, a1
-; RV32I-NEXT: srli s2, t2, 24
-; RV32I-NEXT: srli s3, t2, 16
-; RV32I-NEXT: srli s4, t2, 8
+; RV32I-NEXT: sll a4, a5, a1
+; RV32I-NEXT: sll a5, t6, a1
+; RV32I-NEXT: sll a7, a7, a1
+; RV32I-NEXT: sll t4, s1, a1
+; RV32I-NEXT: sll t1, t1, a1
+; RV32I-NEXT: sll t6, s2, a1
+; RV32I-NEXT: srli s1, s3, 24
+; RV32I-NEXT: srli s2, s3, 16
+; RV32I-NEXT: srli s4, s3, 8
; RV32I-NEXT: or a0, t3, a0
; RV32I-NEXT: or a1, a3, a4
-; RV32I-NEXT: or a3, t5, t4
-; RV32I-NEXT: or a4, a5, a6
-; RV32I-NEXT: or a5, s0, t6
-; RV32I-NEXT: or a6, a7, t0
-; RV32I-NEXT: or a7, t1, s1
-; RV32I-NEXT: sb t2, 28(a2)
+; RV32I-NEXT: or a3, t5, a5
+; RV32I-NEXT: or a4, a6, a7
+; RV32I-NEXT: or a5, s0, t4
+; RV32I-NEXT: or a6, t0, t1
+; RV32I-NEXT: or a7, t2, t6
+; RV32I-NEXT: sb s3, 28(a2)
; RV32I-NEXT: sb s4, 29(a2)
-; RV32I-NEXT: sb s3, 30(a2)
-; RV32I-NEXT: sb s2, 31(a2)
+; RV32I-NEXT: sb s2, 30(a2)
+; RV32I-NEXT: sb s1, 31(a2)
; RV32I-NEXT: srli t0, a7, 24
; RV32I-NEXT: srli t1, a7, 16
; RV32I-NEXT: srli t2, a7, 8
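
The RV32I register churn above all serves one lowering: a variable 256-bit arithmetic shift right done through a stack buffer. The eight value words are stored at sp+8..sp+36 with eight copies of the sign word above them at sp+40..sp+68, the srli/andi/add triple turns the bit count in a0 into a word-aligned byte offset, (amt >> 3) & ~3, into that buffer, and each result word is then a funnel of two neighbours: (p[i] >> (amt & 31)) | ((p[i+1] << 1) << (31 ^ (amt & 31))), where the extra << 1 keeps the combine well defined when amt & 31 == 0. A minimal reproducer sketch, not a function from this test:

define i256 @ashr256_sketch(i256 %a, i256 %amt) {
  %r = ashr i256 %a, %amt
  ret i256 %r
}

; llc -mtriple=riscv32 legalizes this through the same
; store/index/reload-and-funnel pattern checked above.
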
diff --git a/llvm/test/CodeGen/RISCV/xqcisls.ll b/llvm/test/CodeGen/RISCV/xqcisls.ll
index 709dc4c..3dea540 100644
--- a/llvm/test/CodeGen/RISCV/xqcisls.ll
+++ b/llvm/test/CodeGen/RISCV/xqcisls.ll
@@ -308,13 +308,13 @@ define i64 @lrd(ptr %a, i32 %b) {
;
; RV32IZBAXQCISLS-LABEL: lrd:
; RV32IZBAXQCISLS: # %bb.0:
-; RV32IZBAXQCISLS-NEXT: qc.lrw a2, a0, a1, 3
-; RV32IZBAXQCISLS-NEXT: addi a0, a0, 4
-; RV32IZBAXQCISLS-NEXT: qc.lrw a1, a0, a1, 3
-; RV32IZBAXQCISLS-NEXT: add a0, a2, a2
-; RV32IZBAXQCISLS-NEXT: sltu a2, a0, a2
-; RV32IZBAXQCISLS-NEXT: add a1, a1, a1
-; RV32IZBAXQCISLS-NEXT: add a1, a1, a2
+; RV32IZBAXQCISLS-NEXT: sh3add a0, a1, a0
+; RV32IZBAXQCISLS-NEXT: lw a1, 0(a0)
+; RV32IZBAXQCISLS-NEXT: lw a2, 4(a0)
+; RV32IZBAXQCISLS-NEXT: add a0, a1, a1
+; RV32IZBAXQCISLS-NEXT: sltu a1, a0, a1
+; RV32IZBAXQCISLS-NEXT: add a2, a2, a2
+; RV32IZBAXQCISLS-NEXT: add a1, a2, a1
; RV32IZBAXQCISLS-NEXT: ret
%1 = getelementptr i64, ptr %a, i32 %b
%2 = load i64, ptr %1, align 8
@@ -348,14 +348,13 @@ define i64 @lrd_2(ptr %a, i32 %b) {
;
; RV32IZBAXQCISLS-LABEL: lrd_2:
; RV32IZBAXQCISLS: # %bb.0:
-; RV32IZBAXQCISLS-NEXT: addi a2, a0, 96
-; RV32IZBAXQCISLS-NEXT: qc.lrw a2, a2, a1, 3
-; RV32IZBAXQCISLS-NEXT: addi a0, a0, 100
-; RV32IZBAXQCISLS-NEXT: qc.lrw a1, a0, a1, 3
-; RV32IZBAXQCISLS-NEXT: add a0, a2, a2
-; RV32IZBAXQCISLS-NEXT: sltu a2, a0, a2
-; RV32IZBAXQCISLS-NEXT: add a1, a1, a1
-; RV32IZBAXQCISLS-NEXT: add a1, a1, a2
+; RV32IZBAXQCISLS-NEXT: sh3add a0, a1, a0
+; RV32IZBAXQCISLS-NEXT: lw a1, 96(a0)
+; RV32IZBAXQCISLS-NEXT: lw a2, 100(a0)
+; RV32IZBAXQCISLS-NEXT: add a0, a1, a1
+; RV32IZBAXQCISLS-NEXT: sltu a1, a0, a1
+; RV32IZBAXQCISLS-NEXT: add a2, a2, a2
+; RV32IZBAXQCISLS-NEXT: add a1, a2, a1
; RV32IZBAXQCISLS-NEXT: ret
%1 = add i32 %b, 12
%2 = getelementptr i64, ptr %a, i32 %1
@@ -472,11 +471,11 @@ define void @srd(ptr %a, i32 %b, i64 %c) {
; RV32IZBAXQCISLS: # %bb.0:
; RV32IZBAXQCISLS-NEXT: add a4, a2, a2
; RV32IZBAXQCISLS-NEXT: add a3, a3, a3
-; RV32IZBAXQCISLS-NEXT: sltu a2, a4, a2
-; RV32IZBAXQCISLS-NEXT: qc.srw a4, a0, a1, 3
-; RV32IZBAXQCISLS-NEXT: add a2, a3, a2
-; RV32IZBAXQCISLS-NEXT: addi a0, a0, 4
-; RV32IZBAXQCISLS-NEXT: qc.srw a2, a0, a1, 3
+; RV32IZBAXQCISLS-NEXT: sh3add a0, a1, a0
+; RV32IZBAXQCISLS-NEXT: sltu a1, a4, a2
+; RV32IZBAXQCISLS-NEXT: add a1, a3, a1
+; RV32IZBAXQCISLS-NEXT: sw a4, 0(a0)
+; RV32IZBAXQCISLS-NEXT: sw a1, 4(a0)
; RV32IZBAXQCISLS-NEXT: ret
%1 = add i64 %c, %c
%2 = getelementptr i64, ptr %a, i32 %b
@@ -503,10 +502,10 @@ define i64 @lrd_large_shift(ptr %a, i32 %b) {
;
; RV32IZBAXQCISLS-LABEL: lrd_large_shift:
; RV32IZBAXQCISLS: # %bb.0:
-; RV32IZBAXQCISLS-NEXT: addi a2, a0, 384
-; RV32IZBAXQCISLS-NEXT: addi a3, a0, 388
-; RV32IZBAXQCISLS-NEXT: qc.lrw a0, a2, a1, 5
-; RV32IZBAXQCISLS-NEXT: qc.lrw a1, a3, a1, 5
+; RV32IZBAXQCISLS-NEXT: slli a1, a1, 5
+; RV32IZBAXQCISLS-NEXT: add a1, a1, a0
+; RV32IZBAXQCISLS-NEXT: lw a0, 384(a1)
+; RV32IZBAXQCISLS-NEXT: lw a1, 388(a1)
; RV32IZBAXQCISLS-NEXT: ret
%1 = add i32 %b, 12
%2 = shl i32 %1, 2
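
Across this file the updated RV32IZBAXQCISLS bodies drop the paired qc.lrw/qc.srw indexed accesses in favour of a single Zba sh3add, which computes rd = rs2 + (rs1 << 3), followed by plain lw/sw with immediate offsets for the two 32-bit halves; fixed displacements such as 96/100 or 384/388 then fold into the load offsets instead of needing an addi per half. A sketch of the source pattern these checks cover, assuming +zba (function name illustrative):

define i64 @lrd_sketch(ptr %a, i32 %b) {
  %p = getelementptr i64, ptr %a, i32 %b   ; becomes sh3add a0, a1, a0
  %v = load i64, ptr %p, align 8           ; becomes lw 0(a0) / lw 4(a0) on RV32
  ret i64 %v
}
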
diff --git a/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll b/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll
index cdaae23..5724c4f 100644
--- a/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll
+++ b/llvm/test/CodeGen/RISCV/xtheadfmemidx.ll
@@ -1,33 +1,27 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+d -mattr=+xtheadfmemidx -mattr=+m -verify-machineinstrs < %s \
-; RUN: | FileCheck %s -check-prefix=RV32XTHEADMEMIDX
-; RUN: llc -mtriple=riscv64 -mattr=+d -mattr=+xtheadfmemidx -verify-machineinstrs < %s \
-; RUN: | FileCheck %s -check-prefix=RV64XTHEADFMEMIDX
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d,+xtheadfmemidx \
+; RUN: -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,RV32XTHEADFMEMIDX
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d,+xtheadfmemidx \
+; RUN: -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,RV64XTHEADFMEMIDX
-define float @flrw(ptr %a, i64 %b) {
-; RV32XTHEADMEMIDX-LABEL: flrw:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.flrw fa5, a0, a1, 2
-; RV32XTHEADMEMIDX-NEXT: fadd.s fa0, fa5, fa5
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADFMEMIDX-LABEL: flrw:
-; RV64XTHEADFMEMIDX: # %bb.0:
-; RV64XTHEADFMEMIDX-NEXT: th.flrw fa5, a0, a1, 2
-; RV64XTHEADFMEMIDX-NEXT: fadd.s fa0, fa5, fa5
-; RV64XTHEADFMEMIDX-NEXT: ret
- %1 = getelementptr float, ptr %a, i64 %b
+define float @flrw(ptr %a, iXLen %b) {
+; CHECK-LABEL: flrw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.flrw fa5, a0, a1, 2
+; CHECK-NEXT: fadd.s fa0, fa5, fa5
+; CHECK-NEXT: ret
+ %1 = getelementptr float, ptr %a, iXLen %b
%2 = load float, ptr %1, align 4
%3 = fadd float %2, %2
ret float %3
}
define float @flurw(ptr %a, i32 %b) {
-; RV32XTHEADMEMIDX-LABEL: flurw:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.flrw fa5, a0, a1, 2
-; RV32XTHEADMEMIDX-NEXT: fadd.s fa0, fa5, fa5
-; RV32XTHEADMEMIDX-NEXT: ret
+; RV32XTHEADFMEMIDX-LABEL: flurw:
+; RV32XTHEADFMEMIDX: # %bb.0:
+; RV32XTHEADFMEMIDX-NEXT: th.flrw fa5, a0, a1, 2
+; RV32XTHEADFMEMIDX-NEXT: fadd.s fa0, fa5, fa5
+; RV32XTHEADFMEMIDX-NEXT: ret
;
; RV64XTHEADFMEMIDX-LABEL: flurw:
; RV64XTHEADFMEMIDX: # %bb.0:
@@ -41,30 +35,24 @@ define float @flurw(ptr %a, i32 %b) {
ret float %4
}
-define void @fsrw(ptr %a, i64 %b, float %c) {
-; RV32XTHEADMEMIDX-LABEL: fsrw:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: fadd.s fa5, fa0, fa0
-; RV32XTHEADMEMIDX-NEXT: th.fsrw fa5, a0, a1, 2
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADFMEMIDX-LABEL: fsrw:
-; RV64XTHEADFMEMIDX: # %bb.0:
-; RV64XTHEADFMEMIDX-NEXT: fadd.s fa5, fa0, fa0
-; RV64XTHEADFMEMIDX-NEXT: th.fsrw fa5, a0, a1, 2
-; RV64XTHEADFMEMIDX-NEXT: ret
+define void @fsrw(ptr %a, iXLen %b, float %c) {
+; CHECK-LABEL: fsrw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fadd.s fa5, fa0, fa0
+; CHECK-NEXT: th.fsrw fa5, a0, a1, 2
+; CHECK-NEXT: ret
%1 = fadd float %c, %c
- %2 = getelementptr float, ptr %a, i64 %b
+ %2 = getelementptr float, ptr %a, iXLen %b
store float %1, ptr %2, align 4
ret void
}
define void @fsurw(ptr %a, i32 %b, float %c) {
-; RV32XTHEADMEMIDX-LABEL: fsurw:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: fadd.s fa5, fa0, fa0
-; RV32XTHEADMEMIDX-NEXT: th.fsrw fa5, a0, a1, 2
-; RV32XTHEADMEMIDX-NEXT: ret
+; RV32XTHEADFMEMIDX-LABEL: fsurw:
+; RV32XTHEADFMEMIDX: # %bb.0:
+; RV32XTHEADFMEMIDX-NEXT: fadd.s fa5, fa0, fa0
+; RV32XTHEADFMEMIDX-NEXT: th.fsrw fa5, a0, a1, 2
+; RV32XTHEADFMEMIDX-NEXT: ret
;
; RV64XTHEADFMEMIDX-LABEL: fsurw:
; RV64XTHEADFMEMIDX: # %bb.0:
@@ -78,30 +66,24 @@ define void @fsurw(ptr %a, i32 %b, float %c) {
ret void
}
-define double @flrd(ptr %a, i64 %b) {
-; RV32XTHEADMEMIDX-LABEL: flrd:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.flrd fa5, a0, a1, 3
-; RV32XTHEADMEMIDX-NEXT: fadd.d fa0, fa5, fa5
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADFMEMIDX-LABEL: flrd:
-; RV64XTHEADFMEMIDX: # %bb.0:
-; RV64XTHEADFMEMIDX-NEXT: th.flrd fa5, a0, a1, 3
-; RV64XTHEADFMEMIDX-NEXT: fadd.d fa0, fa5, fa5
-; RV64XTHEADFMEMIDX-NEXT: ret
- %1 = getelementptr double, ptr %a, i64 %b
+define double @flrd(ptr %a, iXLen %b) {
+; CHECK-LABEL: flrd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.flrd fa5, a0, a1, 3
+; CHECK-NEXT: fadd.d fa0, fa5, fa5
+; CHECK-NEXT: ret
+ %1 = getelementptr double, ptr %a, iXLen %b
%2 = load double, ptr %1, align 8
%3 = fadd double %2, %2
ret double %3
}
define double @flurd(ptr %a, i32 %b) {
-; RV32XTHEADMEMIDX-LABEL: flurd:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.flrd fa5, a0, a1, 3
-; RV32XTHEADMEMIDX-NEXT: fadd.d fa0, fa5, fa5
-; RV32XTHEADMEMIDX-NEXT: ret
+; RV32XTHEADFMEMIDX-LABEL: flurd:
+; RV32XTHEADFMEMIDX: # %bb.0:
+; RV32XTHEADFMEMIDX-NEXT: th.flrd fa5, a0, a1, 3
+; RV32XTHEADFMEMIDX-NEXT: fadd.d fa0, fa5, fa5
+; RV32XTHEADFMEMIDX-NEXT: ret
;
; RV64XTHEADFMEMIDX-LABEL: flurd:
; RV64XTHEADFMEMIDX: # %bb.0:
@@ -115,30 +97,24 @@ define double @flurd(ptr %a, i32 %b) {
ret double %4
}
-define void @fsrd(ptr %a, i64 %b, double %c) {
-; RV32XTHEADMEMIDX-LABEL: fsrd:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: fadd.d fa5, fa0, fa0
-; RV32XTHEADMEMIDX-NEXT: th.fsrd fa5, a0, a1, 3
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADFMEMIDX-LABEL: fsrd:
-; RV64XTHEADFMEMIDX: # %bb.0:
-; RV64XTHEADFMEMIDX-NEXT: fadd.d fa5, fa0, fa0
-; RV64XTHEADFMEMIDX-NEXT: th.fsrd fa5, a0, a1, 3
-; RV64XTHEADFMEMIDX-NEXT: ret
+define void @fsrd(ptr %a, iXLen %b, double %c) {
+; CHECK-LABEL: fsrd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fadd.d fa5, fa0, fa0
+; CHECK-NEXT: th.fsrd fa5, a0, a1, 3
+; CHECK-NEXT: ret
%1 = fadd double %c, %c
- %2 = getelementptr double, ptr %a, i64 %b
+ %2 = getelementptr double, ptr %a, iXLen %b
store double %1, ptr %2, align 8
ret void
}
define void @fsurd(ptr %a, i32 %b, double %c) {
-; RV32XTHEADMEMIDX-LABEL: fsurd:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: fadd.d fa5, fa0, fa0
-; RV32XTHEADMEMIDX-NEXT: th.fsrd fa5, a0, a1, 3
-; RV32XTHEADMEMIDX-NEXT: ret
+; RV32XTHEADFMEMIDX-LABEL: fsurd:
+; RV32XTHEADFMEMIDX: # %bb.0:
+; RV32XTHEADFMEMIDX-NEXT: fadd.d fa5, fa0, fa0
+; RV32XTHEADFMEMIDX-NEXT: th.fsrd fa5, a0, a1, 3
+; RV32XTHEADFMEMIDX-NEXT: ret
;
; RV64XTHEADFMEMIDX-LABEL: fsurd:
; RV64XTHEADFMEMIDX: # %bb.0:
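
The RUN-line rewrite above follows a common LLVM test idiom: the IR is written against a placeholder iXLen type, sed specializes it to i32 or i64 before llc runs, and functions whose output is identical on both targets then share assertions under the common CHECK prefix (the removed RUN lines also show the old prefixes were inconsistently named, RV32XTHEADMEMIDX next to RV64XTHEADFMEMIDX). Only the u-variants, which exercise a zero-extended i32 index, keep per-target prefixes. The pattern in miniature:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 | FileCheck %s
; CHECK-LABEL: gep_sketch:
; CHECK: add a0, a0, a1
define ptr @gep_sketch(ptr %a, iXLen %b) {
  %p = getelementptr i8, ptr %a, iXLen %b
  ret ptr %p
}
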
diff --git a/llvm/test/CodeGen/RISCV/xtheadmemidx.ll b/llvm/test/CodeGen/RISCV/xtheadmemidx.ll
index fc20fcb..9f0f8d9 100644
--- a/llvm/test/CodeGen/RISCV/xtheadmemidx.ll
+++ b/llvm/test/CodeGen/RISCV/xtheadmemidx.ll
@@ -1,238 +1,156 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+d -mattr=+xtheadmemidx -mattr=+m -verify-machineinstrs < %s \
-; RUN: | FileCheck %s -check-prefix=RV32XTHEADMEMIDX
-; RUN: llc -mtriple=riscv64 -mattr=+d -mattr=+xtheadmemidx -mattr=+m -verify-machineinstrs < %s \
-; RUN: | FileCheck %s -check-prefix=RV64XTHEADMEMIDX
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d,+xtheadmemidx \
+; RUN: -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,RV32XTHEADMEMIDX
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d,+xtheadmemidx \
+; RUN: -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,RV64XTHEADMEMIDX
define ptr @lbia(ptr %base, ptr %addr.2, i8 %a) {
-; RV32XTHEADMEMIDX-LABEL: lbia:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lbia a3, (a0), -1, 0
-; RV32XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV32XTHEADMEMIDX-NEXT: sb a2, 0(a1)
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lbia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lbia a3, (a0), -1, 0
-; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV64XTHEADMEMIDX-NEXT: sb a2, 0(a1)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i8, ptr %base, i8 0
+; CHECK-LABEL: lbia:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lbia a3, (a0), -1, 0
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: sb a2, 0(a1)
+; CHECK-NEXT: ret
+ %addr = getelementptr i8, ptr %base, iXLen 0
%ld = load i8, ptr %addr
- %addr.1 = getelementptr i8, ptr %base, i8 -1
+ %addr.1 = getelementptr i8, ptr %base, iXLen -1
%res = add i8 %ld, %a
store i8 %res, ptr %addr.2
ret ptr %addr.1
}
define ptr @lbib(ptr %base, i8 %a) {
-; RV32XTHEADMEMIDX-LABEL: lbib:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lbib a2, (a0), 1, 0
-; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1
-; RV32XTHEADMEMIDX-NEXT: sb a1, 1(a0)
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lbib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lbib a2, (a0), 1, 0
-; RV64XTHEADMEMIDX-NEXT: add a1, a2, a1
-; RV64XTHEADMEMIDX-NEXT: sb a1, 1(a0)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i8, ptr %base, i8 1
+; CHECK-LABEL: lbib:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lbib a2, (a0), 1, 0
+; CHECK-NEXT: add a1, a2, a1
+; CHECK-NEXT: sb a1, 1(a0)
+; CHECK-NEXT: ret
+ %addr = getelementptr i8, ptr %base, iXLen 1
%ld = load i8, ptr %addr
- %addr.1 = getelementptr i8, ptr %base, i8 2
+ %addr.1 = getelementptr i8, ptr %base, iXLen 2
%res = add i8 %ld, %a
store i8 %res, ptr %addr.1
ret ptr %addr
}
-define ptr @lbuia(ptr %base, ptr %addr.2, i64 %a) {
-; RV32XTHEADMEMIDX-LABEL: lbuia:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lbuia a4, (a0), -1, 0
-; RV32XTHEADMEMIDX-NEXT: add a2, a4, a2
-; RV32XTHEADMEMIDX-NEXT: sltu a4, a2, a4
-; RV32XTHEADMEMIDX-NEXT: add a3, a3, a4
-; RV32XTHEADMEMIDX-NEXT: sw a2, 0(a1)
-; RV32XTHEADMEMIDX-NEXT: sw a3, 4(a1)
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lbuia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lbuia a3, (a0), -1, 0
-; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV64XTHEADMEMIDX-NEXT: sd a2, 0(a1)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i8, ptr %base, i8 0
+define ptr @lbuia(ptr %base, ptr %addr.2, i32 %a) {
+; CHECK-LABEL: lbuia:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lbuia a3, (a0), -1, 0
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: sw a2, 0(a1)
+; CHECK-NEXT: ret
+ %addr = getelementptr i8, ptr %base, iXLen 0
%ld = load i8, ptr %addr
- %zext = zext i8 %ld to i64
- %addr.1 = getelementptr i8, ptr %base, i8 -1
- %res = add i64 %zext, %a
- store i64 %res, ptr %addr.2
+ %zext = zext i8 %ld to i32
+ %addr.1 = getelementptr i8, ptr %base, iXLen -1
+ %res = add i32 %zext, %a
+ store i32 %res, ptr %addr.2
ret ptr %addr.1
}
-define ptr @lbuib(ptr %base, i64 %a, ptr %addr.1) {
-; RV32XTHEADMEMIDX-LABEL: lbuib:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lbuib a4, (a0), 1, 0
-; RV32XTHEADMEMIDX-NEXT: add a1, a4, a1
-; RV32XTHEADMEMIDX-NEXT: sltu a4, a1, a4
-; RV32XTHEADMEMIDX-NEXT: add a2, a2, a4
-; RV32XTHEADMEMIDX-NEXT: sw a1, 0(a3)
-; RV32XTHEADMEMIDX-NEXT: sw a2, 4(a3)
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lbuib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lbuib a3, (a0), 1, 0
-; RV64XTHEADMEMIDX-NEXT: add a1, a3, a1
-; RV64XTHEADMEMIDX-NEXT: sd a1, 0(a2)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i8, ptr %base, i8 1
+define ptr @lbuib(ptr %base, i32 %a, ptr %addr.1) {
+; CHECK-LABEL: lbuib:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lbuib a3, (a0), 1, 0
+; CHECK-NEXT: add a1, a3, a1
+; CHECK-NEXT: sw a1, 0(a2)
+; CHECK-NEXT: ret
+ %addr = getelementptr i8, ptr %base, iXLen 1
%ld = load i8, ptr %addr
- %zext = zext i8 %ld to i64
- %res = add i64 %zext, %a
- store i64 %res, ptr %addr.1
+ %zext = zext i8 %ld to i32
+ %res = add i32 %zext, %a
+ store i32 %res, ptr %addr.1
ret ptr %addr
}
define ptr @lhia(ptr %base, ptr %addr.2, i16 %a) {
-; RV32XTHEADMEMIDX-LABEL: lhia:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lhia a3, (a0), -16, 1
-; RV32XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV32XTHEADMEMIDX-NEXT: sh a2, 0(a1)
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lhia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lhia a3, (a0), -16, 1
-; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV64XTHEADMEMIDX-NEXT: sh a2, 0(a1)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i16, ptr %base, i16 0
+; CHECK-LABEL: lhia:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lhia a3, (a0), -16, 1
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: sh a2, 0(a1)
+; CHECK-NEXT: ret
+ %addr = getelementptr i16, ptr %base, iXLen 0
%ld = load i16, ptr %addr
- %addr.1 = getelementptr i16, ptr %base, i16 -16
+ %addr.1 = getelementptr i16, ptr %base, iXLen -16
%res = add i16 %ld, %a
store i16 %res, ptr %addr.2
ret ptr %addr.1
}
define ptr @lhib(ptr %base, i16 %a) {
-; RV32XTHEADMEMIDX-LABEL: lhib:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lhib a2, (a0), 2, 0
-; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1
-; RV32XTHEADMEMIDX-NEXT: sh a1, 2(a0)
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lhib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lhib a2, (a0), 2, 0
-; RV64XTHEADMEMIDX-NEXT: add a1, a2, a1
-; RV64XTHEADMEMIDX-NEXT: sh a1, 2(a0)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i16, ptr %base, i16 1
+; CHECK-LABEL: lhib:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lhib a2, (a0), 2, 0
+; CHECK-NEXT: add a1, a2, a1
+; CHECK-NEXT: sh a1, 2(a0)
+; CHECK-NEXT: ret
+ %addr = getelementptr i16, ptr %base, iXLen 1
%ld = load i16, ptr %addr
- %addr.1 = getelementptr i16, ptr %base, i16 2
+ %addr.1 = getelementptr i16, ptr %base, iXLen 2
%res = add i16 %ld, %a
store i16 %res, ptr %addr.1
ret ptr %addr
}
-define ptr @lhuia(ptr %base, ptr %addr.2, i64 %a) {
-; RV32XTHEADMEMIDX-LABEL: lhuia:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lhuia a4, (a0), -16, 1
-; RV32XTHEADMEMIDX-NEXT: add a2, a4, a2
-; RV32XTHEADMEMIDX-NEXT: sltu a4, a2, a4
-; RV32XTHEADMEMIDX-NEXT: add a3, a3, a4
-; RV32XTHEADMEMIDX-NEXT: sw a2, 0(a1)
-; RV32XTHEADMEMIDX-NEXT: sw a3, 4(a1)
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lhuia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lhuia a3, (a0), -16, 1
-; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV64XTHEADMEMIDX-NEXT: sd a2, 0(a1)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i16, ptr %base, i16 0
+define ptr @lhuia(ptr %base, ptr %addr.2, i32 %a) {
+; CHECK-LABEL: lhuia:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lhuia a3, (a0), -16, 1
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: sw a2, 0(a1)
+; CHECK-NEXT: ret
+ %addr = getelementptr i16, ptr %base, iXLen 0
%ld = load i16, ptr %addr
- %zext = zext i16 %ld to i64
- %addr.1 = getelementptr i16, ptr %base, i16 -16
- %res = add i64 %zext, %a
- store i64 %res, ptr %addr.2
+ %zext = zext i16 %ld to i32
+ %addr.1 = getelementptr i16, ptr %base, iXLen -16
+ %res = add i32 %zext, %a
+ store i32 %res, ptr %addr.2
ret ptr %addr.1
}
-define ptr @lhuib(ptr %base, i64 %a, ptr %addr.1) {
-; RV32XTHEADMEMIDX-LABEL: lhuib:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lhuib a4, (a0), 2, 0
-; RV32XTHEADMEMIDX-NEXT: add a1, a4, a1
-; RV32XTHEADMEMIDX-NEXT: sltu a4, a1, a4
-; RV32XTHEADMEMIDX-NEXT: add a2, a2, a4
-; RV32XTHEADMEMIDX-NEXT: sw a1, 0(a3)
-; RV32XTHEADMEMIDX-NEXT: sw a2, 4(a3)
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lhuib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lhuib a3, (a0), 2, 0
-; RV64XTHEADMEMIDX-NEXT: add a1, a3, a1
-; RV64XTHEADMEMIDX-NEXT: sd a1, 0(a2)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i16, ptr %base, i16 1
+define ptr @lhuib(ptr %base, i32 %a, ptr %addr.1) {
+; CHECK-LABEL: lhuib:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lhuib a3, (a0), 2, 0
+; CHECK-NEXT: add a1, a3, a1
+; CHECK-NEXT: sw a1, 0(a2)
+; CHECK-NEXT: ret
+ %addr = getelementptr i16, ptr %base, iXLen 1
%ld = load i16, ptr %addr
- %zext = zext i16 %ld to i64
- %res = add i64 %zext, %a
- store i64 %res, ptr %addr.1
+ %zext = zext i16 %ld to i32
+ %res = add i32 %zext, %a
+ store i32 %res, ptr %addr.1
ret ptr %addr
}
define ptr @lwia(ptr %base, ptr %addr.2, i32 %a) {
-; RV32XTHEADMEMIDX-LABEL: lwia:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lwia a3, (a0), -16, 2
-; RV32XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV32XTHEADMEMIDX-NEXT: sw a2, 0(a1)
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lwia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lwia a3, (a0), -16, 2
-; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2
-; RV64XTHEADMEMIDX-NEXT: sw a2, 0(a1)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i32, ptr %base, i32 0
+; CHECK-LABEL: lwia:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lwia a3, (a0), -16, 2
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: sw a2, 0(a1)
+; CHECK-NEXT: ret
+ %addr = getelementptr i32, ptr %base, iXLen 0
%ld = load i32, ptr %addr
- %addr.1 = getelementptr i32, ptr %base, i32 -16
+ %addr.1 = getelementptr i32, ptr %base, iXLen -16
%res = add i32 %ld, %a
store i32 %res, ptr %addr.2
ret ptr %addr.1
}
define ptr @lwib(ptr %base, i32 %a) {
-; RV32XTHEADMEMIDX-LABEL: lwib:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lwib a2, (a0), 4, 0
-; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1
-; RV32XTHEADMEMIDX-NEXT: sw a1, 4(a0)
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lwib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lwib a2, (a0), 4, 0
-; RV64XTHEADMEMIDX-NEXT: add a1, a2, a1
-; RV64XTHEADMEMIDX-NEXT: sw a1, 4(a0)
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i32, ptr %base, i32 1
+; CHECK-LABEL: lwib:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lwib a2, (a0), 4, 0
+; CHECK-NEXT: add a1, a2, a1
+; CHECK-NEXT: sw a1, 4(a0)
+; CHECK-NEXT: ret
+ %addr = getelementptr i32, ptr %base, iXLen 1
%ld = load i32, ptr %addr
- %addr.1 = getelementptr i32, ptr %base, i32 2
+ %addr.1 = getelementptr i32, ptr %base, iXLen 2
%res = add i32 %ld, %a
store i32 %res, ptr %addr.1
ret ptr %addr
@@ -255,10 +173,10 @@ define ptr @lwuia(ptr %base, ptr %addr.2, i64 %a) {
; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2
; RV64XTHEADMEMIDX-NEXT: sd a2, 0(a1)
; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i32, ptr %base, i32 0
+ %addr = getelementptr i32, ptr %base, iXLen 0
%ld = load i32, ptr %addr
%zext = zext i32 %ld to i64
- %addr.1 = getelementptr i32, ptr %base, i32 -16
+ %addr.1 = getelementptr i32, ptr %base, iXLen -16
%res = add i64 %zext, %a
store i64 %res, ptr %addr.2
ret ptr %addr.1
@@ -281,7 +199,7 @@ define ptr @lwuib(ptr %base, i64 %a, ptr %addr.1) {
; RV64XTHEADMEMIDX-NEXT: add a1, a3, a1
; RV64XTHEADMEMIDX-NEXT: sd a1, 0(a2)
; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i32, ptr %base, i32 1
+ %addr = getelementptr i32, ptr %base, iXLen 1
%ld = load i32, ptr %addr
%zext = zext i32 %ld to i64
%res = add i64 %zext, %a
@@ -309,9 +227,9 @@ define ptr @ldia(ptr %base, ptr %addr.2, i64 %a) {
; RV64XTHEADMEMIDX-NEXT: add a2, a3, a2
; RV64XTHEADMEMIDX-NEXT: sd a2, 0(a1)
; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i64, ptr %base, i64 0
+ %addr = getelementptr i64, ptr %base, iXLen 0
%ld = load i64, ptr %addr
- %addr.1 = getelementptr i64, ptr %base, i64 -16
+ %addr.1 = getelementptr i64, ptr %base, iXLen -16
%res = add i64 %ld, %a
store i64 %res, ptr %addr.2
ret ptr %addr.1
@@ -336,117 +254,81 @@ define ptr @ldib(ptr %base, i64 %a) {
; RV64XTHEADMEMIDX-NEXT: add a1, a2, a1
; RV64XTHEADMEMIDX-NEXT: sd a1, 8(a0)
; RV64XTHEADMEMIDX-NEXT: ret
- %addr = getelementptr i64, ptr %base, i64 1
+ %addr = getelementptr i64, ptr %base, iXLen 1
%ld = load i64, ptr %addr
- %addr.1 = getelementptr i64, ptr %base, i64 2
+ %addr.1 = getelementptr i64, ptr %base, iXLen 2
%res = add i64 %ld, %a
store i64 %res, ptr %addr.1
ret ptr %addr
}
define ptr @sbia(ptr %base, i8 %a, i8 %b) {
-; RV32XTHEADMEMIDX-LABEL: sbia:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV32XTHEADMEMIDX-NEXT: th.sbia a1, (a0), 1, 0
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: sbia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.sbia a1, (a0), 1, 0
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i8, ptr %base, i8 1
+; CHECK-LABEL: sbia:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: th.sbia a1, (a0), 1, 0
+; CHECK-NEXT: ret
+ %addr.1 = getelementptr i8, ptr %base, iXLen 1
%res = add i8 %a, %b
store i8 %res, ptr %base
ret ptr %addr.1
}
define ptr @sbib(ptr %base, i8 %a, i8 %b) {
-; RV32XTHEADMEMIDX-LABEL: sbib:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV32XTHEADMEMIDX-NEXT: th.sbib a1, (a0), 1, 0
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: sbib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.sbib a1, (a0), 1, 0
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i8, ptr %base, i8 1
+; CHECK-LABEL: sbib:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: th.sbib a1, (a0), 1, 0
+; CHECK-NEXT: ret
+ %addr.1 = getelementptr i8, ptr %base, iXLen 1
%res = add i8 %a, %b
store i8 %res, ptr %addr.1
ret ptr %addr.1
}
define ptr @shia(ptr %base, i16 %a, i16 %b) {
-; RV32XTHEADMEMIDX-LABEL: shia:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV32XTHEADMEMIDX-NEXT: th.shia a1, (a0), -9, 1
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: shia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.shia a1, (a0), -9, 1
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i16, ptr %base, i16 -9
+; CHECK-LABEL: shia:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: th.shia a1, (a0), -9, 1
+; CHECK-NEXT: ret
+ %addr.1 = getelementptr i16, ptr %base, iXLen -9
%res = add i16 %a, %b
store i16 %res, ptr %base
ret ptr %addr.1
}
define ptr @shib(ptr %base, i16 %a, i16 %b) {
-; RV32XTHEADMEMIDX-LABEL: shib:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV32XTHEADMEMIDX-NEXT: th.shib a1, (a0), 2, 0
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: shib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.shib a1, (a0), 2, 0
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i16, ptr %base, i16 1
+; CHECK-LABEL: shib:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: th.shib a1, (a0), 2, 0
+; CHECK-NEXT: ret
+ %addr.1 = getelementptr i16, ptr %base, iXLen 1
%res = add i16 %a, %b
store i16 %res, ptr %addr.1
ret ptr %addr.1
}
define ptr @swia(ptr %base, i32 %a, i32 %b) {
-; RV32XTHEADMEMIDX-LABEL: swia:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV32XTHEADMEMIDX-NEXT: th.swia a1, (a0), 8, 2
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: swia:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.swia a1, (a0), 8, 2
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i32, ptr %base, i32 8
+; CHECK-LABEL: swia:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: th.swia a1, (a0), 8, 2
+; CHECK-NEXT: ret
+ %addr.1 = getelementptr i32, ptr %base, iXLen 8
%res = add i32 %a, %b
store i32 %res, ptr %base
ret ptr %addr.1
}
define ptr @swib(ptr %base, i32 %a, i32 %b) {
-; RV32XTHEADMEMIDX-LABEL: swib:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV32XTHEADMEMIDX-NEXT: th.swib a1, (a0), -13, 3
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: swib:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.swib a1, (a0), -13, 3
-; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i32, ptr %base, i32 -26
+; CHECK-LABEL: swib:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: th.swib a1, (a0), -13, 3
+; CHECK-NEXT: ret
+ %addr.1 = getelementptr i32, ptr %base, iXLen -26
%res = add i32 %a, %b
store i32 %res, ptr %addr.1
ret ptr %addr.1
@@ -470,7 +352,7 @@ define ptr @sdia(ptr %base, i64 %a, i64 %b) {
; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2
; RV64XTHEADMEMIDX-NEXT: th.sdia a1, (a0), 8, 3
; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i64, ptr %base, i64 8
+ %addr.1 = getelementptr i64, ptr %base, iXLen 8
%res = add i64 %a, %b
store i64 %res, ptr %base
ret ptr %addr.1
@@ -492,48 +374,33 @@ define ptr @sdib(ptr %base, i64 %a, i64 %b) {
; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2
; RV64XTHEADMEMIDX-NEXT: th.sdib a1, (a0), 8, 0
; RV64XTHEADMEMIDX-NEXT: ret
- %addr.1 = getelementptr i64, ptr %base, i64 1
+ %addr.1 = getelementptr i64, ptr %base, iXLen 1
%res = add i64 %a, %b
store i64 %res, ptr %addr.1
ret ptr %addr.1
}
-define i8 @lrb_anyext(ptr %a, i64 %b) {
-; RV32XTHEADMEMIDX-LABEL: lrb_anyext:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lrb a0, a0, a1, 0
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lrb_anyext:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrb a0, a0, a1, 0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i8, ptr %a, i64 %b
+define i8 @lrb_anyext(ptr %a, iXLen %b) {
+; CHECK-LABEL: lrb_anyext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lrb a0, a0, a1, 0
+; CHECK-NEXT: ret
+ %1 = getelementptr i8, ptr %a, iXLen %b
%2 = load i8, ptr %1, align 1
ret i8 %2
}
-define i64 @lrb(ptr %a, i64 %b) {
-; RV32XTHEADMEMIDX-LABEL: lrb:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lrb a1, a0, a1, 0
-; RV32XTHEADMEMIDX-NEXT: srai a2, a1, 31
-; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1
-; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1
-; RV32XTHEADMEMIDX-NEXT: add a2, a2, a2
-; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lrb:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrb a0, a0, a1, 0
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i8, ptr %a, i64 %b
+define i32 @lrb(ptr %a, iXLen %b) {
+; CHECK-LABEL: lrb:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lrb a0, a0, a1, 0
+; CHECK-NEXT: add a0, a0, a0
+; CHECK-NEXT: ret
+ %1 = getelementptr i8, ptr %a, iXLen %b
%2 = load i8, ptr %1, align 1
- %3 = sext i8 %2 to i64
- %4 = add i64 %3, %3
- ret i64 %4
+ %3 = sext i8 %2 to i32
+ %4 = add i32 %3, %3
+ ret i32 %4
}
define i8 @lurb_anyext(ptr %a, i32 %b) {
@@ -552,15 +419,11 @@ define i8 @lurb_anyext(ptr %a, i32 %b) {
ret i8 %3
}
-define i64 @lurb(ptr %a, i32 %b) {
+define i32 @lurb(ptr %a, i32 %b) {
; RV32XTHEADMEMIDX-LABEL: lurb:
; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lrb a1, a0, a1, 0
-; RV32XTHEADMEMIDX-NEXT: srai a2, a1, 31
-; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1
-; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1
-; RV32XTHEADMEMIDX-NEXT: add a2, a2, a2
-; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1
+; RV32XTHEADMEMIDX-NEXT: th.lrb a0, a0, a1, 0
+; RV32XTHEADMEMIDX-NEXT: add a0, a0, a0
; RV32XTHEADMEMIDX-NEXT: ret
;
; RV64XTHEADMEMIDX-LABEL: lurb:
@@ -571,37 +434,29 @@ define i64 @lurb(ptr %a, i32 %b) {
%1 = zext i32 %b to i64
%2 = getelementptr i8, ptr %a, i64 %1
%3 = load i8, ptr %2, align 1
- %4 = sext i8 %3 to i64
- %5 = add i64 %4, %4
- ret i64 %5
-}
-
-define i64 @lrbu(ptr %a, i64 %b) {
-; RV32XTHEADMEMIDX-LABEL: lrbu:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lrbu a1, a0, a1, 0
-; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1
-; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lrbu:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrbu a0, a0, a1, 0
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i8, ptr %a, i64 %b
+ %4 = sext i8 %3 to i32
+ %5 = add i32 %4, %4
+ ret i32 %5
+}
+
+define i32 @lrbu(ptr %a, iXLen %b) {
+; CHECK-LABEL: lrbu:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lrbu a0, a0, a1, 0
+; CHECK-NEXT: add a0, a0, a0
+; CHECK-NEXT: ret
+ %1 = getelementptr i8, ptr %a, iXLen %b
%2 = load i8, ptr %1, align 1
- %3 = zext i8 %2 to i64
- %4 = add i64 %3, %3
- ret i64 %4
+ %3 = zext i8 %2 to i32
+ %4 = add i32 %3, %3
+ ret i32 %4
}
-define i64 @lurbu(ptr %a, i32 %b) {
+define i32 @lurbu(ptr %a, i32 %b) {
; RV32XTHEADMEMIDX-LABEL: lurbu:
; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lrbu a1, a0, a1, 0
-; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1
-; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1
+; RV32XTHEADMEMIDX-NEXT: th.lrbu a0, a0, a1, 0
+; RV32XTHEADMEMIDX-NEXT: add a0, a0, a0
; RV32XTHEADMEMIDX-NEXT: ret
;
; RV64XTHEADMEMIDX-LABEL: lurbu:
@@ -612,47 +467,32 @@ define i64 @lurbu(ptr %a, i32 %b) {
%1 = zext i32 %b to i64
%2 = getelementptr i8, ptr %a, i64 %1
%3 = load i8, ptr %2, align 1
- %4 = zext i8 %3 to i64
- %5 = add i64 %4, %4
- ret i64 %5
+ %4 = zext i8 %3 to i32
+ %5 = add i32 %4, %4
+ ret i32 %5
}
-define i16 @lrh_anyext(ptr %a, i64 %b) {
-; RV32XTHEADMEMIDX-LABEL: lrh_anyext:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lrh a0, a0, a1, 1
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lrh_anyext:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrh a0, a0, a1, 1
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i16, ptr %a, i64 %b
+define i16 @lrh_anyext(ptr %a, iXLen %b) {
+; CHECK-LABEL: lrh_anyext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lrh a0, a0, a1, 1
+; CHECK-NEXT: ret
+ %1 = getelementptr i16, ptr %a, iXLen %b
%2 = load i16, ptr %1, align 2
ret i16 %2
}
-define i64 @lrh(ptr %a, i64 %b) {
-; RV32XTHEADMEMIDX-LABEL: lrh:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lrh a1, a0, a1, 1
-; RV32XTHEADMEMIDX-NEXT: srai a2, a1, 31
-; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1
-; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1
-; RV32XTHEADMEMIDX-NEXT: add a2, a2, a2
-; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lrh:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrh a0, a0, a1, 1
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i16, ptr %a, i64 %b
+define i32 @lrh(ptr %a, iXLen %b) {
+; CHECK-LABEL: lrh:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lrh a0, a0, a1, 1
+; CHECK-NEXT: add a0, a0, a0
+; CHECK-NEXT: ret
+ %1 = getelementptr i16, ptr %a, iXLen %b
%2 = load i16, ptr %1, align 2
- %3 = sext i16 %2 to i64
- %4 = add i64 %3, %3
- ret i64 %4
+ %3 = sext i16 %2 to i32
+ %4 = add i32 %3, %3
+ ret i32 %4
}
define i16 @lurh_anyext(ptr %a, i32 %b) {
@@ -671,15 +511,11 @@ define i16 @lurh_anyext(ptr %a, i32 %b) {
ret i16 %3
}
-define i64 @lurh(ptr %a, i32 %b) {
+define i32 @lurh(ptr %a, i32 %b) {
; RV32XTHEADMEMIDX-LABEL: lurh:
; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lrh a1, a0, a1, 1
-; RV32XTHEADMEMIDX-NEXT: srai a2, a1, 31
-; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1
-; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1
-; RV32XTHEADMEMIDX-NEXT: add a2, a2, a2
-; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1
+; RV32XTHEADMEMIDX-NEXT: th.lrh a0, a0, a1, 1
+; RV32XTHEADMEMIDX-NEXT: add a0, a0, a0
; RV32XTHEADMEMIDX-NEXT: ret
;
; RV64XTHEADMEMIDX-LABEL: lurh:
@@ -690,37 +526,29 @@ define i64 @lurh(ptr %a, i32 %b) {
%1 = zext i32 %b to i64
%2 = getelementptr i16, ptr %a, i64 %1
%3 = load i16, ptr %2, align 2
- %4 = sext i16 %3 to i64
- %5 = add i64 %4, %4
- ret i64 %5
-}
-
-define i64 @lrhu(ptr %a, i64 %b) {
-; RV32XTHEADMEMIDX-LABEL: lrhu:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lrhu a1, a0, a1, 1
-; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1
-; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lrhu:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrhu a0, a0, a1, 1
-; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i16, ptr %a, i64 %b
+ %4 = sext i16 %3 to i32
+ %5 = add i32 %4, %4
+ ret i32 %5
+}
+
+define i32 @lrhu(ptr %a, iXLen %b) {
+; CHECK-LABEL: lrhu:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lrhu a0, a0, a1, 1
+; CHECK-NEXT: add a0, a0, a0
+; CHECK-NEXT: ret
+ %1 = getelementptr i16, ptr %a, iXLen %b
%2 = load i16, ptr %1, align 2
- %3 = zext i16 %2 to i64
- %4 = add i64 %3, %3
- ret i64 %4
+ %3 = zext i16 %2 to i32
+ %4 = add i32 %3, %3
+ ret i32 %4
}
-define i64 @lurhu(ptr %a, i32 %b) {
+define i32 @lurhu(ptr %a, i32 %b) {
; RV32XTHEADMEMIDX-LABEL: lurhu:
; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lrhu a1, a0, a1, 1
-; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1
-; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1
+; RV32XTHEADMEMIDX-NEXT: th.lrhu a0, a0, a1, 1
+; RV32XTHEADMEMIDX-NEXT: add a0, a0, a0
; RV32XTHEADMEMIDX-NEXT: ret
;
; RV64XTHEADMEMIDX-LABEL: lurhu:
@@ -731,27 +559,22 @@ define i64 @lurhu(ptr %a, i32 %b) {
%1 = zext i32 %b to i64
%2 = getelementptr i16, ptr %a, i64 %1
%3 = load i16, ptr %2, align 2
- %4 = zext i16 %3 to i64
- %5 = add i64 %4, %4
- ret i64 %5
+ %4 = zext i16 %3 to i32
+ %5 = add i32 %4, %4
+ ret i32 %5
}
-define i32 @lrw_anyext(ptr %a, i64 %b) {
-; RV32XTHEADMEMIDX-LABEL: lrw_anyext:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: th.lrw a0, a0, a1, 2
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: lrw_anyext:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: th.lrw a0, a0, a1, 2
-; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i32, ptr %a, i64 %b
+define i32 @lrw_anyext(ptr %a, iXLen %b) {
+; CHECK-LABEL: lrw_anyext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: th.lrw a0, a0, a1, 2
+; CHECK-NEXT: ret
+ %1 = getelementptr i32, ptr %a, iXLen %b
%2 = load i32, ptr %1, align 4
ret i32 %2
}
-define i64 @lrw(ptr %a, i64 %b) {
+define i64 @lrw(ptr %a, iXLen %b) {
; RV32XTHEADMEMIDX-LABEL: lrw:
; RV32XTHEADMEMIDX: # %bb.0:
; RV32XTHEADMEMIDX-NEXT: th.lrw a1, a0, a1, 2
@@ -767,7 +590,7 @@ define i64 @lrw(ptr %a, i64 %b) {
; RV64XTHEADMEMIDX-NEXT: th.lrw a0, a0, a1, 2
; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i32, ptr %a, i64 %b
+ %1 = getelementptr i32, ptr %a, iXLen %b
%2 = load i32, ptr %1, align 4
%3 = sext i32 %2 to i64
%4 = add i64 %3, %3
@@ -814,7 +637,7 @@ define i64 @lurw(ptr %a, i32 %b) {
ret i64 %5
}
-define i64 @lrwu(ptr %a, i64 %b) {
+define i64 @lrwu(ptr %a, iXLen %b) {
; RV32XTHEADMEMIDX-LABEL: lrwu:
; RV32XTHEADMEMIDX: # %bb.0:
; RV32XTHEADMEMIDX-NEXT: th.lrw a1, a0, a1, 2
@@ -827,7 +650,7 @@ define i64 @lrwu(ptr %a, i64 %b) {
; RV64XTHEADMEMIDX-NEXT: th.lrwu a0, a0, a1, 2
; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i32, ptr %a, i64 %b
+ %1 = getelementptr i32, ptr %a, iXLen %b
%2 = load i32, ptr %1, align 4
%3 = zext i32 %2 to i64
%4 = add i64 %3, %3
@@ -855,7 +678,7 @@ define i64 @lurwu(ptr %a, i32 %b) {
ret i64 %5
}
-define i64 @lrd(ptr %a, i64 %b) {
+define i64 @lrd(ptr %a, iXLen %b) {
; RV32XTHEADMEMIDX-LABEL: lrd:
; RV32XTHEADMEMIDX: # %bb.0:
; RV32XTHEADMEMIDX-NEXT: th.lrw a2, a0, a1, 3
@@ -872,23 +695,23 @@ define i64 @lrd(ptr %a, i64 %b) {
; RV64XTHEADMEMIDX-NEXT: th.lrd a0, a0, a1, 3
; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
; RV64XTHEADMEMIDX-NEXT: ret
- %1 = getelementptr i64, ptr %a, i64 %b
+ %1 = getelementptr i64, ptr %a, iXLen %b
%2 = load i64, ptr %1, align 8
%3 = add i64 %2, %2
ret i64 %3
}
-define i64 @lrd_2(ptr %a, i64 %b) {
+define i64 @lrd_2(ptr %a, iXLen %b) {
; RV32XTHEADMEMIDX-LABEL: lrd_2:
; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: addi a2, a0, 96
-; RV32XTHEADMEMIDX-NEXT: th.lrw a2, a2, a1, 3
-; RV32XTHEADMEMIDX-NEXT: addi a0, a0, 100
-; RV32XTHEADMEMIDX-NEXT: th.lrw a1, a0, a1, 3
-; RV32XTHEADMEMIDX-NEXT: add a0, a2, a2
-; RV32XTHEADMEMIDX-NEXT: sltu a2, a0, a2
-; RV32XTHEADMEMIDX-NEXT: add a1, a1, a1
-; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2
+; RV32XTHEADMEMIDX-NEXT: slli a1, a1, 3
+; RV32XTHEADMEMIDX-NEXT: add a0, a1, a0
+; RV32XTHEADMEMIDX-NEXT: lw a1, 96(a0)
+; RV32XTHEADMEMIDX-NEXT: lw a2, 100(a0)
+; RV32XTHEADMEMIDX-NEXT: add a0, a1, a1
+; RV32XTHEADMEMIDX-NEXT: sltu a1, a0, a1
+; RV32XTHEADMEMIDX-NEXT: add a2, a2, a2
+; RV32XTHEADMEMIDX-NEXT: add a1, a2, a1
; RV32XTHEADMEMIDX-NEXT: ret
;
; RV64XTHEADMEMIDX-LABEL: lrd_2:
@@ -897,8 +720,8 @@ define i64 @lrd_2(ptr %a, i64 %b) {
; RV64XTHEADMEMIDX-NEXT: th.lrd a0, a0, a1, 3
; RV64XTHEADMEMIDX-NEXT: add a0, a0, a0
; RV64XTHEADMEMIDX-NEXT: ret
- %1 = add i64 %b, 12
- %2 = getelementptr i64, ptr %a, i64 %1
+ %1 = add iXLen %b, 12
+ %2 = getelementptr i64, ptr %a, iXLen %1
%3 = load i64, ptr %2, align 8
%4 = add i64 %3, %3
ret i64 %4
@@ -928,20 +751,14 @@ define i64 @lurd(ptr %a, i32 %b) {
ret i64 %4
}
-define void @srb(ptr %a, i64 %b, i8 %c) {
-; RV32XTHEADMEMIDX-LABEL: srb:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: add a3, a3, a3
-; RV32XTHEADMEMIDX-NEXT: th.srb a3, a0, a1, 0
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: srb:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a2, a2, a2
-; RV64XTHEADMEMIDX-NEXT: th.srb a2, a0, a1, 0
-; RV64XTHEADMEMIDX-NEXT: ret
+define void @srb(ptr %a, iXLen %b, i8 %c) {
+; CHECK-LABEL: srb:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a2, a2, a2
+; CHECK-NEXT: th.srb a2, a0, a1, 0
+; CHECK-NEXT: ret
%1 = add i8 %c, %c
- %2 = getelementptr i8, ptr %a, i64 %b
+ %2 = getelementptr i8, ptr %a, iXLen %b
store i8 %1, ptr %2, align 1
ret void
}
@@ -965,20 +782,14 @@ define void @surb(ptr %a, i32 %b, i8 %c) {
ret void
}
-define void @srh(ptr %a, i64 %b, i16 %c) {
-; RV32XTHEADMEMIDX-LABEL: srh:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: add a3, a3, a3
-; RV32XTHEADMEMIDX-NEXT: th.srh a3, a0, a1, 1
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: srh:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a2, a2, a2
-; RV64XTHEADMEMIDX-NEXT: th.srh a2, a0, a1, 1
-; RV64XTHEADMEMIDX-NEXT: ret
+define void @srh(ptr %a, iXLen %b, i16 %c) {
+; CHECK-LABEL: srh:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a2, a2, a2
+; CHECK-NEXT: th.srh a2, a0, a1, 1
+; CHECK-NEXT: ret
%1 = add i16 %c, %c
- %2 = getelementptr i16, ptr %a, i64 %b
+ %2 = getelementptr i16, ptr %a, iXLen %b
store i16 %1, ptr %2, align 2
ret void
}
@@ -1002,20 +813,14 @@ define void @surh(ptr %a, i32 %b, i16 %c) {
ret void
}
-define void @srw(ptr %a, i64 %b, i32 %c) {
-; RV32XTHEADMEMIDX-LABEL: srw:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: add a3, a3, a3
-; RV32XTHEADMEMIDX-NEXT: th.srw a3, a0, a1, 2
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: srw:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a2, a2, a2
-; RV64XTHEADMEMIDX-NEXT: th.srw a2, a0, a1, 2
-; RV64XTHEADMEMIDX-NEXT: ret
+define void @srw(ptr %a, iXLen %b, i32 %c) {
+; CHECK-LABEL: srw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a2, a2, a2
+; CHECK-NEXT: th.srw a2, a0, a1, 2
+; CHECK-NEXT: ret
%1 = add i32 %c, %c
- %2 = getelementptr i32, ptr %a, i64 %b
+ %2 = getelementptr i32, ptr %a, iXLen %b
store i32 %1, ptr %2, align 4
ret void
}
@@ -1039,16 +844,16 @@ define void @surw(ptr %a, i32 %b, i32 %c) {
ret void
}
-define void @srd(ptr %a, i64 %b, i64 %c) {
+define void @srd(ptr %a, iXLen %b, i64 %c) {
; RV32XTHEADMEMIDX-LABEL: srd:
; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: add a2, a3, a3
-; RV32XTHEADMEMIDX-NEXT: add a4, a4, a4
-; RV32XTHEADMEMIDX-NEXT: sltu a3, a2, a3
-; RV32XTHEADMEMIDX-NEXT: th.srw a2, a0, a1, 3
-; RV32XTHEADMEMIDX-NEXT: add a3, a4, a3
+; RV32XTHEADMEMIDX-NEXT: add a4, a2, a2
+; RV32XTHEADMEMIDX-NEXT: add a3, a3, a3
+; RV32XTHEADMEMIDX-NEXT: sltu a2, a4, a2
+; RV32XTHEADMEMIDX-NEXT: th.srw a4, a0, a1, 3
+; RV32XTHEADMEMIDX-NEXT: add a2, a3, a2
; RV32XTHEADMEMIDX-NEXT: addi a0, a0, 4
-; RV32XTHEADMEMIDX-NEXT: th.srw a3, a0, a1, 3
+; RV32XTHEADMEMIDX-NEXT: th.srw a2, a0, a1, 3
; RV32XTHEADMEMIDX-NEXT: ret
;
; RV64XTHEADMEMIDX-LABEL: srd:
@@ -1057,7 +862,7 @@ define void @srd(ptr %a, i64 %b, i64 %c) {
; RV64XTHEADMEMIDX-NEXT: th.srd a2, a0, a1, 3
; RV64XTHEADMEMIDX-NEXT: ret
%1 = add i64 %c, %c
- %2 = getelementptr i64, ptr %a, i64 %b
+ %2 = getelementptr i64, ptr %a, iXLen %b
store i64 %1, ptr %2, align 8
ret void
}
@@ -1087,24 +892,18 @@ define void @surd(ptr %a, i32 %b, i64 %c) {
}
define ptr @test_simm5(ptr %base, i32 %a, i32 %b) {
-; RV32XTHEADMEMIDX-LABEL: test_simm5:
-; RV32XTHEADMEMIDX: # %bb.0:
-; RV32XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV32XTHEADMEMIDX-NEXT: th.swia a1, (a0), -12, 2
-; RV32XTHEADMEMIDX-NEXT: ret
-;
-; RV64XTHEADMEMIDX-LABEL: test_simm5:
-; RV64XTHEADMEMIDX: # %bb.0:
-; RV64XTHEADMEMIDX-NEXT: add a1, a1, a2
-; RV64XTHEADMEMIDX-NEXT: th.swia a1, (a0), -12, 2
-; RV64XTHEADMEMIDX-NEXT: ret
+; CHECK-LABEL: test_simm5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: th.swia a1, (a0), -12, 2
+; CHECK-NEXT: ret
%addr.1 = getelementptr i32, ptr %base, i32 -12
%res = add i32 %a, %b
store i32 %res, ptr %base
ret ptr %addr.1
}
-define i64 @lrd_large_shift(ptr %a, i64 %b) {
+define i64 @lrd_large_shift(ptr %a, iXLen %b) {
; RV32XTHEADMEMIDX-LABEL: lrd_large_shift:
; RV32XTHEADMEMIDX: # %bb.0:
; RV32XTHEADMEMIDX-NEXT: slli a1, a1, 5
@@ -1119,14 +918,14 @@ define i64 @lrd_large_shift(ptr %a, i64 %b) {
; RV64XTHEADMEMIDX-NEXT: add a0, a1, a0
; RV64XTHEADMEMIDX-NEXT: ld a0, 384(a0)
; RV64XTHEADMEMIDX-NEXT: ret
- %1 = add i64 %b, 12
- %2 = shl i64 %1, 2
- %3 = getelementptr i64, ptr %a, i64 %2
+ %1 = add iXLen %b, 12
+ %2 = shl iXLen %1, 2
+ %3 = getelementptr i64, ptr %a, iXLen %2
%4 = load i64, ptr %3, align 8
ret i64 %4
}
-define i64 @lrd_large_offset(ptr %a, i64 %b) {
+define i64 @lrd_large_offset(ptr %a, iXLen %b) {
; RV32XTHEADMEMIDX-LABEL: lrd_large_offset:
; RV32XTHEADMEMIDX: # %bb.0:
; RV32XTHEADMEMIDX-NEXT: slli a1, a1, 3
@@ -1145,8 +944,8 @@ define i64 @lrd_large_offset(ptr %a, i64 %b) {
; RV64XTHEADMEMIDX-NEXT: add a0, a0, a1
; RV64XTHEADMEMIDX-NEXT: ld a0, 1792(a0)
; RV64XTHEADMEMIDX-NEXT: ret
- %1 = add i64 %b, 12000
- %2 = getelementptr i64, ptr %a, i64 %1
+ %1 = add iXLen %b, 12000
+ %2 = getelementptr i64, ptr %a, iXLen %1
%3 = load i64, ptr %2, align 8
ret i64 %3
}
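
A quick key to the dense CHECK lines in this file: the XTheadMemIdx ia forms post-increment the base register, so th.lbia rd, (rs1), imm5, shift loads from rs1 and then does rs1 += imm5 << shift, the ib forms apply that increment before the access, and the lr/sr forms are register-indexed, th.lrw rd, rs1, rs2, shift accessing rs1 + (rs2 << shift). That is why getelementptr i16 with a -16 index folds into th.lhia ..., -16, 1, and a -26-element i32 step becomes th.swib ..., -13, 3. The post-increment idiom the ia tests match, as a sketch (iXLen as in the RUN lines above; names illustrative):

define ptr @post_inc_sketch(ptr %base, ptr %out) {
  %v = load i8, ptr %base                        ; th.lbia: load from rs1 ...
  %next = getelementptr i8, ptr %base, iXLen -1  ; ... then rs1 += -1 << 0
  store i8 %v, ptr %out                          ; keeps the load live
  ret ptr %next                                  ; updated base is the result
}
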
diff --git a/llvm/test/CodeGen/SPARC/tls-sp.ll b/llvm/test/CodeGen/SPARC/tls-sp.ll
new file mode 100644
index 0000000..de9af01
--- /dev/null
+++ b/llvm/test/CodeGen/SPARC/tls-sp.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=sparc -relocation-model=pic < %s | FileCheck --check-prefix=SPARC %s
+; RUN: llc -mtriple=sparc64 -relocation-model=pic < %s | FileCheck --check-prefix=SPARC64 %s
+
+@x = external thread_local global i8
+
+;; Test that we don't over-allocate stack space when calling __tls_get_addr
+;; in the case where the call frame pseudos can be eliminated.
+define ptr @no_alloca() nounwind {
+; SPARC-LABEL: no_alloca:
+; SPARC: ! %bb.0: ! %entry
+; SPARC-NEXT: save %sp, -96, %sp
+; SPARC-NEXT: .Ltmp0:
+; SPARC-NEXT: call .Ltmp1
+; SPARC-NEXT: .Ltmp2:
+; SPARC-NEXT: sethi %hi(_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.Ltmp0)), %i0
+; SPARC-NEXT: .Ltmp1:
+; SPARC-NEXT: or %i0, %lo(_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.Ltmp0)), %i0
+; SPARC-NEXT: add %i0, %o7, %i0
+; SPARC-NEXT: sethi %tgd_hi22(x), %i1
+; SPARC-NEXT: add %i1, %tgd_lo10(x), %i1
+; SPARC-NEXT: add %i0, %i1, %o0, %tgd_add(x)
+; SPARC-NEXT: call __tls_get_addr, %tgd_call(x)
+; SPARC-NEXT: nop
+; SPARC-NEXT: ret
+; SPARC-NEXT: restore %g0, %o0, %o0
+;
+; SPARC64-LABEL: no_alloca:
+; SPARC64: ! %bb.0: ! %entry
+; SPARC64-NEXT: save %sp, -128, %sp
+; SPARC64-NEXT: .Ltmp0:
+; SPARC64-NEXT: rd %pc, %o7
+; SPARC64-NEXT: .Ltmp2:
+; SPARC64-NEXT: sethi %hi(_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.Ltmp0)), %i0
+; SPARC64-NEXT: .Ltmp1:
+; SPARC64-NEXT: or %i0, %lo(_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.Ltmp0)), %i0
+; SPARC64-NEXT: add %i0, %o7, %i0
+; SPARC64-NEXT: sethi %tgd_hi22(x), %i1
+; SPARC64-NEXT: add %i1, %tgd_lo10(x), %i1
+; SPARC64-NEXT: add %i0, %i1, %o0, %tgd_add(x)
+; SPARC64-NEXT: call __tls_get_addr, %tgd_call(x)
+; SPARC64-NEXT: nop
+; SPARC64-NEXT: ret
+; SPARC64-NEXT: restore %g0, %o0, %o0
+entry:
+ %0 = call ptr @llvm.threadlocal.address.p0(ptr @x)
+ ret ptr %0
+}
+
+;; Test that %sp is valid for the call to __tls_get_addr. We store to a dynamic
+;; alloca in order to prevent eliminating any call frame pseudos from the call.
+define ptr @dynamic_alloca(i64 %n) nounwind {
+; SPARC-LABEL: dynamic_alloca:
+; SPARC: ! %bb.0: ! %entry
+; SPARC-NEXT: save %sp, -96, %sp
+; SPARC-NEXT: .Ltmp3:
+; SPARC-NEXT: call .Ltmp4
+; SPARC-NEXT: .Ltmp5:
+; SPARC-NEXT: sethi %hi(_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.Ltmp3)), %i0
+; SPARC-NEXT: .Ltmp4:
+; SPARC-NEXT: or %i0, %lo(_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.Ltmp3)), %i0
+; SPARC-NEXT: add %i0, %o7, %i0
+; SPARC-NEXT: sethi %tgd_hi22(x), %i2
+; SPARC-NEXT: add %i2, %tgd_lo10(x), %i2
+; SPARC-NEXT: add %i0, %i2, %o0, %tgd_add(x)
+; SPARC-NEXT: call __tls_get_addr, %tgd_call(x)
+; SPARC-NEXT: nop
+; SPARC-NEXT: add %i1, 7, %i0
+; SPARC-NEXT: and %i0, -8, %i0
+; SPARC-NEXT: sub %sp, %i0, %i0
+; SPARC-NEXT: add %i0, -8, %sp
+; SPARC-NEXT: mov 1, %i1
+; SPARC-NEXT: stb %i1, [%i0+88]
+; SPARC-NEXT: ret
+; SPARC-NEXT: restore %g0, %o0, %o0
+;
+; SPARC64-LABEL: dynamic_alloca:
+; SPARC64: ! %bb.0: ! %entry
+; SPARC64-NEXT: save %sp, -128, %sp
+; SPARC64-NEXT: .Ltmp3:
+; SPARC64-NEXT: rd %pc, %o7
+; SPARC64-NEXT: .Ltmp5:
+; SPARC64-NEXT: sethi %hi(_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.Ltmp3)), %i1
+; SPARC64-NEXT: .Ltmp4:
+; SPARC64-NEXT: or %i1, %lo(_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.Ltmp3)), %i1
+; SPARC64-NEXT: add %i1, %o7, %i1
+; SPARC64-NEXT: sethi %tgd_hi22(x), %i2
+; SPARC64-NEXT: add %i2, %tgd_lo10(x), %i2
+; SPARC64-NEXT: add %i1, %i2, %o0, %tgd_add(x)
+; SPARC64-NEXT: call __tls_get_addr, %tgd_call(x)
+; SPARC64-NEXT: nop
+; SPARC64-NEXT: add %i0, 15, %i0
+; SPARC64-NEXT: and %i0, -16, %i0
+; SPARC64-NEXT: sub %sp, %i0, %i0
+; SPARC64-NEXT: mov %i0, %sp
+; SPARC64-NEXT: mov 1, %i1
+; SPARC64-NEXT: stb %i1, [%i0+2175]
+; SPARC64-NEXT: ret
+; SPARC64-NEXT: restore %g0, %o0, %o0
+entry:
+ %0 = call ptr @llvm.threadlocal.address.p0(ptr @x)
+ %1 = alloca i8, i64 %n
+ store i8 1, ptr %1
+ ret ptr %0
+}
diff --git a/llvm/test/CodeGen/SystemZ/pr60413.ll b/llvm/test/CodeGen/SystemZ/pr60413.ll
index bbf4d50..8a6a303 100644
--- a/llvm/test/CodeGen/SystemZ/pr60413.ll
+++ b/llvm/test/CodeGen/SystemZ/pr60413.ll
@@ -16,31 +16,31 @@ define dso_local void @m() local_unnamed_addr #1 {
; CHECK-NEXT: stmg %r13, %r15, 104(%r15)
; CHECK-NEXT: aghi %r15, -168
; CHECK-NEXT: lhrl %r1, f+4
+; CHECK-NEXT: sll %r1, 8
; CHECK-NEXT: larl %r2, f
-; CHECK-NEXT: llc %r2, 6(%r2)
-; CHECK-NEXT: larl %r3, e
-; CHECK-NEXT: lb %r0, 3(%r3)
-; CHECK-NEXT: rosbg %r2, %r1, 32, 55, 8
-; CHECK-NEXT: vlvgp %v0, %r2, %r0
-; CHECK-NEXT: vlvgf %v0, %r2, 0
-; CHECK-NEXT: vlvgf %v0, %r2, 2
-; CHECK-NEXT: vlvgp %v1, %r0, %r2
-; CHECK-NEXT: vlvgp %v2, %r2, %r2
-; CHECK-NEXT: lr %r1, %r2
+; CHECK-NEXT: ic %r1, 6(%r2)
+; CHECK-NEXT: larl %r2, e
+; CHECK-NEXT: lb %r0, 3(%r2)
+; CHECK-NEXT: vlvgp %v0, %r0, %r1
+; CHECK-NEXT: vlvgp %v1, %r1, %r0
+; CHECK-NEXT: vlvgf %v1, %r1, 0
+; CHECK-NEXT: vlvgf %v1, %r1, 2
+; CHECK-NEXT: vlvgp %v2, %r1, %r1
+; CHECK-NEXT: # kill: def $r1l killed $r1l killed $r1d
; CHECK-NEXT: nilh %r1, 255
; CHECK-NEXT: chi %r1, 128
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: risbg %r1, %r1, 63, 191, 36
+; CHECK-NEXT: vlvgf %v0, %r0, 0
+; CHECK-NEXT: vlvgf %v0, %r0, 2
; CHECK-NEXT: vgbm %v3, 30583
; CHECK-NEXT: vn %v0, %v0, %v3
-; CHECK-NEXT: vlvgf %v1, %r0, 0
-; CHECK-NEXT: vlvgf %v1, %r0, 2
; CHECK-NEXT: vn %v1, %v1, %v3
; CHECK-NEXT: vrepf %v2, %v2, 1
; CHECK-NEXT: vn %v2, %v2, %v3
; CHECK-NEXT: vrepif %v3, 127
-; CHECK-NEXT: vchlf %v0, %v0, %v3
-; CHECK-NEXT: vlgvf %r13, %v0, 0
+; CHECK-NEXT: vchlf %v1, %v1, %v3
+; CHECK-NEXT: vlgvf %r13, %v1, 0
; CHECK-NEXT: vchlf %v2, %v2, %v3
; CHECK-NEXT: vlgvf %r3, %v2, 1
; CHECK-NEXT: nilf %r3, 1
@@ -54,13 +54,13 @@ define dso_local void @m() local_unnamed_addr #1 {
; CHECK-NEXT: nilf %r14, 1
; CHECK-NEXT: rosbg %r2, %r14, 32, 51, 12
; CHECK-NEXT: rosbg %r2, %r13, 52, 52, 11
-; CHECK-NEXT: vlgvf %r13, %v0, 1
+; CHECK-NEXT: vlgvf %r13, %v1, 1
; CHECK-NEXT: rosbg %r2, %r13, 53, 53, 10
-; CHECK-NEXT: vlgvf %r13, %v0, 2
+; CHECK-NEXT: vlgvf %r13, %v1, 2
; CHECK-NEXT: rosbg %r2, %r13, 54, 54, 9
-; CHECK-NEXT: vlgvf %r13, %v0, 3
+; CHECK-NEXT: vlgvf %r13, %v1, 3
; CHECK-NEXT: rosbg %r2, %r13, 55, 55, 8
-; CHECK-NEXT: vchlf %v0, %v1, %v3
+; CHECK-NEXT: vchlf %v0, %v0, %v3
; CHECK-NEXT: vlgvf %r13, %v0, 0
; CHECK-NEXT: rosbg %r2, %r13, 56, 56, 7
; CHECK-NEXT: vlgvf %r13, %v0, 1
diff --git a/llvm/test/CodeGen/WebAssembly/ref-test-func.ll b/llvm/test/CodeGen/WebAssembly/ref-test-func.ll
new file mode 100644
index 0000000..e3760a0
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/ref-test-func.ll
@@ -0,0 +1,120 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s --mtriple=wasm32-unknown-unknown -mcpu=mvp -mattr=+reference-types | FileCheck --check-prefixes CHECK,CHK32 %s
+; RUN: llc < %s --mtriple=wasm64-unknown-unknown -mcpu=mvp -mattr=+reference-types | FileCheck --check-prefixes CHECK,CHK64 %s
+
+define void @test_fpsig_void_void(ptr noundef %func) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fpsig_void_void:
+; CHK32: .functype test_fpsig_void_void (i32) -> ()
+; CHK64: .functype test_fpsig_void_void (i64) -> ()
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: table.get __indirect_function_table
+; CHECK-NEXT: ref.test () -> ()
+; CHECK-NEXT: call use
+; CHECK-NEXT: # fallthrough-return
+entry:
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func)
+ tail call void @use(i32 noundef %res) #3
+ ret void
+}
+
+define void @test_fpsig_return_i32(ptr noundef %func) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fpsig_return_i32:
+; CHK32: .functype test_fpsig_return_i32 (i32) -> ()
+; CHK64: .functype test_fpsig_return_i32 (i64) -> ()
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: table.get __indirect_function_table
+; CHECK-NEXT: ref.test () -> (i32)
+; CHECK-NEXT: call use
+; CHECK-NEXT: # fallthrough-return
+entry:
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i32 0)
+ tail call void @use(i32 noundef %res) #3
+ ret void
+}
+
+define void @test_fpsig_return_i64(ptr noundef %func) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fpsig_return_i64:
+; CHK32: .functype test_fpsig_return_i64 (i32) -> ()
+; CHK64: .functype test_fpsig_return_i64 (i64) -> ()
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: table.get __indirect_function_table
+; CHECK-NEXT: ref.test () -> (i64)
+; CHECK-NEXT: call use
+; CHECK-NEXT: # fallthrough-return
+entry:
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i64 0)
+ tail call void @use(i32 noundef %res) #3
+ ret void
+}
+
+define void @test_fpsig_return_f32(ptr noundef %func) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fpsig_return_f32:
+; CHK32: .functype test_fpsig_return_f32 (i32) -> ()
+; CHK64: .functype test_fpsig_return_f32 (i64) -> ()
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: table.get __indirect_function_table
+; CHECK-NEXT: ref.test () -> (f32)
+; CHECK-NEXT: call use
+; CHECK-NEXT: # fallthrough-return
+entry:
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, float 0.)
+ tail call void @use(i32 noundef %res) #3
+ ret void
+}
+
+define void @test_fpsig_return_f64(ptr noundef %func) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fpsig_return_f64:
+; CHK32: .functype test_fpsig_return_f64 (i32) -> ()
+; CHK64: .functype test_fpsig_return_f64 (i64) -> ()
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: table.get __indirect_function_table
+; CHECK-NEXT: ref.test () -> (f64)
+; CHECK-NEXT: call use
+; CHECK-NEXT: # fallthrough-return
+entry:
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, double 0.)
+ tail call void @use(i32 noundef %res) #3
+ ret void
+}
+
+
+define void @test_fpsig_param_i32(ptr noundef %func) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fpsig_param_i32:
+; CHK32: .functype test_fpsig_param_i32 (i32) -> ()
+; CHK64: .functype test_fpsig_param_i32 (i64) -> ()
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: table.get __indirect_function_table
+; CHECK-NEXT: ref.test (f64) -> ()
+; CHECK-NEXT: call use
+; CHECK-NEXT: # fallthrough-return
+entry:
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, double 0.)
+ tail call void @use(i32 noundef %res) #3
+ ret void
+}
+
+
+define void @test_fpsig_multiple_params_and_returns(ptr noundef %func) local_unnamed_addr #0 {
+; CHECK-LABEL: test_fpsig_multiple_params_and_returns:
+; CHK32: .functype test_fpsig_multiple_params_and_returns (i32) -> ()
+; CHK64: .functype test_fpsig_multiple_params_and_returns (i64) -> ()
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: table.get __indirect_function_table
+; CHECK-NEXT: ref.test (i64, f32, i64) -> (i32, i64, f32, f64)
+; CHECK-NEXT: call use
+; CHECK-NEXT: # fallthrough-return
+entry:
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i32 0, i64 0, float 0., double 0., token poison, i64 0, float 0., i64 0)
+ tail call void @use(i32 noundef %res) #3
+ ret void
+}
+
+
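+; Note on operand encoding (inferred from the checks in this file): the
+; variadic operands of @llvm.wasm.ref.test.func describe the signature being
+; tested. Operands before the `token poison` separator give the result types
+; and operands after it give the parameter types; when no token is present,
+; all operands are result types. For example, (i32 0, i64 0, float 0.,
+; double 0., token poison, i64 0, float 0., i64 0) above is checked as
+; ref.test (i64, f32, i64) -> (i32, i64, f32, f64).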
+declare void @use(i32 noundef) local_unnamed_addr #1
diff --git a/llvm/test/CodeGen/WebAssembly/removed-terminator.ll b/llvm/test/CodeGen/WebAssembly/removed-terminator.ll
new file mode 100644
index 0000000..188f6f6
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/removed-terminator.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -O0 -verify-machineinstrs < %s | FileCheck %s
+
+target triple = "wasm32-unknown-unknown"
+
+define void @test(i1 %x) {
+; CHECK-LABEL: test:
+; CHECK: .functype test (i32) -> ()
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: i32.const -1
+; CHECK-NEXT: i32.xor
+; CHECK-NEXT: i32.const 1
+; CHECK-NEXT: i32.and
+; CHECK-NEXT: drop
+; CHECK-NEXT: # %bb.1: # %exit
+; CHECK-NEXT: return
+ %y = xor i1 %x, true
+ ; This br_if's operand (%y) is stackified in RegStackify, but the br_if
+ ; itself is removed later, in CFGSort, because both of its destinations are
+ ; the same block. We must unstackify %y so that ExplicitLocals can drop it.
+ br i1 %y, label %exit, label %exit
+
+exit:
+ ret void
+}
diff --git a/llvm/test/CodeGen/WinEH/wineh-noret-cleanup.ll b/llvm/test/CodeGen/WinEH/wineh-noret-cleanup.ll
index 3b3a460..ab6672e 100644
--- a/llvm/test/CodeGen/WinEH/wineh-noret-cleanup.ll
+++ b/llvm/test/CodeGen/WinEH/wineh-noret-cleanup.ll
@@ -1,4 +1,4 @@
-; RUN: sed -e s/.Cxx:// %s | llc -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=CXX,X64CXX
+; RUN: sed -e s/.Cxx:// %s | llc -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=CXX
; RUN: sed -e s/.Seh:// %s | llc -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=SEH
; RUN: %if aarch64-registered-target %{ sed -e s/.Cxx:// %s | llc -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefix=CXX %}
; RUN: %if aarch64-registered-target %{ sed -e s/.Seh:// %s | llc -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefix=SEH %}
@@ -49,18 +49,14 @@ catch.body.2:
; CXX-NEXT: .[[ENTRY:long|word]] .Lfunc_begin0@IMGREL
; CXX-NEXT: .[[ENTRY]] -1
; CXX-NEXT: .[[ENTRY]] .Ltmp0@IMGREL
-; X64CXX-SAME: +1
; CXX-NEXT: .[[ENTRY]] 1
; CXX-NEXT: .[[ENTRY]] .Ltmp1@IMGREL
-; X64CXX-SAME: +1
; CXX-NEXT: .[[ENTRY]] -1
; CXX-NEXT: .[[ENTRY]] "?catch$3@?0?test@4HA"@IMGREL
; CXX-NEXT: .[[ENTRY]] 2
; CXX-NEXT: .[[ENTRY]] .Ltmp2@IMGREL
-; X64CXX-SAME: +1
; CXX-NEXT: .[[ENTRY]] 3
; CXX-NEXT: .[[ENTRY]] .Ltmp3@IMGREL
-; X64CXX-SAME: +1
; CXX-NEXT: .[[ENTRY]] 2
; CXX-NEXT: .[[ENTRY]] "?catch$5@?0?test@4HA"@IMGREL
; CXX-NEXT: .[[ENTRY]] 4
@@ -70,19 +66,19 @@ catch.body.2:
; SEH: .LBB0_[[CATCH:[0-9]+]]: {{.*}} %catch.body
; SEH-LABEL: .Llsda_begin0:
; SEH-NEXT: .[[ENTRY:long|word]] .Ltmp0@IMGREL
-; SEH-NEXT: .[[ENTRY]] .Ltmp1@IMGREL+1
+; SEH-NEXT: .[[ENTRY]] .Ltmp1@IMGREL
; SEH-NEXT: .[[ENTRY]] dummy_filter@IMGREL
; SEH-NEXT: .[[ENTRY]] .LBB0_[[CATCH]]@IMGREL
; SEH-NEXT: .[[ENTRY]] .Ltmp0@IMGREL
-; SEH-NEXT: .[[ENTRY]] .Ltmp1@IMGREL+1
+; SEH-NEXT: .[[ENTRY]] .Ltmp1@IMGREL
; SEH-NEXT: .[[ENTRY]] dummy_filter@IMGREL
; SEH-NEXT: .[[ENTRY]] .LBB0_[[CATCH2]]@IMGREL
; SEH-NEXT: .[[ENTRY]] .Ltmp2@IMGREL
-; SEH-NEXT: .[[ENTRY]] .Ltmp3@IMGREL+1
+; SEH-NEXT: .[[ENTRY]] .Ltmp3@IMGREL
; SEH-NEXT: .[[ENTRY]] "?dtor$[[DTOR:[0-9]+]]@?0?test@4HA"@IMGREL
; SEH-NEXT: .[[ENTRY]] 0
; SEH-NEXT: .[[ENTRY]] .Ltmp2@IMGREL
-; SEH-NEXT: .[[ENTRY]] .Ltmp3@IMGREL+1
+; SEH-NEXT: .[[ENTRY]] .Ltmp3@IMGREL
; SEH-NEXT: .[[ENTRY]] dummy_filter@IMGREL
; SEH-NEXT: .[[ENTRY]] .LBB0_[[CATCH2]]@IMGREL
; SEH-NEXT: .Llsda_end0:
diff --git a/llvm/test/CodeGen/X86/abds-neg.ll b/llvm/test/CodeGen/X86/abds-neg.ll
index 2911edf..d9064c6 100644
--- a/llvm/test/CodeGen/X86/abds-neg.ll
+++ b/llvm/test/CodeGen/X86/abds-neg.ll
@@ -1076,15 +1076,15 @@ define i64 @abd_subnsw_i64(i64 %a, i64 %b) nounwind {
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: subl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl %esi, %edx
; X86-NEXT: sarl $31, %edx
-; X86-NEXT: xorl %edx, %ecx
; X86-NEXT: xorl %edx, %esi
+; X86-NEXT: xorl %edx, %ecx
; X86-NEXT: movl %edx, %eax
-; X86-NEXT: subl %esi, %eax
-; X86-NEXT: sbbl %ecx, %edx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: sbbl %esi, %edx
; X86-NEXT: popl %esi
; X86-NEXT: retl
;
@@ -1107,15 +1107,15 @@ define i64 @abd_subnsw_i64_undef(i64 %a, i64 %b) nounwind {
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: subl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl %esi, %edx
; X86-NEXT: sarl $31, %edx
-; X86-NEXT: xorl %edx, %ecx
; X86-NEXT: xorl %edx, %esi
+; X86-NEXT: xorl %edx, %ecx
; X86-NEXT: movl %edx, %eax
-; X86-NEXT: subl %esi, %eax
-; X86-NEXT: sbbl %ecx, %edx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: sbbl %esi, %edx
; X86-NEXT: popl %esi
; X86-NEXT: retl
;
@@ -1142,32 +1142,32 @@ define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind {
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-16, %esp
; X86-NEXT: subl $16, %esp
-; X86-NEXT: movl 36(%ebp), %eax
; X86-NEXT: movl 32(%ebp), %ecx
+; X86-NEXT: movl 36(%ebp), %eax
+; X86-NEXT: movl 24(%ebp), %edi
; X86-NEXT: movl 28(%ebp), %edx
-; X86-NEXT: movl 24(%ebp), %esi
-; X86-NEXT: subl 40(%ebp), %esi
+; X86-NEXT: subl 40(%ebp), %edi
; X86-NEXT: sbbl 44(%ebp), %edx
; X86-NEXT: sbbl 48(%ebp), %ecx
; X86-NEXT: sbbl 52(%ebp), %eax
-; X86-NEXT: movl %eax, %edi
-; X86-NEXT: sarl $31, %edi
-; X86-NEXT: xorl %edi, %eax
-; X86-NEXT: xorl %edi, %ecx
-; X86-NEXT: xorl %edi, %edx
-; X86-NEXT: xorl %edi, %esi
-; X86-NEXT: movl %edi, %ebx
-; X86-NEXT: subl %esi, %ebx
-; X86-NEXT: movl %edi, %esi
-; X86-NEXT: sbbl %edx, %esi
-; X86-NEXT: movl %edi, %edx
+; X86-NEXT: movl %eax, %esi
+; X86-NEXT: sarl $31, %esi
+; X86-NEXT: xorl %esi, %eax
+; X86-NEXT: xorl %esi, %ecx
+; X86-NEXT: xorl %esi, %edx
+; X86-NEXT: xorl %esi, %edi
+; X86-NEXT: movl %esi, %ebx
+; X86-NEXT: subl %edi, %ebx
+; X86-NEXT: movl %esi, %edi
+; X86-NEXT: sbbl %edx, %edi
+; X86-NEXT: movl %esi, %edx
; X86-NEXT: sbbl %ecx, %edx
-; X86-NEXT: sbbl %eax, %edi
+; X86-NEXT: sbbl %eax, %esi
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: movl %ebx, (%eax)
-; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: movl %edi, 4(%eax)
; X86-NEXT: movl %edx, 8(%eax)
-; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: movl %esi, 12(%eax)
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
@@ -1203,32 +1203,32 @@ define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind {
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-16, %esp
; X86-NEXT: subl $16, %esp
-; X86-NEXT: movl 36(%ebp), %eax
; X86-NEXT: movl 32(%ebp), %ecx
+; X86-NEXT: movl 36(%ebp), %eax
+; X86-NEXT: movl 24(%ebp), %edi
; X86-NEXT: movl 28(%ebp), %edx
-; X86-NEXT: movl 24(%ebp), %esi
-; X86-NEXT: subl 40(%ebp), %esi
+; X86-NEXT: subl 40(%ebp), %edi
; X86-NEXT: sbbl 44(%ebp), %edx
; X86-NEXT: sbbl 48(%ebp), %ecx
; X86-NEXT: sbbl 52(%ebp), %eax
-; X86-NEXT: movl %eax, %edi
-; X86-NEXT: sarl $31, %edi
-; X86-NEXT: xorl %edi, %eax
-; X86-NEXT: xorl %edi, %ecx
-; X86-NEXT: xorl %edi, %edx
-; X86-NEXT: xorl %edi, %esi
-; X86-NEXT: movl %edi, %ebx
-; X86-NEXT: subl %esi, %ebx
-; X86-NEXT: movl %edi, %esi
-; X86-NEXT: sbbl %edx, %esi
-; X86-NEXT: movl %edi, %edx
+; X86-NEXT: movl %eax, %esi
+; X86-NEXT: sarl $31, %esi
+; X86-NEXT: xorl %esi, %eax
+; X86-NEXT: xorl %esi, %ecx
+; X86-NEXT: xorl %esi, %edx
+; X86-NEXT: xorl %esi, %edi
+; X86-NEXT: movl %esi, %ebx
+; X86-NEXT: subl %edi, %ebx
+; X86-NEXT: movl %esi, %edi
+; X86-NEXT: sbbl %edx, %edi
+; X86-NEXT: movl %esi, %edx
; X86-NEXT: sbbl %ecx, %edx
-; X86-NEXT: sbbl %eax, %edi
+; X86-NEXT: sbbl %eax, %esi
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: movl %ebx, (%eax)
-; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: movl %edi, 4(%eax)
; X86-NEXT: movl %edx, 8(%eax)
-; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: movl %esi, 12(%eax)
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll
index 9be8166..0de308a 100644
--- a/llvm/test/CodeGen/X86/avg.ll
+++ b/llvm/test/CodeGen/X86/avg.ll
@@ -1734,20 +1734,20 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind {
; SSE2-LABEL: not_avg_v16i8_wide_constants:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm1
-; SSE2-NEXT: movdqa (%rsi), %xmm2
+; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: decl %eax
-; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: decl %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: decl %eax
-; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: decl %eax
-; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: movd %eax, %xmm4
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: decl %eax
; SSE2-NEXT: movd %eax, %xmm5
@@ -1762,6 +1762,9 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind {
; SSE2-NEXT: movd %eax, %xmm8
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm10
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: decl %eax
; SSE2-NEXT: movd %eax, %xmm9
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: decl %eax
@@ -1771,9 +1774,6 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind {
; SSE2-NEXT: movd %eax, %xmm12
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: decl %eax
-; SSE2-NEXT: movd %eax, %xmm10
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT: decl %eax
; SSE2-NEXT: movd %eax, %xmm13
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: decl %eax
@@ -1783,43 +1783,45 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind {
; SSE2-NEXT: movd %eax, %xmm15
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: decl %eax
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,0,0,0]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,0,0,0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[0,0,0,0]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[0,0,0,0]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSE2-NEXT: movapd %xmm4, %xmm5
; SSE2-NEXT: andpd %xmm1, %xmm5
; SSE2-NEXT: xorpd %xmm4, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: paddw %xmm5, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1],xmm11[2],xmm9[2],xmm11[3],xmm9[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm12[0,0,0,0]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm14[0,0,0,0]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm10[0],xmm0[1]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
-; SSE2-NEXT: movapd %xmm0, %xmm3
-; SSE2-NEXT: andpd %xmm2, %xmm3
-; SSE2-NEXT: xorpd %xmm0, %xmm2
-; SSE2-NEXT: psrlw $1, %xmm2
-; SSE2-NEXT: paddw %xmm3, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm9[0],xmm2[1]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
+; SSE2-NEXT: movapd %xmm2, %xmm3
+; SSE2-NEXT: andpd %xmm0, %xmm3
+; SSE2-NEXT: xorpd %xmm2, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: paddw %xmm3, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: packuswb %xmm0, %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: retq
;
@@ -1829,71 +1831,75 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind {
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX1-NEXT: vpextrw $3, %xmm3, %edx
-; AVX1-NEXT: vpextrw $2, %xmm3, %ecx
-; AVX1-NEXT: vpextrw $1, %xmm3, %eax
+; AVX1-NEXT: vpextrw $7, %xmm3, %edx
+; AVX1-NEXT: vpextrw $6, %xmm3, %ecx
+; AVX1-NEXT: vpextrw $5, %xmm3, %eax
; AVX1-NEXT: decl %edx
; AVX1-NEXT: vmovd %edx, %xmm4
-; AVX1-NEXT: vpextrw $0, %xmm3, %edx
+; AVX1-NEXT: vpextrw $4, %xmm3, %edx
; AVX1-NEXT: decl %ecx
; AVX1-NEXT: vmovd %ecx, %xmm5
-; AVX1-NEXT: vpextrw $3, %xmm2, %ecx
+; AVX1-NEXT: vpextrw $1, %xmm3, %ecx
; AVX1-NEXT: decl %eax
; AVX1-NEXT: vmovd %eax, %xmm6
-; AVX1-NEXT: vpextrw $2, %xmm2, %eax
+; AVX1-NEXT: vpextrw $0, %xmm3, %eax
; AVX1-NEXT: decl %edx
; AVX1-NEXT: vmovd %edx, %xmm7
-; AVX1-NEXT: vpextrw $1, %xmm2, %edx
-; AVX1-NEXT: decl %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm8
-; AVX1-NEXT: vpextrw $0, %xmm2, %ecx
-; AVX1-NEXT: decl %eax
-; AVX1-NEXT: vmovd %eax, %xmm9
-; AVX1-NEXT: vpextrw $7, %xmm3, %eax
+; AVX1-NEXT: vpextrw $3, %xmm3, %edx
+; AVX1-NEXT: decq %rcx
+; AVX1-NEXT: vmovq %rcx, %xmm8
+; AVX1-NEXT: vpextrw $2, %xmm3, %ecx
+; AVX1-NEXT: decq %rax
+; AVX1-NEXT: vmovq %rax, %xmm3
+; AVX1-NEXT: vpextrw $7, %xmm2, %eax
; AVX1-NEXT: decl %edx
-; AVX1-NEXT: vmovd %edx, %xmm10
-; AVX1-NEXT: vpextrw $6, %xmm3, %edx
+; AVX1-NEXT: vmovd %edx, %xmm9
+; AVX1-NEXT: vpextrw $6, %xmm2, %edx
; AVX1-NEXT: decl %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm11
-; AVX1-NEXT: vpextrw $7, %xmm2, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm10
+; AVX1-NEXT: vpextrw $5, %xmm2, %ecx
; AVX1-NEXT: decl %eax
-; AVX1-NEXT: vmovd %eax, %xmm12
-; AVX1-NEXT: vpextrw $6, %xmm2, %eax
+; AVX1-NEXT: vmovd %eax, %xmm11
+; AVX1-NEXT: vpextrw $4, %xmm2, %eax
; AVX1-NEXT: decl %edx
-; AVX1-NEXT: vmovd %edx, %xmm13
-; AVX1-NEXT: vpextrw $5, %xmm3, %edx
+; AVX1-NEXT: vmovd %edx, %xmm12
+; AVX1-NEXT: vpextrw $1, %xmm2, %edx
; AVX1-NEXT: decl %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm14
-; AVX1-NEXT: vpextrw $4, %xmm3, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm13
+; AVX1-NEXT: vpextrw $0, %xmm2, %ecx
; AVX1-NEXT: decl %eax
-; AVX1-NEXT: vmovd %eax, %xmm3
-; AVX1-NEXT: vpextrw $5, %xmm2, %eax
-; AVX1-NEXT: decl %edx
-; AVX1-NEXT: vmovd %edx, %xmm15
-; AVX1-NEXT: vpextrw $4, %xmm2, %edx
-; AVX1-NEXT: decl %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: vmovd %eax, %xmm14
+; AVX1-NEXT: vpextrw $3, %xmm2, %eax
+; AVX1-NEXT: decq %rdx
+; AVX1-NEXT: vmovq %rdx, %xmm15
+; AVX1-NEXT: vpextrw $2, %xmm2, %edx
+; AVX1-NEXT: decq %rcx
+; AVX1-NEXT: vmovq %rcx, %xmm2
; AVX1-NEXT: decl %eax
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX1-NEXT: vmovd %eax, %xmm5
; AVX1-NEXT: decl %edx
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
; AVX1-NEXT: vmovd %edx, %xmm7
-; AVX1-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
-; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
-; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
-; AVX1-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm4
-; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
-; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
-; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm3, %ymm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,0,0,0]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4,5],xmm4[6,7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,0,1,1]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm6[2,3],xmm3[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,0,0,0]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4,5],xmm4[6,7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
-; AVX1-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,0,0,0,4,4,4,4]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
-; AVX1-NEXT: vmovddup {{.*#+}} ymm2 = ymm2[0,0,2,2]
-; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5,6],ymm3[7]
-; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,0,1,1]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vandps %ymm0, %ymm2, %ymm1
; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
diff --git a/llvm/test/CodeGen/X86/catchret-empty-fallthrough.ll b/llvm/test/CodeGen/X86/catchret-empty-fallthrough.ll
index ab9fa22..24d3030 100644
--- a/llvm/test/CodeGen/X86/catchret-empty-fallthrough.ll
+++ b/llvm/test/CodeGen/X86/catchret-empty-fallthrough.ll
@@ -48,6 +48,6 @@ return: ; preds = %catch, %entry
; CHECK-NEXT: .long (.Llsda_end0-.Llsda_begin0)/16
; CHECK-NEXT: .Llsda_begin0:
; CHECK-NEXT: .long .Ltmp0@IMGREL
-; CHECK-NEXT: .long .Ltmp1@IMGREL+1
+; CHECK-NEXT: .long .Ltmp1@IMGREL
; CHECK-NEXT: .long 1
; CHECK-NEXT: .long .LBB0_[[catch]]@IMGREL
diff --git a/llvm/test/CodeGen/X86/conditional-tailcall-pgso.ll b/llvm/test/CodeGen/X86/conditional-tailcall-pgso.ll
index c4c194e..7855ff2 100644
--- a/llvm/test/CodeGen/X86/conditional-tailcall-pgso.ll
+++ b/llvm/test/CodeGen/X86/conditional-tailcall-pgso.ll
@@ -121,7 +121,6 @@ define void @f_non_leaf(i32 %x, i32 %y) !prof !14 {
; WIN64-NEXT: # encoding: [0xeb,A]
; WIN64-NEXT: # fixup A - offset: 1, value: foo, kind: FK_PCRel_1
; WIN64-NEXT: .LBB1_2: # %bb2
-; WIN64-NEXT: nop # encoding: [0x90]
; WIN64-NEXT: .seh_startepilogue
; WIN64-NEXT: popq %rbx # encoding: [0x5b]
; WIN64-NEXT: .seh_endepilogue
diff --git a/llvm/test/CodeGen/X86/conditional-tailcall.ll b/llvm/test/CodeGen/X86/conditional-tailcall.ll
index 9c1d830..2859a87 100644
--- a/llvm/test/CodeGen/X86/conditional-tailcall.ll
+++ b/llvm/test/CodeGen/X86/conditional-tailcall.ll
@@ -121,7 +121,6 @@ define void @f_non_leaf(i32 %x, i32 %y) optsize {
; WIN64-NEXT: # encoding: [0xeb,A]
; WIN64-NEXT: # fixup A - offset: 1, value: foo, kind: FK_PCRel_1
; WIN64-NEXT: .LBB1_2: # %bb2
-; WIN64-NEXT: nop # encoding: [0x90]
; WIN64-NEXT: .seh_startepilogue
; WIN64-NEXT: popq %rbx # encoding: [0x5b]
; WIN64-NEXT: .seh_endepilogue
diff --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
index 661e7bb..455b72d 100644
--- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
+++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
@@ -172,10 +172,9 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %eax, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 52(%ebp), %ecx
-; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: movl 52(%ebp), %esi
+; X86-NEXT: movl %esi, %edx
; X86-NEXT: sarl $31, %edx
-; X86-NEXT: movl %ecx, %esi
; X86-NEXT: xorl %edx, %esi
; X86-NEXT: movl 48(%ebp), %ecx
; X86-NEXT: xorl %edx, %ecx
@@ -204,45 +203,45 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: sete %al
; X86-NEXT: orb %cl, %al
; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: bsrl %eax, %edx
+; X86-NEXT: bsrl %esi, %edx
; X86-NEXT: xorl $31, %edx
-; X86-NEXT: addl $32, %edx
-; X86-NEXT: bsrl %esi, %ecx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: bsrl %eax, %ecx
; X86-NEXT: xorl $31, %ecx
+; X86-NEXT: orl $32, %ecx
; X86-NEXT: testl %esi, %esi
-; X86-NEXT: cmovel %edx, %ecx
+; X86-NEXT: cmovnel %edx, %ecx
; X86-NEXT: bsrl %ebx, %edx
; X86-NEXT: xorl $31, %edx
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: bsrl %edi, %edi
; X86-NEXT: xorl $31, %edi
-; X86-NEXT: addl $32, %edi
+; X86-NEXT: orl $32, %edi
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: testl %ebx, %ebx
; X86-NEXT: cmovnel %edx, %edi
-; X86-NEXT: addl $64, %edi
+; X86-NEXT: orl $64, %edi
; X86-NEXT: movl %eax, %edx
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: orl %esi, %edx
; X86-NEXT: cmovnel %ecx, %edi
-; X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: xorl $31, %edx
-; X86-NEXT: addl $32, %edx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: bsrl %eax, %ecx
+; X86-NEXT: bsrl %eax, %edx
+; X86-NEXT: xorl $31, %edx
+; X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: xorl $31, %ecx
+; X86-NEXT: orl $32, %ecx
; X86-NEXT: testl %eax, %eax
-; X86-NEXT: cmovel %edx, %ecx
+; X86-NEXT: cmovnel %edx, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: bsrl %ebx, %esi
; X86-NEXT: xorl $31, %esi
; X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT: xorl $31, %edx
-; X86-NEXT: addl $32, %edx
+; X86-NEXT: orl $32, %edx
; X86-NEXT: testl %ebx, %ebx
; X86-NEXT: cmovnel %esi, %edx
-; X86-NEXT: addl $64, %edx
+; X86-NEXT: orl $64, %edx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: orl %eax, %esi
; X86-NEXT: cmovnel %ecx, %edx
@@ -380,9 +379,9 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: adcl $-1, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: adcl $-1, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: adcl $-1, %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: adcl $-1, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
diff --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
index 370e1c6..859e924 100644
--- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
+++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
@@ -173,17 +173,17 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: xorl $31, %edx
; X86-NEXT: bsrl 48(%ebp), %ecx
; X86-NEXT: xorl $31, %ecx
-; X86-NEXT: addl $32, %ecx
+; X86-NEXT: orl $32, %ecx
; X86-NEXT: testl %esi, %esi
; X86-NEXT: cmovnel %edx, %ecx
; X86-NEXT: bsrl %edi, %edx
; X86-NEXT: xorl $31, %edx
; X86-NEXT: bsrl %ebx, %eax
; X86-NEXT: xorl $31, %eax
-; X86-NEXT: addl $32, %eax
+; X86-NEXT: orl $32, %eax
; X86-NEXT: testl %edi, %edi
; X86-NEXT: cmovnel %edx, %eax
-; X86-NEXT: addl $64, %eax
+; X86-NEXT: orl $64, %eax
; X86-NEXT: movl 48(%ebp), %edx
; X86-NEXT: orl %esi, %edx
; X86-NEXT: cmovnel %ecx, %eax
@@ -193,7 +193,7 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl 32(%ebp), %ecx
; X86-NEXT: bsrl %ecx, %ecx
; X86-NEXT: xorl $31, %ecx
-; X86-NEXT: addl $32, %ecx
+; X86-NEXT: orl $32, %ecx
; X86-NEXT: testl %ebx, %ebx
; X86-NEXT: cmovnel %edx, %ecx
; X86-NEXT: movl 28(%ebp), %edi
@@ -201,10 +201,10 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: xorl $31, %esi
; X86-NEXT: bsrl 24(%ebp), %edx
; X86-NEXT: xorl $31, %edx
-; X86-NEXT: addl $32, %edx
+; X86-NEXT: orl $32, %edx
; X86-NEXT: testl %edi, %edi
; X86-NEXT: cmovnel %esi, %edx
-; X86-NEXT: addl $64, %edx
+; X86-NEXT: orl $64, %edx
; X86-NEXT: movl 32(%ebp), %esi
; X86-NEXT: orl %ebx, %esi
; X86-NEXT: cmovnel %ecx, %edx
diff --git a/llvm/test/CodeGen/X86/freeze-vector.ll b/llvm/test/CodeGen/X86/freeze-vector.ll
index 0f66d42..953a5e7 100644
--- a/llvm/test/CodeGen/X86/freeze-vector.ll
+++ b/llvm/test/CodeGen/X86/freeze-vector.ll
@@ -171,15 +171,15 @@ define void @freeze_extractelement(ptr %origin0, ptr %origin1, ptr %dst) nounwin
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: vmovdqa (%ecx), %xmm0
-; X86-NEXT: vpand (%edx), %xmm0, %xmm0
+; X86-NEXT: vmovdqa (%edx), %xmm0
+; X86-NEXT: vpand (%ecx), %xmm0, %xmm0
; X86-NEXT: vpextrb $6, %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: freeze_extractelement:
; X64: # %bb.0:
-; X64-NEXT: vmovdqa (%rsi), %xmm0
-; X64-NEXT: vpand (%rdi), %xmm0, %xmm0
+; X64-NEXT: vmovdqa (%rdi), %xmm0
+; X64-NEXT: vpand (%rsi), %xmm0, %xmm0
; X64-NEXT: vpextrb $6, %xmm0, (%rdx)
; X64-NEXT: retq
%i0 = load <16 x i8>, ptr %origin0
@@ -198,8 +198,8 @@ define void @freeze_extractelement_escape(ptr %origin0, ptr %origin1, ptr %dst,
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: vmovdqa (%edx), %xmm0
-; X86-NEXT: vpand (%esi), %xmm0, %xmm0
+; X86-NEXT: vmovdqa (%esi), %xmm0
+; X86-NEXT: vpand (%edx), %xmm0, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%ecx)
; X86-NEXT: vpextrb $6, %xmm0, (%eax)
; X86-NEXT: popl %esi
@@ -207,8 +207,8 @@ define void @freeze_extractelement_escape(ptr %origin0, ptr %origin1, ptr %dst,
;
; X64-LABEL: freeze_extractelement_escape:
; X64: # %bb.0:
-; X64-NEXT: vmovdqa (%rsi), %xmm0
-; X64-NEXT: vpand (%rdi), %xmm0, %xmm0
+; X64-NEXT: vmovdqa (%rdi), %xmm0
+; X64-NEXT: vpand (%rsi), %xmm0, %xmm0
; X64-NEXT: vmovdqa %xmm0, (%rcx)
; X64-NEXT: vpextrb $6, %xmm0, (%rdx)
; X64-NEXT: retq
@@ -239,8 +239,8 @@ define void @freeze_extractelement_extra_use(ptr %origin0, ptr %origin1, i64 %id
; X86-NEXT: movl 32(%ebp), %edx
; X86-NEXT: movl 12(%ebp), %esi
; X86-NEXT: movl 8(%ebp), %edi
-; X86-NEXT: vmovaps (%esi), %xmm0
-; X86-NEXT: vandps (%edi), %xmm0, %xmm0
+; X86-NEXT: vmovaps (%edi), %xmm0
+; X86-NEXT: vandps (%esi), %xmm0, %xmm0
; X86-NEXT: vmovaps %xmm0, (%esp)
; X86-NEXT: movzbl (%esp,%ecx), %ecx
; X86-NEXT: cmpb (%esp,%eax), %cl
@@ -255,8 +255,8 @@ define void @freeze_extractelement_extra_use(ptr %origin0, ptr %origin1, i64 %id
; X64: # %bb.0:
; X64-NEXT: andl $15, %ecx
; X64-NEXT: andl $15, %edx
-; X64-NEXT: vmovaps (%rsi), %xmm0
-; X64-NEXT: vandps (%rdi), %xmm0, %xmm0
+; X64-NEXT: vmovaps (%rdi), %xmm0
+; X64-NEXT: vandps (%rsi), %xmm0, %xmm0
; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzbl -24(%rsp,%rdx), %eax
; X64-NEXT: cmpb -24(%rsp,%rcx), %al
diff --git a/llvm/test/CodeGen/X86/noreturn-call-win64.ll b/llvm/test/CodeGen/X86/noreturn-call-win64.ll
index 57aa022..13be1f13 100644
--- a/llvm/test/CodeGen/X86/noreturn-call-win64.ll
+++ b/llvm/test/CodeGen/X86/noreturn-call-win64.ll
@@ -111,3 +111,15 @@ declare dso_local void @"??1MakeCleanup@@QEAA@XZ"(ptr)
; CHECK: # %unreachable
; CHECK: int3
; CHECK: .seh_handlerdata
+
+
+define dso_local void @last_call_no_return() {
+ call void @abort1()
+ unreachable
+}
+
+; CHECK-LABEL: last_call_no_return:
+; CHECK: callq abort1
+; CHECK-NEXT: int3
+; CHECK-NEXT: .seh_endproc
+
diff --git a/llvm/test/CodeGen/X86/seh-catch-all.ll b/llvm/test/CodeGen/X86/seh-catch-all.ll
index 5250bb9..4e25aab 100644
--- a/llvm/test/CodeGen/X86/seh-catch-all.ll
+++ b/llvm/test/CodeGen/X86/seh-catch-all.ll
@@ -40,7 +40,7 @@ catchall:
; CHECK-NEXT: .long (.Llsda_end0-.Llsda_begin0)/16
; CHECK-NEXT: .Llsda_begin0:
; CHECK-NEXT: .long .Ltmp{{[0-9]+}}@IMGREL
-; CHECK-NEXT: .long .Ltmp{{[0-9]+}}@IMGREL+1
+; CHECK-NEXT: .long .Ltmp{{[0-9]+}}@IMGREL
; CHECK-NEXT: .long 1
; CHECK-NEXT: .long .LBB0_2@IMGREL
; CHECK-NEXT: .Llsda_end0:
diff --git a/llvm/test/CodeGen/X86/seh-catchpad.ll b/llvm/test/CodeGen/X86/seh-catchpad.ll
index d958580..cb85f39 100644
--- a/llvm/test/CodeGen/X86/seh-catchpad.ll
+++ b/llvm/test/CodeGen/X86/seh-catchpad.ll
@@ -123,23 +123,23 @@ __except.ret: ; preds = %catch.dispatch.7
; CHECK-NEXT: .long (.Llsda_end0-.Llsda_begin0)/16
; CHECK-NEXT: .Llsda_begin0:
; CHECK-NEXT: .long .Ltmp0@IMGREL
-; CHECK-NEXT: .long .Ltmp1@IMGREL+1
+; CHECK-NEXT: .long .Ltmp1@IMGREL
; CHECK-NEXT: .long 1
; CHECK-NEXT: .long .LBB1_[[except1bb]]@IMGREL
; CHECK-NEXT: .long .Ltmp0@IMGREL
-; CHECK-NEXT: .long .Ltmp1@IMGREL+1
+; CHECK-NEXT: .long .Ltmp1@IMGREL
; CHECK-NEXT: .long "?filt$0@0@main@@"@IMGREL
; CHECK-NEXT: .long .LBB1_[[except2bb]]@IMGREL
; CHECK-NEXT: .long .Ltmp2@IMGREL
-; CHECK-NEXT: .long .Ltmp3@IMGREL+1
+; CHECK-NEXT: .long .Ltmp3@IMGREL
; CHECK-NEXT: .long "?dtor$[[finbb:[0-9]+]]@?0?main@4HA"@IMGREL
; CHECK-NEXT: .long 0
; CHECK-NEXT: .long .Ltmp2@IMGREL
-; CHECK-NEXT: .long .Ltmp3@IMGREL+1
+; CHECK-NEXT: .long .Ltmp3@IMGREL
; CHECK-NEXT: .long "?filt$0@0@main@@"@IMGREL
; CHECK-NEXT: .long .LBB1_3@IMGREL
; CHECK-NEXT: .long .Ltmp6@IMGREL
-; CHECK-NEXT: .long .Ltmp7@IMGREL+1
+; CHECK-NEXT: .long .Ltmp7@IMGREL
; CHECK-NEXT: .long "?filt$0@0@main@@"@IMGREL
; CHECK-NEXT: .long .LBB1_3@IMGREL
; CHECK-NEXT: .Llsda_end0:
diff --git a/llvm/test/CodeGen/X86/seh-except-finally.ll b/llvm/test/CodeGen/X86/seh-except-finally.ll
index 7f70655..539d776 100644
--- a/llvm/test/CodeGen/X86/seh-except-finally.ll
+++ b/llvm/test/CodeGen/X86/seh-except-finally.ll
@@ -83,15 +83,15 @@ __try.cont: ; preds = %__except, %invoke.c
; CHECK-NEXT: .long (.Llsda_end0-.Llsda_begin0)/16
; CHECK-NEXT: .Llsda_begin0:
; CHECK-NEXT: .long .Ltmp0@IMGREL
-; CHECK-NEXT: .long .Ltmp1@IMGREL+1
+; CHECK-NEXT: .long .Ltmp1@IMGREL
; CHECK-NEXT: .long "?dtor$2@?0?use_both@4HA"@IMGREL
; CHECK-NEXT: .long 0
; CHECK-NEXT: .long .Ltmp0@IMGREL
-; CHECK-NEXT: .long .Ltmp1@IMGREL+1
+; CHECK-NEXT: .long .Ltmp1@IMGREL
; CHECK-NEXT: .long "?filt$0@0@use_both@@"@IMGREL
; CHECK-NEXT: .long .LBB0_{{[0-9]+}}@IMGREL
; CHECK-NEXT: .long .Ltmp4@IMGREL
-; CHECK-NEXT: .long .Ltmp5@IMGREL+1
+; CHECK-NEXT: .long .Ltmp5@IMGREL
; CHECK-NEXT: .long "?filt$0@0@use_both@@"@IMGREL
; CHECK-NEXT: .long .LBB0_{{[0-9]+}}@IMGREL
; CHECK-NEXT: .Llsda_end0:
diff --git a/llvm/test/CodeGen/X86/seh-finally.ll b/llvm/test/CodeGen/X86/seh-finally.ll
index 41823df..6093e5e 100644
--- a/llvm/test/CodeGen/X86/seh-finally.ll
+++ b/llvm/test/CodeGen/X86/seh-finally.ll
@@ -30,7 +30,7 @@ lpad: ; preds = %entry
; X64-NEXT: .long (.Llsda_end0-.Llsda_begin0)/16 # Number of call sites
; X64-NEXT: .Llsda_begin0:
; X64-NEXT: .long .Ltmp0@IMGREL # LabelStart
-; X64-NEXT: .long .Ltmp1@IMGREL+1 # LabelEnd
+; X64-NEXT: .long .Ltmp1@IMGREL # LabelEnd
; X64-NEXT: .long "?dtor$2@?0?main@4HA"@IMGREL # FinallyFunclet
; X64-NEXT: .long 0 # Null
; X64-NEXT: .Llsda_end0:
diff --git a/llvm/test/CodeGen/X86/seh-safe-div.ll b/llvm/test/CodeGen/X86/seh-safe-div.ll
index 542d9f6..20169f8 100644
--- a/llvm/test/CodeGen/X86/seh-safe-div.ll
+++ b/llvm/test/CodeGen/X86/seh-safe-div.ll
@@ -60,6 +60,7 @@ __try.cont:
; CHECK: .Ltmp0:
; CHECK: leaq [[rloc:.*\(%rbp\)]], %rcx
; CHECK: callq try_body
+; CHECK: nop
; CHECK-NEXT: .Ltmp1
; CHECK: [[cont_bb:\.LBB0_[0-9]+]]:
; CHECK: movl [[rloc]], %eax
@@ -82,11 +83,11 @@ __try.cont:
; CHECK-NEXT: .long (.Llsda_end0-.Llsda_begin0)/16
; CHECK-NEXT: .Llsda_begin0:
; CHECK-NEXT: .long .Ltmp0@IMGREL
-; CHECK-NEXT: .long .Ltmp1@IMGREL+1
+; CHECK-NEXT: .long .Ltmp1@IMGREL
; CHECK-NEXT: .long safe_div_filt0@IMGREL
; CHECK-NEXT: .long [[handler0]]@IMGREL
; CHECK-NEXT: .long .Ltmp0@IMGREL
-; CHECK-NEXT: .long .Ltmp1@IMGREL+1
+; CHECK-NEXT: .long .Ltmp1@IMGREL
; CHECK-NEXT: .long safe_div_filt1@IMGREL
; CHECK-NEXT: .long [[handler1]]@IMGREL
; CHECK-NEXT: .Llsda_end0:
diff --git a/llvm/test/CodeGen/X86/seh-unwind-inline-asm-codegen.ll b/llvm/test/CodeGen/X86/seh-unwind-inline-asm-codegen.ll
index 2c576df..5a6aeb6 100644
--- a/llvm/test/CodeGen/X86/seh-unwind-inline-asm-codegen.ll
+++ b/llvm/test/CodeGen/X86/seh-unwind-inline-asm-codegen.ll
@@ -56,8 +56,8 @@ declare dso_local void @printf(ptr, ...)
; CHECK-NEXT:$ip2state$test:
; CHECK-NEXT: .long .Lfunc_begin0@IMGREL # IP
; CHECK-NEXT: .long -1 # ToState
-; CHECK-NEXT: .long .Ltmp0@IMGREL+1 # IP
+; CHECK-NEXT: .long .Ltmp0@IMGREL # IP
; CHECK-NEXT: .long 0 # ToState
-; CHECK-NEXT: .long .Ltmp1@IMGREL+1 # IP
+; CHECK-NEXT: .long .Ltmp1@IMGREL # IP
; CHECK-NEXT: .long -1 # ToState
diff --git a/llvm/test/CodeGen/X86/setcc-non-simple-type.ll b/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
index d2b292f..2ac2be5 100644
--- a/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
+++ b/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
@@ -119,8 +119,8 @@ define void @failing(ptr %0, ptr %1) nounwind {
; CHECK-AVX2-NEXT: .LBB0_2: # %vector.body
; CHECK-AVX2-NEXT: # Parent Loop BB0_1 Depth=1
; CHECK-AVX2-NEXT: # => This Inner Loop Header: Depth=2
-; CHECK-AVX2-NEXT: vmovdqu 1024(%rdx,%rsi), %ymm5
-; CHECK-AVX2-NEXT: vextracti128 $1, %ymm5, %xmm6
+; CHECK-AVX2-NEXT: vmovdqu 1024(%rdx,%rsi), %xmm5
+; CHECK-AVX2-NEXT: vmovdqu 1040(%rdx,%rsi), %xmm6
; CHECK-AVX2-NEXT: vpextrq $1, %xmm5, %rdi
; CHECK-AVX2-NEXT: vpextrq $1, %xmm6, %r8
; CHECK-AVX2-NEXT: vmovq %xmm5, %r9
diff --git a/llvm/test/CodeGen/X86/stack-coloring-wineh.ll b/llvm/test/CodeGen/X86/stack-coloring-wineh.ll
index e2de2ff..74fe07e 100644
--- a/llvm/test/CodeGen/X86/stack-coloring-wineh.ll
+++ b/llvm/test/CodeGen/X86/stack-coloring-wineh.ll
@@ -84,12 +84,12 @@ define void @pr66984(ptr %arg) personality ptr @__CxxFrameHandler3 {
; X86_64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X86_64-NEXT: .Ltmp0:
; X86_64-NEXT: callq throw
+; X86_64-NEXT: nop
; X86_64-NEXT: .Ltmp1:
; X86_64-NEXT: # %bb.1: # %bb14
; X86_64-NEXT: .LBB0_3: # Block address taken
; X86_64-NEXT: # %exit
; X86_64-NEXT: $ehgcr_0_3:
-; X86_64-NEXT: nop
; X86_64-NEXT: .seh_startepilogue
; X86_64-NEXT: addq $64, %rsp
; X86_64-NEXT: popq %rbp
diff --git a/llvm/test/CodeGen/X86/taildup-heapallocsite.ll b/llvm/test/CodeGen/X86/taildup-heapallocsite.ll
index 967e125..f3bef47 100644
--- a/llvm/test/CodeGen/X86/taildup-heapallocsite.ll
+++ b/llvm/test/CodeGen/X86/taildup-heapallocsite.ll
@@ -37,9 +37,11 @@ cond.end: ; preds = %entry, %cond.true
; CHECK: testq
; CHECK: je
; CHECK: callq alloc
+; CHECK-NEXT: nop
; CHECK-NEXT: [[L1:.Ltmp[0-9]+]]
; CHECK: jmp f2 # TAILCALL
; CHECK: callq alloc
+; CHECK-NEXT: nop
; CHECK-NEXT: [[L3:.Ltmp[0-9]+]]
; CHECK: jmp f2 # TAILCALL
diff --git a/llvm/test/CodeGen/X86/vec_extract.ll b/llvm/test/CodeGen/X86/vec_extract.ll
index 087cd30..9bd38db 100644
--- a/llvm/test/CodeGen/X86/vec_extract.ll
+++ b/llvm/test/CodeGen/X86/vec_extract.ll
@@ -104,6 +104,72 @@ entry:
}
declare <2 x double> @foo()
+define i64 @pr150117(<31 x i8> %a0) nounwind {
+; X86-LABEL: pr150117:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shll $8, %edx
+; X86-NEXT: orl %ebx, %edx
+; X86-NEXT: shll $8, %edi
+; X86-NEXT: orl %esi, %edi
+; X86-NEXT: shll $16, %ecx
+; X86-NEXT: orl %edi, %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: shll $24, %esi
+; X86-NEXT: orl %ecx, %esi
+; X86-NEXT: movd %esi, %xmm0
+; X86-NEXT: pinsrw $2, %edx, %xmm0
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shll $8, %ecx
+; X86-NEXT: orl %eax, %ecx
+; X86-NEXT: pinsrw $3, %ecx, %xmm0
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-NEXT: movd %xmm0, %edx
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: retl
+;
+; X64-LABEL: pr150117:
+; X64: # %bb.0:
+; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
+; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %ecx
+; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %edx
+; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %esi
+; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %edi
+; X64-NEXT: movl {{[0-9]+}}(%rsp), %r8d
+; X64-NEXT: shll $8, %r8d
+; X64-NEXT: orl %edi, %r8d
+; X64-NEXT: shll $8, %esi
+; X64-NEXT: orl %edx, %esi
+; X64-NEXT: shll $16, %ecx
+; X64-NEXT: orl %esi, %ecx
+; X64-NEXT: movl {{[0-9]+}}(%rsp), %edx
+; X64-NEXT: shll $24, %edx
+; X64-NEXT: orl %ecx, %edx
+; X64-NEXT: movd %edx, %xmm0
+; X64-NEXT: pinsrw $2, %r8d, %xmm0
+; X64-NEXT: movl {{[0-9]+}}(%rsp), %ecx
+; X64-NEXT: shll $8, %ecx
+; X64-NEXT: orl %eax, %ecx
+; X64-NEXT: pinsrw $3, %ecx, %xmm0
+; X64-NEXT: movq %xmm0, %rax
+; X64-NEXT: retq
+ %shuffle = shufflevector <31 x i8> %a0, <31 x i8> zeroinitializer, <32 x i32> <i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %bitcast = bitcast <32 x i8> %shuffle to <4 x i64>
+ %elt = extractelement <4 x i64> %bitcast, i64 0
+ ret i64 %elt
+}
+
; OSS-Fuzz #15662
; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=15662
define <4 x i32> @ossfuzz15662(ptr %in) {
diff --git a/llvm/test/CodeGen/X86/win-catchpad-nested-cxx.ll b/llvm/test/CodeGen/X86/win-catchpad-nested-cxx.ll
index bfb9c43..0bf8370 100644
--- a/llvm/test/CodeGen/X86/win-catchpad-nested-cxx.ll
+++ b/llvm/test/CodeGen/X86/win-catchpad-nested-cxx.ll
@@ -103,15 +103,15 @@ handler2:
; X64: $ip2state$try_in_catch:
; X64-NEXT: .long .Lfunc_begin0@IMGREL
; X64-NEXT: .long -1
-; X64-NEXT: .long .Ltmp0@IMGREL+1
+; X64-NEXT: .long .Ltmp0@IMGREL
; X64-NEXT: .long 0
-; X64-NEXT: .long .Ltmp1@IMGREL+1
+; X64-NEXT: .long .Ltmp1@IMGREL
; X64-NEXT: .long -1
; X64-NEXT: .long "?catch$2@?0?try_in_catch@4HA"@IMGREL
; X64-NEXT: .long 1
-; X64-NEXT: .long .Ltmp2@IMGREL+1
+; X64-NEXT: .long .Ltmp2@IMGREL
; X64-NEXT: .long 2
-; X64-NEXT: .long .Ltmp3@IMGREL+1
+; X64-NEXT: .long .Ltmp3@IMGREL
; X64-NEXT: .long 1
; X64-NEXT: .long "?catch$4@?0?try_in_catch@4HA"@IMGREL
; X64-NEXT: .long 3
diff --git a/llvm/test/CodeGen/X86/win-catchpad.ll b/llvm/test/CodeGen/X86/win-catchpad.ll
index 2491946..62ea510 100644
--- a/llvm/test/CodeGen/X86/win-catchpad.ll
+++ b/llvm/test/CodeGen/X86/win-catchpad.ll
@@ -214,9 +214,9 @@ try.cont:
; X64: $ip2state$try_catch_catch:
; X64-NEXT: .long .Lfunc_begin0@IMGREL
; X64-NEXT: .long -1
-; X64-NEXT: .long .Ltmp0@IMGREL+1
+; X64-NEXT: .long .Ltmp0@IMGREL
; X64-NEXT: .long 0
-; X64-NEXT: .long .Ltmp1@IMGREL+1
+; X64-NEXT: .long .Ltmp1@IMGREL
; X64-NEXT: .long -1
; X64-NEXT: .long "?catch$[[catch1bb]]@?0?try_catch_catch@4HA"@IMGREL
; X64-NEXT: .long 1
@@ -357,9 +357,9 @@ try.cont:
; X64-LABEL: $ip2state$branch_to_normal_dest:
; X64-NEXT: .long .Lfunc_begin1@IMGREL
; X64-NEXT: .long -1
-; X64-NEXT: .long .Ltmp[[before_call]]@IMGREL+1
+; X64-NEXT: .long .Ltmp[[before_call]]@IMGREL
; X64-NEXT: .long 0
-; X64-NEXT: .long .Ltmp[[after_call]]@IMGREL+1
+; X64-NEXT: .long .Ltmp[[after_call]]@IMGREL
; X64-NEXT: .long -1
; X64-NEXT: .long "?catch$[[catchbb]]@?0?branch_to_normal_dest@4HA"@IMGREL
; X64-NEXT: .long 1
diff --git a/llvm/test/CodeGen/X86/win-cleanuppad.ll b/llvm/test/CodeGen/X86/win-cleanuppad.ll
index e3f7f5b..e9265a1 100644
--- a/llvm/test/CodeGen/X86/win-cleanuppad.ll
+++ b/llvm/test/CodeGen/X86/win-cleanuppad.ll
@@ -191,7 +191,7 @@ cleanup.outer: ; preds = %invoke.cont.1, %c
; X64-NEXT: .long 1
; X64-NEXT: .long .Ltmp6@IMGREL
; X64-NEXT: .long 0
-; X64-NEXT: .long .Ltmp7@IMGREL+1
+; X64-NEXT: .long .Ltmp7@IMGREL
; X64-NEXT: .long -1
attributes #0 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/X86/win32-eh-states.ll b/llvm/test/CodeGen/X86/win32-eh-states.ll
index 42ae5b0..e645199 100644
--- a/llvm/test/CodeGen/X86/win32-eh-states.ll
+++ b/llvm/test/CodeGen/X86/win32-eh-states.ll
@@ -86,11 +86,11 @@ catch.7:
; X64-LABEL: $ip2state$f:
; X64-NEXT: .long .Lfunc_begin0@IMGREL
; X64-NEXT: .long -1
-; X64-NEXT: .long .Ltmp{{.*}}@IMGREL+1
+; X64-NEXT: .long .Ltmp{{.*}}@IMGREL
; X64-NEXT: .long 0
-; X64-NEXT: .long .Ltmp{{.*}}@IMGREL+1
+; X64-NEXT: .long .Ltmp{{.*}}@IMGREL
; X64-NEXT: .long 1
-; X64-NEXT: .long .Ltmp{{.*}}@IMGREL+1
+; X64-NEXT: .long .Ltmp{{.*}}@IMGREL
; X64-NEXT: .long -1
; X64-NEXT: .long "?catch${{.*}}@?0?f@4HA"@IMGREL
; X64-NEXT: .long 2
@@ -189,15 +189,15 @@ unreachable: ; preds = %entry
; X64-LABEL: $ip2state$g:
; X64-NEXT: .long .Lfunc_begin1@IMGREL
; X64-NEXT: .long -1
-; X64-NEXT: .long .Ltmp{{.*}}@IMGREL+1
+; X64-NEXT: .long .Ltmp{{.*}}@IMGREL
; X64-NEXT: .long 1
-; X64-NEXT: .long .Ltmp{{.*}}@IMGREL+1
+; X64-NEXT: .long .Ltmp{{.*}}@IMGREL
; X64-NEXT: .long -1
; X64-NEXT: .long "?catch${{.*}}@?0?g@4HA"@IMGREL
; X64-NEXT: .long 2
-; X64-NEXT: .long .Ltmp{{.*}}@IMGREL+1
+; X64-NEXT: .long .Ltmp{{.*}}@IMGREL
; X64-NEXT: .long 3
-; X64-NEXT: .long .Ltmp{{.*}}@IMGREL+1
+; X64-NEXT: .long .Ltmp{{.*}}@IMGREL
; X64-NEXT: .long 2
diff --git a/llvm/test/CodeGen/X86/win64-seh-epilogue-statepoint.ll b/llvm/test/CodeGen/X86/win64-seh-epilogue-statepoint.ll
index bc5be7a..75f156f 100644
--- a/llvm/test/CodeGen/X86/win64-seh-epilogue-statepoint.ll
+++ b/llvm/test/CodeGen/X86/win64-seh-epilogue-statepoint.ll
@@ -8,8 +8,8 @@ define i32 @foobar() gc "statepoint-example" personality ptr @__gxx_personality_
; CHECK-NEXT: .seh_stackalloc 40
; CHECK-NEXT: .seh_endprologue
; CHECK-NEXT: callq bar
-; CHECK-NEXT: .Ltmp0:
; CHECK-NEXT: nop
+; CHECK-NEXT: .Ltmp0:
; CHECK-NEXT: .seh_startepilogue
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .seh_endepilogue
diff --git a/llvm/test/CodeGen/X86/wineh-coreclr.ll b/llvm/test/CodeGen/X86/wineh-coreclr.ll
index baf5eaa..a3d0fde 100644
--- a/llvm/test/CodeGen/X86/wineh-coreclr.ll
+++ b/llvm/test/CodeGen/X86/wineh-coreclr.ll
@@ -38,6 +38,7 @@ entry:
; CHECK: [[test1_before_f1:.+]]:
; CHECK-NEXT: movl $1, %ecx
; CHECK-NEXT: callq f
+; CHECK-NEXT: nop
; CHECK-NEXT: [[test1_after_f1:.+]]:
invoke void @f(i32 1)
to label %inner_try unwind label %finally
@@ -46,6 +47,7 @@ inner_try:
; CHECK: [[test1_before_f2:.+]]:
; CHECK-NEXT: movl $2, %ecx
; CHECK-NEXT: callq f
+; CHECK-NEXT: nop
; CHECK-NEXT: [[test1_after_f2:.+]]:
invoke void @f(i32 2)
to label %finally.clone unwind label %exn.dispatch
@@ -69,6 +71,7 @@ catch1:
; CHECK: [[test1_before_f3:.+]]:
; CHECK-NEXT: movl $3, %ecx
; CHECK-NEXT: callq f
+; CHECK-NEXT: nop
; CHECK-NEXT: [[test1_after_f3:.+]]:
invoke void @f(i32 3) [ "funclet"(token %catch.pad1) ]
to label %catch1.ret unwind label %finally
@@ -92,6 +95,7 @@ catch2:
; CHECK: [[test1_before_f4:.+]]:
; CHECK-NEXT: movl $4, %ecx
; CHECK-NEXT: callq f
+; CHECK-NEXT: nop
; CHECK-NEXT: [[test1_after_f4:.+]]:
invoke void @f(i32 4) [ "funclet"(token %catch.pad2) ]
to label %try_in_catch unwind label %finally
@@ -100,6 +104,7 @@ try_in_catch:
; CHECK: [[test1_before_f5:.+]]:
; CHECK-NEXT: movl $5, %ecx
; CHECK-NEXT: callq f
+; CHECK-NEXT: nop
; CHECK-NEXT: [[test1_after_f5:.+]]:
invoke void @f(i32 5) [ "funclet"(token %catch.pad2) ]
to label %catch2.ret unwind label %fault
@@ -116,6 +121,7 @@ fault:
; CHECK: [[test1_before_f6:.+]]:
; CHECK-NEXT: movl $6, %ecx
; CHECK-NEXT: callq f
+; CHECK-NEXT: nop
; CHECK-NEXT: [[test1_after_f6:.+]]:
invoke void @f(i32 6) [ "funclet"(token %fault.pad) ]
to label %fault.ret unwind label %finally
@@ -312,6 +318,7 @@ unreachable:
; CHECK: [[test2_before_f1:.+]]:
; CHECK-NEXT: movl $1, %ecx
; CHECK-NEXT: callq f
+; CHECK-NEXT: nop
; CHECK-NEXT: [[test2_after_f1:.+]]:
; CHECK: .seh_proc [[test2_catch1:[^ ]+]]
; CHECK: .seh_proc [[test2_catch2:[^ ]+]]
@@ -320,6 +327,7 @@ unreachable:
; CHECK: [[test2_before_f2:.+]]:
; CHECK-NEXT: movl $2, %ecx
; CHECK-NEXT: callq f
+; CHECK-NEXT: nop
; CHECK-NEXT: [[test2_after_f2:.+]]:
; CHECK: int3
; CHECK: [[test2_end:.*func_end.*]]:
@@ -448,6 +456,7 @@ entry:
; CHECK: [[test3_before_f1:.+]]:
; CHECK-NEXT: movl $1, %ecx
; CHECK-NEXT: callq f
+; CHECK-NEXT: nop
; CHECK-NEXT: [[test3_after_f1:.+]]:
invoke void @f(i32 1)
to label %exit unwind label %fault1
@@ -474,6 +483,7 @@ fault4:
; CHECK: [[test3_before_f6:.+]]:
; CHECK-NEXT: movl $6, %ecx
; CHECK-NEXT: callq f
+; CHECK-NEXT: nop
; CHECK-NEXT: [[test3_after_f6:.+]]:
invoke void @f(i32 6) ["funclet"(token %fault.pad4)]
to label %fault4.cont unwind label %exn.dispatch1
@@ -482,6 +492,7 @@ fault4.cont:
; CHECK: [[test3_before_f7:.+]]:
; CHECK-NEXT: movl $7, %ecx
; CHECK-NEXT: callq f
+; CHECK-NEXT: nop
; CHECK-NEXT: [[test3_after_f7:.+]]:
invoke void @f(i32 7) ["funclet"(token %fault.pad4)]
to label %unreachable unwind label %fault5
@@ -512,6 +523,7 @@ unreachable:
; CHECK: [[test3_before_f4:.+]]:
; CHECK-NEXT: movl $4, %ecx
; CHECK-NEXT: callq f
+; CHECK-NEXT: nop
; CHECK-NEXT: [[test3_after_f4:.+]]:
; CHECK: int3
; CHECK: .seh_proc [[test3_fault2:[^ ]+]]
@@ -520,6 +532,7 @@ unreachable:
; CHECK: [[test3_before_f3:.+]]:
; CHECK-NEXT: movl $3, %ecx
; CHECK-NEXT: callq f
+; CHECK-NEXT: nop
; CHECK-NEXT: [[test3_after_f3:.+]]:
; CHECK: int3
; CHECK: .seh_proc [[test3_fault1:[^ ]+]]
@@ -528,6 +541,7 @@ unreachable:
; CHECK: [[test3_before_f2:.+]]:
; CHECK-NEXT: movl $2, %ecx
; CHECK-NEXT: callq f
+; CHECK-NEXT: nop
; CHECK-NEXT: [[test3_after_f2:.+]]:
; CHECK: int3
; CHECK: [[test3_end:.*func_end.*]]:
diff --git a/llvm/test/CodeGen/XCore/exception.ll b/llvm/test/CodeGen/XCore/exception.ll
index f222297..bb5f3f4 100644
--- a/llvm/test/CodeGen/XCore/exception.ll
+++ b/llvm/test/CodeGen/XCore/exception.ll
@@ -60,7 +60,7 @@ entry:
; CHECK: [[PRE_G:.L[a-zA-Z0-9_]+]]
; CHECK: bl g
; CHECK: [[POST_G:.L[a-zA-Z0-9_]+]]
-; CHECK: [[RETURN:.L[a-zA-Z0-9_]+]]
+; CHECK: [[RETURN:^.L[a-zA-Z0-9_]+]]
; CHECK: ldw r6, sp[1]
; CHECK: ldw r5, sp[2]
; CHECK: ldw r4, sp[3]