; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Tests for divergent (VGPR-addressed) loads of i8/i16/i32 and i32 vectors in
; address spaces 0 (flat), 1 (global), 3 (LDS), 4 (constant), and 5 (scratch),
; with and without real-true16.
;
; NOTE(review): the check-prefix assignment was swapped relative to -mattr:
; the -real-true16 RUN carried the GFX12-True16 prefix and vice versa, while
; the check bodies (u16 = any-extending S32 load, d16 = true16 S16 load)
; matched the actual feature. Prefixes below are relabeled consistently.
; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+unaligned-access-mode,-real-true16 < %s | FileCheck --check-prefixes=GFX12,GFX12-NoTrue16 %s
; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+unaligned-access-mode,+real-true16 < %s | FileCheck --check-prefixes=GFX12,GFX12-True16 %s

define amdgpu_ps void @load_divergent_P0_i8_any_extending(ptr addrspace(0) %ptra, ptr addrspace(0) %out) {
; GFX12-LABEL: load_divergent_P0_i8_any_extending:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    flat_load_u8 v0, v[0:1]
; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT:    flat_store_b8 v[2:3], v0
; GFX12-NEXT:    s_endpgm
  %a = load i8, ptr addrspace(0) %ptra
  store i8 %a, ptr addrspace(0) %out
  ret void
}

; with true16, S16 16-bit load
; without true16, S32 16-bit any-extending load
define amdgpu_ps void @load_divergent_P0_i16(ptr addrspace(0) %ptra, ptr addrspace(0) %out) {
; GFX12-NoTrue16-LABEL: load_divergent_P0_i16:
; GFX12-NoTrue16:       ; %bb.0:
; GFX12-NoTrue16-NEXT:    flat_load_u16 v0, v[0:1]
; GFX12-NoTrue16-NEXT:    s_wait_loadcnt_dscnt 0x0
; GFX12-NoTrue16-NEXT:    flat_store_b16 v[2:3], v0
; GFX12-NoTrue16-NEXT:    s_endpgm
;
; GFX12-True16-LABEL: load_divergent_P0_i16:
; GFX12-True16:       ; %bb.0:
; GFX12-True16-NEXT:    flat_load_d16_b16 v0, v[0:1]
; GFX12-True16-NEXT:    s_wait_loadcnt_dscnt 0x0
; GFX12-True16-NEXT:    flat_store_b16 v[2:3], v0
; GFX12-True16-NEXT:    s_endpgm
  %a = load i16, ptr addrspace(0) %ptra
  store i16 %a, ptr addrspace(0) %out
  ret void
}

define amdgpu_ps void @load_divergent_P0_i32(ptr addrspace(0) %ptra, ptr addrspace(0) %out) {
; GFX12-LABEL: load_divergent_P0_i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    flat_load_b32 v0, v[0:1]
; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT:    flat_store_b32 v[2:3], v0
; GFX12-NEXT:    s_endpgm
  %a = load i32, ptr addrspace(0) %ptra
  store i32 %a, ptr addrspace(0) %out
  ret void
}

define amdgpu_ps void @load_divergent_P0_v2i32(ptr addrspace(0) %ptra, ptr addrspace(0) %out) {
; GFX12-LABEL: load_divergent_P0_v2i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    flat_load_b64 v[0:1], v[0:1]
; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT:    flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT:    s_endpgm
  %a = load <2 x i32>, ptr addrspace(0) %ptra
  store <2 x i32> %a, ptr addrspace(0) %out
  ret void
}

define amdgpu_ps void @load_divergent_P0_v3i32(ptr addrspace(0) %ptra, ptr addrspace(0) %out) {
; GFX12-LABEL: load_divergent_P0_v3i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    flat_load_b96 v[4:6], v[0:1]
; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT:    flat_store_b96 v[2:3], v[4:6]
; GFX12-NEXT:    s_endpgm
  %a = load <3 x i32>, ptr addrspace(0) %ptra
  store <3 x i32> %a, ptr addrspace(0) %out
  ret void
}

define amdgpu_ps void @load_divergent_P0_v4i32(ptr addrspace(0) %ptra, ptr addrspace(0) %out) {
; GFX12-LABEL: load_divergent_P0_v4i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    flat_load_b128 v[4:7], v[0:1]
; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT:    flat_store_b128 v[2:3], v[4:7]
; GFX12-NEXT:    s_endpgm
  %a = load <4 x i32>, ptr addrspace(0) %ptra
  store <4 x i32> %a, ptr addrspace(0) %out
  ret void
}

define amdgpu_ps void @load_divergent_P1_i8_any_extending(ptr addrspace(1) %ptra, ptr addrspace(1) %out) {
; GFX12-LABEL: load_divergent_P1_i8_any_extending:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    global_load_u8 v0, v[0:1], off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b8 v[2:3], v0, off
; GFX12-NEXT:    s_endpgm
  %a = load i8, ptr addrspace(1) %ptra
  store i8 %a, ptr addrspace(1) %out
  ret void
}

; with true16, S16 16-bit load
; without true16, S32 16-bit any-extending load
define amdgpu_ps void @load_divergent_P1_i16(ptr addrspace(1) %ptra, ptr addrspace(1) %out) {
; GFX12-NoTrue16-LABEL: load_divergent_P1_i16:
; GFX12-NoTrue16:       ; %bb.0:
; GFX12-NoTrue16-NEXT:    global_load_u16 v0, v[0:1], off
; GFX12-NoTrue16-NEXT:    s_wait_loadcnt 0x0
; GFX12-NoTrue16-NEXT:    global_store_b16 v[2:3], v0, off
; GFX12-NoTrue16-NEXT:    s_endpgm
;
; GFX12-True16-LABEL: load_divergent_P1_i16:
; GFX12-True16:       ; %bb.0:
; GFX12-True16-NEXT:    global_load_d16_b16 v0, v[0:1], off
; GFX12-True16-NEXT:    s_wait_loadcnt 0x0
; GFX12-True16-NEXT:    global_store_b16 v[2:3], v0, off
; GFX12-True16-NEXT:    s_endpgm
  %a = load i16, ptr addrspace(1) %ptra
  store i16 %a, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @load_divergent_P1_i32(ptr addrspace(1) %ptra, ptr addrspace(1) %out) {
; GFX12-LABEL: load_divergent_P1_i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    global_load_b32 v0, v[0:1], off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b32 v[2:3], v0, off
; GFX12-NEXT:    s_endpgm
  %a = load i32, ptr addrspace(1) %ptra
  store i32 %a, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @load_divergent_P1_v2i32(ptr addrspace(1) %ptra, ptr addrspace(1) %out) {
; GFX12-LABEL: load_divergent_P1_v2i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    global_load_b64 v[0:1], v[0:1], off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b64 v[2:3], v[0:1], off
; GFX12-NEXT:    s_endpgm
  %a = load <2 x i32>, ptr addrspace(1) %ptra
  store <2 x i32> %a, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @load_divergent_P1_v3i32(ptr addrspace(1) %ptra, ptr addrspace(1) %out) {
; GFX12-LABEL: load_divergent_P1_v3i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    global_load_b96 v[4:6], v[0:1], off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b96 v[2:3], v[4:6], off
; GFX12-NEXT:    s_endpgm
  %a = load <3 x i32>, ptr addrspace(1) %ptra
  store <3 x i32> %a, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @load_divergent_P1_v4i32(ptr addrspace(1) %ptra, ptr addrspace(1) %out) {
; GFX12-LABEL: load_divergent_P1_v4i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    global_load_b128 v[4:7], v[0:1], off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b128 v[2:3], v[4:7], off
; GFX12-NEXT:    s_endpgm
  %a = load <4 x i32>, ptr addrspace(1) %ptra
  store <4 x i32> %a, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @load_divergent_P1_v8i32(ptr addrspace(1) %ptra, ptr addrspace(1) %out) {
; GFX12-LABEL: load_divergent_P1_v8i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_load_b128 v[4:7], v[0:1], off
; GFX12-NEXT:    global_load_b128 v[8:11], v[0:1], off offset:16
; GFX12-NEXT:    s_wait_loadcnt 0x1
; GFX12-NEXT:    global_store_b128 v[2:3], v[4:7], off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b128 v[2:3], v[8:11], off offset:16
; GFX12-NEXT:    s_endpgm
  %a = load <8 x i32>, ptr addrspace(1) %ptra
  store <8 x i32> %a, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @load_divergent_P1_v16i32(ptr addrspace(1) %ptra, ptr addrspace(1) %out) {
; GFX12-LABEL: load_divergent_P1_v16i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    s_clause 0x3
; GFX12-NEXT:    global_load_b128 v[4:7], v[0:1], off
; GFX12-NEXT:    global_load_b128 v[8:11], v[0:1], off offset:16
; GFX12-NEXT:    global_load_b128 v[12:15], v[0:1], off offset:32
; GFX12-NEXT:    global_load_b128 v[16:19], v[0:1], off offset:48
; GFX12-NEXT:    s_wait_loadcnt 0x3
; GFX12-NEXT:    global_store_b128 v[2:3], v[4:7], off
; GFX12-NEXT:    s_wait_loadcnt 0x2
; GFX12-NEXT:    global_store_b128 v[2:3], v[8:11], off offset:16
; GFX12-NEXT:    s_wait_loadcnt 0x1
; GFX12-NEXT:    global_store_b128 v[2:3], v[12:15], off offset:32
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b128 v[2:3], v[16:19], off offset:48
; GFX12-NEXT:    s_endpgm
  %a = load <16 x i32>, ptr addrspace(1) %ptra
  store <16 x i32> %a, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @load_divergent_P3_i8_any_extending(ptr addrspace(3) %ptra, ptr addrspace(3) %out) {
; GFX12-LABEL: load_divergent_P3_i8_any_extending:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    ds_load_u8 v0, v0
; GFX12-NEXT:    s_wait_dscnt 0x0
; GFX12-NEXT:    ds_store_b8 v1, v0
; GFX12-NEXT:    s_endpgm
  %a = load i8, ptr addrspace(3) %ptra
  store i8 %a, ptr addrspace(3) %out
  ret void
}

; with true16, S16 16-bit load
; without true16, S32 16-bit any-extending load
define amdgpu_ps void @load_divergent_P3_i16(ptr addrspace(3) %ptra, ptr addrspace(3) %out) {
; GFX12-NoTrue16-LABEL: load_divergent_P3_i16:
; GFX12-NoTrue16:       ; %bb.0:
; GFX12-NoTrue16-NEXT:    ds_load_u16 v0, v0
; GFX12-NoTrue16-NEXT:    s_wait_dscnt 0x0
; GFX12-NoTrue16-NEXT:    ds_store_b16 v1, v0
; GFX12-NoTrue16-NEXT:    s_endpgm
;
; GFX12-True16-LABEL: load_divergent_P3_i16:
; GFX12-True16:       ; %bb.0:
; GFX12-True16-NEXT:    ds_load_u16_d16 v0, v0
; GFX12-True16-NEXT:    s_wait_dscnt 0x0
; GFX12-True16-NEXT:    ds_store_b16 v1, v0
; GFX12-True16-NEXT:    s_endpgm
  %a = load i16, ptr addrspace(3) %ptra
  store i16 %a, ptr addrspace(3) %out
  ret void
}

define amdgpu_ps void @load_divergent_P3_i32(ptr addrspace(3) %ptra, ptr addrspace(3) %out) {
; GFX12-LABEL: load_divergent_P3_i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    ds_load_b32 v0, v0
; GFX12-NEXT:    s_wait_dscnt 0x0
; GFX12-NEXT:    ds_store_b32 v1, v0
; GFX12-NEXT:    s_endpgm
  %a = load i32, ptr addrspace(3) %ptra
  store i32 %a, ptr addrspace(3) %out
  ret void
}

define amdgpu_ps void @load_divergent_P3_v2i32(ptr addrspace(3) %ptra, ptr addrspace(3) %out) {
; GFX12-LABEL: load_divergent_P3_v2i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    ds_load_b64 v[2:3], v0
; GFX12-NEXT:    s_wait_dscnt 0x0
; GFX12-NEXT:    ds_store_b64 v1, v[2:3]
; GFX12-NEXT:    s_endpgm
  %a = load <2 x i32>, ptr addrspace(3) %ptra
  store <2 x i32> %a, ptr addrspace(3) %out
  ret void
}

define amdgpu_ps void @load_divergent_P3_v3i32(ptr addrspace(3) %ptra, ptr addrspace(3) %out) {
; GFX12-LABEL: load_divergent_P3_v3i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    ds_load_b96 v[2:4], v0
; GFX12-NEXT:    s_wait_dscnt 0x0
; GFX12-NEXT:    ds_store_b96 v1, v[2:4]
; GFX12-NEXT:    s_endpgm
  %a = load <3 x i32>, ptr addrspace(3) %ptra
  store <3 x i32> %a, ptr addrspace(3) %out
  ret void
}

define amdgpu_ps void @load_divergent_P3_v4i32(ptr addrspace(3) %ptra, ptr addrspace(3) %out) {
; GFX12-LABEL: load_divergent_P3_v4i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    ds_load_b128 v[2:5], v0
; GFX12-NEXT:    s_wait_dscnt 0x0
; GFX12-NEXT:    ds_store_b128 v1, v[2:5]
; GFX12-NEXT:    s_endpgm
  %a = load <4 x i32>, ptr addrspace(3) %ptra
  store <4 x i32> %a, ptr addrspace(3) %out
  ret void
}

define amdgpu_ps void @load_divergent_P4_i8_any_extending(ptr addrspace(4) %ptra, ptr addrspace(1) %out) {
; GFX12-LABEL: load_divergent_P4_i8_any_extending:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    global_load_u8 v0, v[0:1], off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b8 v[2:3], v0, off
; GFX12-NEXT:    s_endpgm
  %a = load i8, ptr addrspace(4) %ptra
  store i8 %a, ptr addrspace(1) %out
  ret void
}

; with true16, S16 16-bit load
; without true16, S32 16-bit any-extending load
define amdgpu_ps void @load_divergent_P4_i16(ptr addrspace(4) %ptra, ptr addrspace(1) %out) {
; GFX12-NoTrue16-LABEL: load_divergent_P4_i16:
; GFX12-NoTrue16:       ; %bb.0:
; GFX12-NoTrue16-NEXT:    global_load_u16 v0, v[0:1], off
; GFX12-NoTrue16-NEXT:    s_wait_loadcnt 0x0
; GFX12-NoTrue16-NEXT:    global_store_b16 v[2:3], v0, off
; GFX12-NoTrue16-NEXT:    s_endpgm
;
; GFX12-True16-LABEL: load_divergent_P4_i16:
; GFX12-True16:       ; %bb.0:
; GFX12-True16-NEXT:    global_load_d16_b16 v0, v[0:1], off
; GFX12-True16-NEXT:    s_wait_loadcnt 0x0
; GFX12-True16-NEXT:    global_store_b16 v[2:3], v0, off
; GFX12-True16-NEXT:    s_endpgm
  %a = load i16, ptr addrspace(4) %ptra
  store i16 %a, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @load_divergent_P4_i32(ptr addrspace(4) %ptra, ptr addrspace(1) %out) {
; GFX12-LABEL: load_divergent_P4_i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    global_load_b32 v0, v[0:1], off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b32 v[2:3], v0, off
; GFX12-NEXT:    s_endpgm
  %a = load i32, ptr addrspace(4) %ptra
  store i32 %a, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @load_divergent_P4_v2i32(ptr addrspace(4) %ptra, ptr addrspace(1) %out) {
; GFX12-LABEL: load_divergent_P4_v2i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    global_load_b64 v[0:1], v[0:1], off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b64 v[2:3], v[0:1], off
; GFX12-NEXT:    s_endpgm
  %a = load <2 x i32>, ptr addrspace(4) %ptra
  store <2 x i32> %a, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @load_divergent_P4_v3i32(ptr addrspace(4) %ptra, ptr addrspace(1) %out) {
; GFX12-LABEL: load_divergent_P4_v3i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    global_load_b96 v[4:6], v[0:1], off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b96 v[2:3], v[4:6], off
; GFX12-NEXT:    s_endpgm
  %a = load <3 x i32>, ptr addrspace(4) %ptra
  store <3 x i32> %a, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @load_divergent_P4_v4i32(ptr addrspace(4) %ptra, ptr addrspace(1) %out) {
; GFX12-LABEL: load_divergent_P4_v4i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    global_load_b128 v[4:7], v[0:1], off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b128 v[2:3], v[4:7], off
; GFX12-NEXT:    s_endpgm
  %a = load <4 x i32>, ptr addrspace(4) %ptra
  store <4 x i32> %a, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @load_divergent_P4_v8i32(ptr addrspace(4) %ptra, ptr addrspace(1) %out) {
; GFX12-LABEL: load_divergent_P4_v8i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_load_b128 v[4:7], v[0:1], off
; GFX12-NEXT:    global_load_b128 v[8:11], v[0:1], off offset:16
; GFX12-NEXT:    s_wait_loadcnt 0x1
; GFX12-NEXT:    global_store_b128 v[2:3], v[4:7], off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b128 v[2:3], v[8:11], off offset:16
; GFX12-NEXT:    s_endpgm
  %a = load <8 x i32>, ptr addrspace(4) %ptra
  store <8 x i32> %a, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @load_divergent_P4_v16i32(ptr addrspace(4) %ptra, ptr addrspace(1) %out) {
; GFX12-LABEL: load_divergent_P4_v16i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    s_clause 0x3
; GFX12-NEXT:    global_load_b128 v[4:7], v[0:1], off
; GFX12-NEXT:    global_load_b128 v[8:11], v[0:1], off offset:16
; GFX12-NEXT:    global_load_b128 v[12:15], v[0:1], off offset:32
; GFX12-NEXT:    global_load_b128 v[16:19], v[0:1], off offset:48
; GFX12-NEXT:    s_wait_loadcnt 0x3
; GFX12-NEXT:    global_store_b128 v[2:3], v[4:7], off
; GFX12-NEXT:    s_wait_loadcnt 0x2
; GFX12-NEXT:    global_store_b128 v[2:3], v[8:11], off offset:16
; GFX12-NEXT:    s_wait_loadcnt 0x1
; GFX12-NEXT:    global_store_b128 v[2:3], v[12:15], off offset:32
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    global_store_b128 v[2:3], v[16:19], off offset:48
; GFX12-NEXT:    s_endpgm
  %a = load <16 x i32>, ptr addrspace(4) %ptra
  store <16 x i32> %a, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @load_divergent_P5_i8_any_extending(ptr addrspace(5) %ptra, ptr addrspace(5) %out) {
; GFX12-LABEL: load_divergent_P5_i8_any_extending:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    scratch_load_u8 v0, v0, off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    scratch_store_b8 v1, v0, off
; GFX12-NEXT:    s_endpgm
  %a = load i8, ptr addrspace(5) %ptra
  store i8 %a, ptr addrspace(5) %out
  ret void
}

; with true16, S16 16-bit load
; without true16, S32 16-bit any-extending load
define amdgpu_ps void @load_divergent_P5_i16(ptr addrspace(5) %ptra, ptr addrspace(5) %out) {
; GFX12-NoTrue16-LABEL: load_divergent_P5_i16:
; GFX12-NoTrue16:       ; %bb.0:
; GFX12-NoTrue16-NEXT:    scratch_load_u16 v0, v0, off
; GFX12-NoTrue16-NEXT:    s_wait_loadcnt 0x0
; GFX12-NoTrue16-NEXT:    scratch_store_b16 v1, v0, off
; GFX12-NoTrue16-NEXT:    s_endpgm
;
; GFX12-True16-LABEL: load_divergent_P5_i16:
; GFX12-True16:       ; %bb.0:
; GFX12-True16-NEXT:    scratch_load_d16_b16 v0, v0, off
; GFX12-True16-NEXT:    s_wait_loadcnt 0x0
; GFX12-True16-NEXT:    scratch_store_b16 v1, v0, off
; GFX12-True16-NEXT:    s_endpgm
  %a = load i16, ptr addrspace(5) %ptra
  store i16 %a, ptr addrspace(5) %out
  ret void
}

define amdgpu_ps void @load_divergent_P5_i32(ptr addrspace(5) %ptra, ptr addrspace(5) %out) {
; GFX12-LABEL: load_divergent_P5_i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    scratch_load_b32 v0, v0, off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    scratch_store_b32 v1, v0, off
; GFX12-NEXT:    s_endpgm
  %a = load i32, ptr addrspace(5) %ptra
  store i32 %a, ptr addrspace(5) %out
  ret void
}

define amdgpu_ps void @load_divergent_P5_v2i32(ptr addrspace(5) %ptra, ptr addrspace(5) %out) {
; GFX12-LABEL: load_divergent_P5_v2i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    scratch_load_b64 v[2:3], v0, off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    scratch_store_b64 v1, v[2:3], off
; GFX12-NEXT:    s_endpgm
  %a = load <2 x i32>, ptr addrspace(5) %ptra
  store <2 x i32> %a, ptr addrspace(5) %out
  ret void
}

define amdgpu_ps void @load_divergent_P5_v3i32(ptr addrspace(5) %ptra, ptr addrspace(5) %out) {
; GFX12-LABEL: load_divergent_P5_v3i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    scratch_load_b96 v[2:4], v0, off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    scratch_store_b96 v1, v[2:4], off
; GFX12-NEXT:    s_endpgm
  %a = load <3 x i32>, ptr addrspace(5) %ptra
  store <3 x i32> %a, ptr addrspace(5) %out
  ret void
}

define amdgpu_ps void @load_divergent_P5_v4i32(ptr addrspace(5) %ptra, ptr addrspace(5) %out) {
; GFX12-LABEL: load_divergent_P5_v4i32:
; GFX12:       ; %bb.0:
; GFX12-NEXT:    scratch_load_b128 v[2:5], v0, off
; GFX12-NEXT:    s_wait_loadcnt 0x0
; GFX12-NEXT:    scratch_store_b128 v1, v[2:5], off
; GFX12-NEXT:    s_endpgm
  %a = load <4 x i32>, ptr addrspace(5) %ptra
  store <4 x i32> %a, ptr addrspace(5) %out
  ret void
}