Diffstat (limited to 'llvm/test')
16 files changed, 7582 insertions, 63 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir b/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir index 029aa39..ce1ea4d 100644 --- a/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir +++ b/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir @@ -128,13 +128,13 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3 ; CHECK-NEXT: undef [[COPY2:%[0-9]+]].sub0_sub1:areg_128 = COPY [[COPY]] ; CHECK-NEXT: [[COPY2:%[0-9]+]].sub2_sub3:areg_128 = COPY [[COPY1]] - ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY2]] + ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY2]] ; CHECK-NEXT: SI_RETURN %0:vreg_64 = COPY $vgpr0_vgpr1 %1:vreg_64 = COPY $vgpr2_vgpr3 undef %2.sub0_sub1:areg_128 = COPY %0 %2.sub2_sub3:areg_128 = COPY %1 - INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2 + INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2 SI_RETURN ... @@ -153,13 +153,13 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3 ; CHECK-NEXT: undef [[COPY2:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[COPY]] ; CHECK-NEXT: [[COPY2:%[0-9]+]].sub2_sub3:areg_128_align2 = COPY [[COPY1]] - ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY2]] + ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY2]] ; CHECK-NEXT: SI_RETURN %0:vreg_64 = COPY $vgpr0_vgpr1 %1:vreg_64 = COPY $vgpr2_vgpr3 undef %2.sub0_sub1:areg_128_align2 = COPY %0 %2.sub2_sub3:areg_128_align2 = COPY %1 - INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2 + INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2 SI_RETURN ... @@ -398,14 +398,14 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128 = COPY [[COPY]] ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128 = COPY [[COPY]] ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128 = COPY [[COPY]] - ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]] + ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]] ; CHECK-NEXT: SI_RETURN %0:vgpr_32 = COPY $vgpr0 undef %1.sub0:areg_128 = COPY %0 %1.sub1:areg_128 = COPY %0 %1.sub2:areg_128 = COPY %0 %1.sub3:areg_128 = COPY %0 - INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %1 + INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %1 SI_RETURN ... @@ -425,14 +425,14 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128_align2 = COPY [[COPY]] ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128_align2 = COPY [[COPY]] ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128_align2 = COPY [[COPY]] - ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]] + ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]] ; CHECK-NEXT: SI_RETURN %0:vgpr_32 = COPY $vgpr0 undef %1.sub0:areg_128_align2 = COPY %0 %1.sub1:areg_128_align2 = COPY %0 %1.sub2:areg_128_align2 = COPY %0 %1.sub3:areg_128_align2 = COPY %0 - INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %1 + INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %1 SI_RETURN ... 
@@ -641,13 +641,13 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]].sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3 ; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128 = COPY [[COPY]].sub0_sub1 ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128 = COPY [[COPY]].sub2_sub3 - ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]] + ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]] ; CHECK-NEXT: SI_RETURN undef %0.sub0_sub1:vreg_128 =COPY $vgpr0_vgpr1 %0.sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3 undef %2.sub0_sub1:areg_128 = COPY %0.sub0_sub1 %2.sub2_sub3:areg_128 = COPY %0.sub2_sub3 - INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2 + INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2 SI_RETURN ... @@ -668,13 +668,13 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_128 = COPY $vgpr2_vgpr3 ; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[COPY]].sub0 ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128_align2 = COPY [[COPY]].sub1 - ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]] + ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]] ; CHECK-NEXT: SI_RETURN undef %0.sub0:vreg_128 =COPY $vgpr0_vgpr1 %0.sub1:vreg_128 = COPY $vgpr2_vgpr3 undef %2.sub0_sub1:areg_128_align2 = COPY %0.sub0 %2.sub2_sub3:areg_128_align2 = COPY %0.sub1 - INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2 + INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2 SI_RETURN ... @@ -890,14 +890,14 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128 = COPY [[COPY]].sub0 ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128 = COPY [[COPY]].sub0 ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128 = COPY [[COPY]].sub0 - ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]] + ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]] ; CHECK-NEXT: SI_RETURN undef %0.sub0:vreg_64 = COPY $vgpr0 undef %1.sub0:areg_128 = COPY %0.sub0 %1.sub1:areg_128 = COPY %0.sub0 %1.sub2:areg_128 = COPY %0.sub0 %1.sub3:areg_128 = COPY %0.sub0 - INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %1 + INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %1 SI_RETURN ... @@ -917,14 +917,14 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128_align2 = COPY [[COPY]].sub0 ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128_align2 = COPY [[COPY]].sub0 ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128_align2 = COPY [[COPY]].sub0 - ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]] + ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]] ; CHECK-NEXT: SI_RETURN undef %0.sub0:vreg_64 = COPY $vgpr0 undef %1.sub0:areg_128_align2 = COPY %0.sub0 %1.sub1:areg_128_align2 = COPY %0.sub0 %1.sub2:areg_128_align2 = COPY %0.sub0 %1.sub3:areg_128_align2 = COPY %0.sub0 - INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %1 + INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %1 SI_RETURN ... 
@@ -1051,13 +1051,13 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]].sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3 ; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128 = COPY [[COPY]].sub0_sub1 ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128 = COPY [[COPY]].sub2_sub3 - ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]] + ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]] ; CHECK-NEXT: SI_RETURN undef %0.sub0_sub1:vreg_128 = COPY $vgpr0_vgpr1 %0.sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3 undef %2.sub0_sub1:areg_128 = COPY %0.sub0_sub1 %2.sub2_sub3:areg_128 = COPY %0.sub2_sub3 - INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2 + INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2 SI_RETURN ... @@ -1076,13 +1076,13 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]].sub2_sub3:vreg_128_align2 = COPY $vgpr2_vgpr3 ; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[COPY]].sub0_sub1 ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128_align2 = COPY [[COPY]].sub2_sub3 - ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]] + ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]] ; CHECK-NEXT: SI_RETURN undef %0.sub0_sub1:vreg_128_align2 = COPY $vgpr0_vgpr1 %0.sub2_sub3:vreg_128_align2 = COPY $vgpr2_vgpr3 undef %2.sub0_sub1:areg_128_align2 = COPY %0.sub0_sub1 %2.sub2_sub3:areg_128_align2 = COPY %0.sub2_sub3 - INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2 + INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2 SI_RETURN ... @@ -1358,11 +1358,11 @@ body: | ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:areg_128 = COPY [[COPY]] - ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]] + ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]] ; CHECK-NEXT: SI_RETURN %0:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3 %2:areg_128 = COPY %0 - INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2 + INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2 SI_RETURN ... @@ -1379,11 +1379,11 @@ body: | ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_128_align2 = COPY $vgpr0_vgpr1_vgpr2_vgpr3 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:areg_128_align2 = COPY [[COPY]] - ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]] + ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]] ; CHECK-NEXT: SI_RETURN %0:vreg_128_align2 = COPY $vgpr0_vgpr1_vgpr2_vgpr3 %2:areg_128_align2 = COPY %0 - INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2 + INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2 SI_RETURN ... 
diff --git a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir index 92836d8..63db24a 100644 --- a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir +++ b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir @@ -486,7 +486,7 @@ body: | ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 - ; CHECK-NEXT: INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; CHECK-NEXT: S_ENDPGM 0 bb.0: S_NOP 0, implicit-def $agpr0 @@ -516,7 +516,7 @@ body: | S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 - INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, %0:vreg_512_align2 + INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, %0:vreg_512_align2 S_ENDPGM 0 ... 
@@ -1368,7 +1368,7 @@ body: | ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc ; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} @@ -1408,7 +1408,7 @@ body: | undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, %4 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, %4 S_CBRANCH_VCCNZ %bb.1, implicit $vcc S_BRANCH %bb.2 @@ -1726,7 +1726,7 @@ body: | ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc ; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} @@ 
-1763,7 +1763,7 @@ body: | undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, %4 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, %4 S_CBRANCH_VCCNZ %bb.1, implicit $vcc S_BRANCH %bb.2 diff --git a/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll b/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll index 9cbdc38..5b3e486 100644 --- a/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll +++ b/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll @@ -8,16 +8,16 @@ define amdgpu_kernel void @s_input_output_i128() { ; GFX908-LABEL: name: s_input_output_i128 ; GFX908: bb.0 (%ir-block.0): - ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 9633802 /* regdef:SGPR_128 */, def %13 + ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10682378 /* regdef:SGPR_128 */, def %13 ; GFX908-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY %13 - ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9633801 /* reguse:SGPR_128 */, [[COPY]] + ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 10682377 /* reguse:SGPR_128 */, [[COPY]] ; GFX908-NEXT: S_ENDPGM 0 ; ; GFX90A-LABEL: name: s_input_output_i128 ; GFX90A: bb.0 (%ir-block.0): - ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 9633802 /* regdef:SGPR_128 */, def %11 + ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10682378 /* regdef:SGPR_128 */, def %11 ; GFX90A-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY %11 - ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9633801 /* reguse:SGPR_128 */, [[COPY]] + ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 10682377 /* reguse:SGPR_128 */, [[COPY]] ; GFX90A-NEXT: S_ENDPGM 0 %val = tail call i128 asm sideeffect "; def $0", "=s"() call void asm sideeffect "; use $0", "s"(i128 %val) @@ -27,16 +27,16 @@ define amdgpu_kernel void @s_input_output_i128() { define amdgpu_kernel void @v_input_output_i128() { ; GFX908-LABEL: name: v_input_output_i128 ; GFX908: bb.0 (%ir-block.0): - ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7798794 /* regdef:VReg_128 */, def %13 + ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7929866 /* regdef:VReg_128 */, def %13 ; GFX908-NEXT: [[COPY:%[0-9]+]]:vreg_128 = COPY %13 - ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7798793 /* reguse:VReg_128 */, [[COPY]] + ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7929865 /* reguse:VReg_128 */, [[COPY]] ; GFX908-NEXT: S_ENDPGM 0 ; ; GFX90A-LABEL: name: v_input_output_i128 ; GFX90A: bb.0 (%ir-block.0): - ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7995402 /* regdef:VReg_128_Align2 */, def %11 + ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8257546 /* regdef:VReg_128_Align2 */, def %11 ; GFX90A-NEXT: [[COPY:%[0-9]+]]:vreg_128_align2 = COPY %11 - ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7995401 /* reguse:VReg_128_Align2 */, [[COPY]] + ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8257545 /* reguse:VReg_128_Align2 */, [[COPY]] ; GFX90A-NEXT: 
S_ENDPGM 0 %val = tail call i128 asm sideeffect "; def $0", "=v"() call void asm sideeffect "; use $0", "v"(i128 %val) @@ -47,16 +47,16 @@ define amdgpu_kernel void @a_input_output_i128() { ; GFX908-LABEL: name: a_input_output_i128 ; GFX908: bb.0 (%ir-block.0): - ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8323082 /* regdef:AReg_128 */, def %13 + ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8847370 /* regdef:AReg_128 */, def %13 ; GFX908-NEXT: [[COPY:%[0-9]+]]:areg_128 = COPY %13 - ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY]] + ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY]] ; GFX908-NEXT: S_ENDPGM 0 ; ; GFX90A-LABEL: name: a_input_output_i128 ; GFX90A: bb.0 (%ir-block.0): - ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8650762 /* regdef:AReg_128_Align2 */, def %11 + ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 9568266 /* regdef:AReg_128_Align2 */, def %11 ; GFX90A-NEXT: [[COPY:%[0-9]+]]:areg_128_align2 = COPY %11 - ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY]] + ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY]] ; GFX90A-NEXT: S_ENDPGM 0 %val = call i128 asm sideeffect "; def $0", "=a"() call void asm sideeffect "; use $0", "a"(i128 %val) diff --git a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll index 6509d80..f88b1bf 100644 --- a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll +++ b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll @@ -12,7 +12,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 { ; REGALLOC-GFX908-NEXT: liveins: $sgpr4_sgpr5 ; REGALLOC-GFX908-NEXT: {{ $}} ; REGALLOC-GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef %6:agpr_32 - ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7798794 /* regdef:VReg_128 */, def %25 + ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7929866 /* regdef:VReg_128 */, def %25 ; REGALLOC-GFX908-NEXT: [[COPY:%[0-9]+]]:av_128 = COPY %25 ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3735562 /* regdef:VReg_64 */, def %27 ; REGALLOC-GFX908-NEXT: SI_SPILL_AV64_SAVE %27, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5) @@ -37,7 +37,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 { ; PEI-GFX908-NEXT: $sgpr12 = S_ADD_U32 $sgpr12, $sgpr9, implicit-def $scc, implicit-def $sgpr12_sgpr13_sgpr14_sgpr15 ; PEI-GFX908-NEXT: $sgpr13 = S_ADDC_U32 $sgpr13, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr12_sgpr13_sgpr14_sgpr15 ; PEI-GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef renamable $agpr0 - ; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7798794 /* regdef:VReg_128 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3 + ; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7929866 /* regdef:VReg_128 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3 ; PEI-GFX908-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, implicit 
$exec ; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3735562 /* regdef:VReg_64 */, def renamable $vgpr0_vgpr1 ; PEI-GFX908-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr12_sgpr13_sgpr14_sgpr15, 0, 0, 0, 0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $vgpr0_vgpr1 :: (store (s32) into %stack.0, addrspace 5) @@ -61,7 +61,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 { ; REGALLOC-GFX90A-NEXT: liveins: $sgpr4_sgpr5 ; REGALLOC-GFX90A-NEXT: {{ $}} ; REGALLOC-GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef %6:agpr_32 - ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7995402 /* regdef:VReg_128_Align2 */, def %23 + ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8257546 /* regdef:VReg_128_Align2 */, def %23 ; REGALLOC-GFX90A-NEXT: [[COPY:%[0-9]+]]:av_128_align2 = COPY %23 ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3997706 /* regdef:VReg_64_Align2 */, def %21 ; REGALLOC-GFX90A-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY %21 @@ -80,7 +80,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 { ; PEI-GFX90A-NEXT: liveins: $sgpr4_sgpr5 ; PEI-GFX90A-NEXT: {{ $}} ; PEI-GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef renamable $agpr0 - ; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7995402 /* regdef:VReg_128_Align2 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3 + ; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8257546 /* regdef:VReg_128_Align2 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3 ; PEI-GFX90A-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, implicit $exec ; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3997706 /* regdef:VReg_64_Align2 */, def renamable $vgpr2_vgpr3 ; PEI-GFX90A-NEXT: GLOBAL_STORE_DWORDX4 undef renamable $vgpr0_vgpr1, killed renamable $agpr0_agpr1_agpr2_agpr3, 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) poison`, addrspace 1) diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir index d7b713a..0b4e662 100644 --- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir +++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir @@ -19,7 +19,7 @@ body: | ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX2_]], 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]] ; CHECK-NEXT: [[COPY3:%[0-9]+]].sub2_sub3:areg_128_align2 = IMPLICIT_DEF - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]].sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: SI_RETURN @@ -30,7 +30,7 @@ body: | %4:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3, 0, 0, 0, implicit $mode, implicit $exec undef 
%5.sub0_sub1:areg_128_align2 = COPY %4 %5.sub2_sub3 = IMPLICIT_DEF - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5 GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1) GLOBAL_STORE_DWORDX2 %0, %5.sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1) SI_RETURN @@ -172,7 +172,7 @@ body: | ; CHECK-NEXT: undef [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]].sub2_sub3:areg_128_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX2_]], 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub2_sub3 ; CHECK-NEXT: [[COPY3:%[0-9]+]].sub2_sub3:areg_128_align2 = IMPLICIT_DEF - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]].sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: SI_RETURN @@ -183,7 +183,7 @@ body: | undef %4.sub2_sub3:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3, 0, 0, 0, implicit $mode, implicit $exec undef %5.sub0_sub1:areg_128_align2 = COPY %4.sub2_sub3 %5.sub2_sub3 = IMPLICIT_DEF - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5 GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1) GLOBAL_STORE_DWORDX2 %0, %5.sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1) SI_RETURN @@ -208,7 +208,7 @@ body: | ; CHECK-NEXT: undef [[V_MFMA_F64_4X4X4F64_vgprcd_e64_:%[0-9]+]].sub2_sub3:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX2_]], 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_vgprcd_e64_]].sub2 ; CHECK-NEXT: [[COPY3:%[0-9]+]].sub2_sub3:areg_128_align2 = IMPLICIT_DEF - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]].sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: SI_RETURN @@ -219,7 +219,7 @@ body: | undef %4.sub2_sub3:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3, 0, 0, 0, implicit $mode, implicit $exec undef %5.sub1:areg_128_align2 = COPY %4.sub2 %5.sub2_sub3 = IMPLICIT_DEF - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5 GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1) GLOBAL_STORE_DWORDX2 %0, %5.sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1) SI_RETURN diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir 
b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir index 57f611b..4c2ea2f 100644 --- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir +++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir @@ -17,7 +17,7 @@ body: | ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1) ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]] - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: SI_RETURN %0:vreg_64_align2 = COPY $vgpr4_vgpr5 @@ -26,7 +26,7 @@ body: | %3:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1) %4:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec undef %5.sub0_sub1:areg_128_align2 = COPY %4 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5 GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1) SI_RETURN ... @@ -47,7 +47,7 @@ body: | ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1) ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX4_]].sub2_sub3, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]] - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: SI_RETURN %0:vreg_64_align2 = COPY $vgpr4_vgpr5 @@ -56,7 +56,7 @@ body: | %3:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1) %4:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3.sub2_sub3, 0, 0, 0, implicit $mode, implicit $exec undef %5.sub0_sub1:areg_128_align2 = COPY %4 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5 GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1) SI_RETURN ... 
@@ -151,7 +151,7 @@ body: | ; CHECK-NEXT: dead %other_use:vreg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_1]].sub0_sub1 ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_2:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[V_MFMA_F64_4X4X4F64_e64_1]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_2]] - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: SI_RETURN %0:vreg_64_align2 = COPY $vgpr4_vgpr5 @@ -163,7 +163,7 @@ body: | %other_use:vreg_64_align2 = COPY %5.sub0_sub1 %6:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %5.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec undef %8.sub0_sub1:areg_128_align2 = COPY %6 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %8:areg_128_align2 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %8:areg_128_align2 GLOBAL_STORE_DWORDX4 %0, %8, 0, 0, implicit $exec :: (store (s128), addrspace 1) SI_RETURN @@ -231,7 +231,7 @@ body: | ; CHECK-NEXT: dead %other_use1:vreg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub2_sub3 ; CHECK-NEXT: dead %other_use2:vreg_64 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub1_sub2 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]] - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: SI_RETURN %0:vreg_64_align2 = COPY $vgpr4_vgpr5 @@ -245,7 +245,7 @@ body: | %other_use1:vreg_64_align2 = COPY %4.sub2_sub3 %other_use2:vreg_64 = COPY %4.sub1_sub2 %6:areg_128_align2 = COPY %4 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %6:areg_128_align2 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %6:areg_128_align2 GLOBAL_STORE_DWORDX4 %0, %6, 0, 0, implicit $exec :: (store (s128), addrspace 1) SI_RETURN ... 
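Note on the AMDGPU updates above: every change is a renumbering of the INLINEASM operand-flag immediate while the decoded comment (reguse:AReg_128, regdef:VReg_128_Align2, and so on) stays the same, e.g. 8323081 becoming 8847369 for reguse:AReg_128. A minimal sketch for eyeballing such constants follows, assuming the conventional LLVM flag layout of kind in bits 0-2, operand count in bits 3-15, and a register-class field in the upper sixteen bits; the decode helper and field names here are illustrative rather than LLVM's actual API, and the register-class numbering itself is target- and revision-specific, which is exactly why these immediates shift between checked-in baselines.

// Hedged sketch (not LLVM's API): decode an INLINEASM operand-flag
// immediate as printed in MIR, e.g. "8847369 /* reguse:AReg_128 */".
// Assumed layout: kind in bits 0-2, operand count in bits 3-15, and a
// register-class field in bits 16+ (possibly stored as RCID+1 so that zero
// can mean "no register class"); llvm/include/llvm/IR/InlineAsm.h is the
// authoritative encoding.
#include <cstdint>
#include <cstdio>
#include <initializer_list>

struct AsmFlag {
  uint32_t Kind;          // observed in the diffs: 1 for "reguse", 2 for "regdef"
  uint32_t NumOperands;   // number of following register operands
  uint32_t RegClassField; // raw upper-half value; moves when classes renumber
};

static AsmFlag decode(uint32_t Flag) {
  return {Flag & 0x7u, (Flag >> 3) & 0x1FFFu, Flag >> 16};
}

int main() {
  // Old vs. new immediates taken from the test diffs above.
  for (uint32_t F : {8323081u, 8847369u, 8650761u, 9568265u, 39190537u, 40239113u}) {
    AsmFlag D = decode(F);
    std::printf("%-9u kind=%u numops=%u rcfield=%u\n", F, D.Kind, D.NumOperands,
                D.RegClassField);
  }
  return 0;
}

Decoding the old/new pairs this way shows only the upper-half field moving (for instance 127 to 135 for reguse:AReg_128, 598 to 614 for reguse:VReg_512_Align2), while the low bits are untouched; that is consistent with the use/def pairs in the diff differing only in their final digit (…9 vs …10).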
diff --git a/llvm/test/CodeGen/ARM/and-mask-variable.ll b/llvm/test/CodeGen/ARM/and-mask-variable.ll new file mode 100644 index 0000000..0f84b76 --- /dev/null +++ b/llvm/test/CodeGen/ARM/and-mask-variable.ll @@ -0,0 +1,90 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M +; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A +; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T +; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M + +define i32 @mask_pair(i32 %x, i32 %y) { +; V7M-LABEL: mask_pair: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: mask_pair: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: mask_pair: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: mask_pair: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: bx lr + %shl = shl nsw i32 -1, %y + %and = and i32 %shl, %x + ret i32 %and +} + +define i64 @mask_pair_64(i64 %x, i64 %y) { +; V7M-LABEL: mask_pair_64: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r12, r3, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl.w r12, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r3, r2 +; V7M-NEXT: and.w r0, r0, r12 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: mask_pair_64: +; V7A: @ %bb.0: +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: and r0, r2, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: mask_pair_64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r12, r3, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r2 +; V7A-T-NEXT: and.w r0, r0, r12 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: mask_pair_64: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shl = shl nsw i64 -1, %y + %and = and i64 %shl, %x + ret i64 %and +} diff --git a/llvm/test/CodeGen/ARM/extract-bits.ll b/llvm/test/CodeGen/ARM/extract-bits.ll new file mode 100644 index 0000000..77deaa5 --- /dev/null +++ b/llvm/test/CodeGen/ARM/extract-bits.ll @@ -0,0 +1,4591 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M +; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A +; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T +; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M + +; Patterns: +; a) (x >> start) & (1 << nbits) - 1 +; b) (x >> start) & ~(-1 << nbits) +; c) (x >> start) & (-1 >> (32 - y)) +; d) (x >> start) << (32 - y) >> (32 - y) +; are equivalent. + +; ---------------------------------------------------------------------------- ; +; Pattern a. 
32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bextr32_a0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_a0: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a0: +; V7A: @ %bb.0: +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a0: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: subs r1, r1, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a0_arithmetic(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_a0_arithmetic: +; V7M: @ %bb.0: +; V7M-NEXT: asrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a0_arithmetic: +; V7A: @ %bb.0: +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, asr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a0_arithmetic: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: asrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a0_arithmetic: +; V6M: @ %bb.0: +; V6M-NEXT: asrs r0, r1 +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: subs r1, r1, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %shifted = ashr i32 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr32_a1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: subs r1, r1, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %conv = zext i8 %numlowbits to i32 + %onebit = shl i32 1, %conv + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_a2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; 
V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a2_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: lsrs r3, r1 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: ands r0, r3 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %shifted = lshr i32 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr32_a3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: lsrs r3, r1 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: ands r0, r3 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %conv = zext i8 %numlowbits to i32 + %onebit = shl i32 1, %conv + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_a4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: subs r1, r1, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %shifted, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bextr64_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_a0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push 
{r4, lr} +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: rsb.w r4, r12, #32 +; V7M-NEXT: subs.w r3, r12, #32 +; V7M-NEXT: lsr.w r4, lr, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r4, lr, r3 +; V7M-NEXT: lsl.w r3, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs r3, #1 +; V7M-NEXT: sbc r12, r4, #0 +; V7M-NEXT: rsb.w r4, r2, #32 +; V7M-NEXT: lsl.w r4, r1, r4 +; V7M-NEXT: orrs r0, r4 +; V7M-NEXT: subs.w r4, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r4 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: and.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r1, r1, r12 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_a0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r3, r12, #32 +; V7A-NEXT: subs r4, r12, #32 +; V7A-NEXT: lsr r3, lr, r3 +; V7A-NEXT: lslpl r3, lr, r4 +; V7A-NEXT: lsl r4, lr, r12 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs r4, r4, #1 +; V7A-NEXT: sbc r12, r3, #0 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r0, r4, r0 +; V7A-NEXT: and r1, r12, r1 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: rsb.w r4, r12, #32 +; V7A-T-NEXT: subs.w r3, r12, #32 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r3 +; V7A-T-NEXT: lsl.w r3, lr, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: subs r3, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r0, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #12 +; V6M-NEXT: sub sp, #12 +; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill +; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: ldr r2, [sp, #32] +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: subs r5, r0, #1 +; V6M-NEXT: sbcs r4, r7 +; V6M-NEXT: mov r0, r6 +; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload +; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: add sp, #12 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a0_arithmetic(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_a0_arithmetic: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: rsb.w r4, r12, #32 +; V7M-NEXT: subs.w r3, r12, #32 +; V7M-NEXT: lsr.w r4, lr, r4 +; V7M-NEXT: it pl 
+; V7M-NEXT: lslpl.w r4, lr, r3 +; V7M-NEXT: lsl.w r3, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs r3, #1 +; V7M-NEXT: sbc r12, r4, #0 +; V7M-NEXT: rsb.w r4, r2, #32 +; V7M-NEXT: lsl.w r4, r1, r4 +; V7M-NEXT: orrs r0, r4 +; V7M-NEXT: subs.w r4, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: asrpl.w r0, r1, r4 +; V7M-NEXT: asr.w r2, r1, r2 +; V7M-NEXT: and.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: asrpl r2, r1, #31 +; V7M-NEXT: and.w r1, r12, r2 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_a0_arithmetic: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r3, r12, #32 +; V7A-NEXT: subs r4, r12, #32 +; V7A-NEXT: lsr r3, lr, r3 +; V7A-NEXT: lslpl r3, lr, r4 +; V7A-NEXT: lsl r4, lr, r12 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs r4, r4, #1 +; V7A-NEXT: sbc r12, r3, #0 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: asr r2, r1, r2 +; V7A-NEXT: asrpl r0, r1, r3 +; V7A-NEXT: asrpl r2, r1, #31 +; V7A-NEXT: and r0, r4, r0 +; V7A-NEXT: and r1, r12, r2 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_a0_arithmetic: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: rsb.w r4, r12, #32 +; V7A-T-NEXT: subs.w r3, r12, #32 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r3 +; V7A-T-NEXT: lsl.w r3, lr, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: subs r3, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r0, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: asrpl.w r0, r1, r4 +; V7A-T-NEXT: asr.w r2, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: asrpl r2, r1, #31 +; V7A-T-NEXT: and.w r1, r12, r2 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a0_arithmetic: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #12 +; V6M-NEXT: sub sp, #12 +; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill +; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: ldr r2, [sp, #32] +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: subs r5, r0, #1 +; V6M-NEXT: sbcs r4, r7 +; V6M-NEXT: mov r0, r6 +; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload +; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_lasr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: add sp, #12 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %shifted = ashr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr64_a1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: rsb.w r4, r3, #32 +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: subs.w r12, r3, #32 +; V7M-NEXT: lsl.w r3, lr, r3 +; V7M-NEXT: lsr.w r4, lr, r4 +; V7M-NEXT: lsr.w r0, r0, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r4, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs r3, #1 +; V7M-NEXT: sbc r12, r4, #0 +; V7M-NEXT: 
rsb.w r4, r2, #32 +; V7M-NEXT: lsl.w r4, r1, r4 +; V7M-NEXT: orrs r0, r4 +; V7M-NEXT: subs.w r4, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r4 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: and.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r1, r1, r12 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_a1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: rsb r12, r3, #32 +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: subs r4, r3, #32 +; V7A-NEXT: lsl r3, lr, r3 +; V7A-NEXT: lsr r12, lr, r12 +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: lslpl r12, lr, r4 +; V7A-NEXT: rsb r4, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: subs r3, r3, #1 +; V7A-NEXT: sbc r12, r12, #0 +; V7A-NEXT: orr r0, r0, r1, lsl r4 +; V7A-NEXT: subs r4, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r4 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: and r1, r12, r1 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_a1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r4, r3, #32 +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: subs.w r12, r3, #32 +; V7A-T-NEXT: lsl.w r3, lr, r3 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: lsr.w r0, r0, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: subs r3, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r0, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #12 +; V6M-NEXT: sub sp, #12 +; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill +; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: mov r2, r3 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: subs r5, r0, #1 +; V6M-NEXT: sbcs r4, r7 +; V6M-NEXT: mov r0, r6 +; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload +; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: add sp, #12 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %conv = zext i8 %numlowbits to i64 + %onebit = shl i64 1, %conv + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_a2_load: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: rsb.w r1, r12, #32 +; V7M-NEXT: subs.w r3, r12, #32 +; V7M-NEXT: lsr.w r1, lr, r1 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, lr, r3 +; V7M-NEXT: lsl.w r3, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs.w lr, r3, #1 +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: sbc r12, r1, #0 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: orrs r0, r1 +; V7M-NEXT: subs.w r1, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r1 +; 
V7M-NEXT: lsr.w r1, r3, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r0, r0, lr +; V7M-NEXT: and.w r1, r1, r12 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_a2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r6, lr} +; V7A-NEXT: push {r4, r5, r6, lr} +; V7A-NEXT: ldr r1, [sp, #16] +; V7A-NEXT: mov r3, #1 +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: ldr r5, [r0, #4] +; V7A-NEXT: rsb r0, r1, #32 +; V7A-NEXT: subs r4, r1, #32 +; V7A-NEXT: lsl r1, r3, r1 +; V7A-NEXT: lsr r0, r3, r0 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lslpl r0, r3, r4 +; V7A-NEXT: subs r1, r1, #1 +; V7A-NEXT: sbc r3, r0, #0 +; V7A-NEXT: lsr r0, r6, r2 +; V7A-NEXT: rsb r6, r2, #32 +; V7A-NEXT: orr r0, r0, r5, lsl r6 +; V7A-NEXT: subs r6, r2, #32 +; V7A-NEXT: lsrpl r0, r5, r6 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: lsr r1, r5, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: pop {r4, r5, r6, pc} +; +; V7A-T-LABEL: bextr64_a2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: movs r3, #1 +; V7A-T-NEXT: ldrd lr, r1, [r0] +; V7A-T-NEXT: rsb.w r4, r12, #32 +; V7A-T-NEXT: subs.w r0, r12, #32 +; V7A-T-NEXT: lsr.w r4, r3, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, r3, r0 +; V7A-T-NEXT: lsl.w r0, r3, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsr.w r3, lr, r2 +; V7A-T-NEXT: subs r0, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r3, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r3, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #4 +; V6M-NEXT: sub sp, #4 +; V6M-NEXT: str r2, [sp] @ 4-byte Spill +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: ldr r2, [sp, #24] +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r6, r1 +; V6M-NEXT: subs r4, r0, #1 +; V6M-NEXT: sbcs r6, r7 +; V6M-NEXT: ldm r5!, {r0, r1} +; V6M-NEXT: ldr r2, [sp] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: ands r1, r6 +; V6M-NEXT: add sp, #4 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %val = load i64, ptr %w + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr64_a3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs.w lr, r2, #1 +; V7M-NEXT: ldrd r0, r2, [r0] +; V7M-NEXT: sbc r12, r3, #0 +; V7M-NEXT: rsb.w r3, r1, #32 +; V7M-NEXT: lsl.w r3, r2, r3 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r1, #32 +; V7M-NEXT: lsr.w r1, r2, r1 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r2, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r0, r0, lr +; V7M-NEXT: and.w r1, 
r1, r12 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_a3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r6, lr} +; V7A-NEXT: push {r4, r5, r6, lr} +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: mov r3, #1 +; V7A-NEXT: ldr r5, [r0, #4] +; V7A-NEXT: rsb r0, r2, #32 +; V7A-NEXT: subs r4, r2, #32 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lsr r0, r3, r0 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl r0, r3, r4 +; V7A-NEXT: subs r3, r2, #1 +; V7A-NEXT: sbc r0, r0, #0 +; V7A-NEXT: lsr r2, r5, r1 +; V7A-NEXT: subs r4, r1, #32 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r2, r0, r2 +; V7A-NEXT: lsr r0, r6, r1 +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: orr r0, r0, r5, lsl r1 +; V7A-NEXT: mov r1, r2 +; V7A-NEXT: lsrpl r0, r5, r4 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: pop {r4, r5, r6, pc} +; +; V7A-T-LABEL: bextr64_a3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: lsl.w r2, lr, r2 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: ldrd r12, r0, [r0] +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs.w lr, r2, #1 +; V7A-T-NEXT: sbc r2, r4, #0 +; V7A-T-NEXT: lsr.w r4, r0, r1 +; V7A-T-NEXT: subs.w r3, r1, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r4, #0 +; V7A-T-NEXT: and.w r2, r2, r4 +; V7A-T-NEXT: rsb.w r4, r1, #32 +; V7A-T-NEXT: lsr.w r1, r12, r1 +; V7A-T-NEXT: lsl.w r4, r0, r4 +; V7A-T-NEXT: orr.w r1, r1, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r1, r0, r3 +; V7A-T-NEXT: and.w r0, lr, r1 +; V7A-T-NEXT: mov r1, r2 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #4 +; V6M-NEXT: sub sp, #4 +; V6M-NEXT: str r1, [sp] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: subs r4, r0, #1 +; V6M-NEXT: sbcs r5, r7 +; V6M-NEXT: ldm r6!, {r0, r1} +; V6M-NEXT: ldr r2, [sp] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: add sp, #4 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %val = load i64, ptr %w + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %conv = zext i8 %numlowbits to i64 + %onebit = shl i64 1, %conv + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_a4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: rsb.w r4, r12, #32 +; V7M-NEXT: subs.w r3, r12, #32 +; V7M-NEXT: lsr.w r4, lr, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r4, lr, r3 +; V7M-NEXT: lsl.w r3, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs r3, #1 +; V7M-NEXT: sbc r12, r4, #0 +; V7M-NEXT: rsb.w r4, r2, #32 +; V7M-NEXT: lsl.w r4, r1, r4 +; V7M-NEXT: orrs r0, r4 +; V7M-NEXT: subs.w r4, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r4 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: and.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r1, r1, r12 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_a4_commutative: +; V7A: @ %bb.0: +; 
V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r3, r12, #32 +; V7A-NEXT: subs r4, r12, #32 +; V7A-NEXT: lsr r3, lr, r3 +; V7A-NEXT: lslpl r3, lr, r4 +; V7A-NEXT: lsl r4, lr, r12 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs r4, r4, #1 +; V7A-NEXT: sbc r12, r3, #0 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r0, r0, r4 +; V7A-NEXT: and r1, r1, r12 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_a4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: rsb.w r4, r12, #32 +; V7A-T-NEXT: subs.w r3, r12, #32 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r3 +; V7A-T-NEXT: lsl.w r3, lr, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: subs r3, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r0, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #12 +; V6M-NEXT: sub sp, #12 +; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill +; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: ldr r2, [sp, #32] +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: subs r5, r0, #1 +; V6M-NEXT: sbcs r4, r7 +; V6M-NEXT: mov r0, r6 +; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload +; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: add sp, #12 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %shifted, %mask ; swapped order + ret i64 %masked +} + +; 64-bit, but with 32-bit output + +; Everything done in 64-bit, truncation happens last. 
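For reference, a rough C-level equivalent of the pattern exercised by the function that follows (the name and parameters are illustrative only, not taken from the test, and the sketch assumes the shift counts stay below the bit width so there is no C-level UB):

#include <stdint.h>

/* Pattern a, 64-bit throughout, truncated at the end:
   mask = (1 << numlowbits) - 1, applied before the trunc. */
uint32_t bextr64_32_a0_ref(uint64_t val, uint64_t numskipbits,
                           uint64_t numlowbits) {
  uint64_t shifted = val >> numskipbits;            /* lshr i64 */
  uint64_t mask = (UINT64_C(1) << numlowbits) - 1;  /* shl 1, n; add -1 */
  return (uint32_t)(mask & shifted);                /* and i64, then trunc */
}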
+define i32 @bextr64_32_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_a0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsls r2, r1 +; V7M-NEXT: subs r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r1, r2, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_a0: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: mov r1, #1 +; V7A-NEXT: lsl r1, r1, r12 +; V7A-NEXT: subs r2, r12, #32 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: sub r1, r1, #1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: subs.w r2, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_a0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: ldr r2, [sp, #8] +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, pc} + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + %res = trunc i64 %masked to i32 + ret i32 %res +} + +; Shifting happens in 64-bit, then truncation. Masking is 32-bit. 
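A minimal C sketch of this variant, with illustrative names (not taken from the test) and the same in-range assumption on the shift counts:

#include <stdint.h>

/* Pattern a, but the shifted value is truncated to 32 bits first and
   the mask is built and applied in 32 bits. */
uint32_t bextr64_32_a1_ref(uint64_t val, uint64_t numskipbits,
                           uint32_t numlowbits) {
  uint32_t truncshifted = (uint32_t)(val >> numskipbits); /* lshr i64, trunc */
  uint32_t mask = (UINT32_C(1) << numlowbits) - 1;        /* 32-bit mask */
  return mask & truncshifted;
}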
+define i32 @bextr64_32_a1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_a1: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_a1: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: add r12, r3, lr, lsl r12 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: and r0, r12, r0 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_32_a1: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_a1: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %truncshifted = trunc i64 %shifted to i32 + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %truncshifted + ret i32 %masked +} + +; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit. +; Masking is 64-bit. Then truncation. 
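Sketched in C under the same assumptions (hypothetical names, in-range shift counts):

#include <stdint.h>

/* Pattern a with a 32-bit mask that is zero-extended and applied to
   the 64-bit shifted value; the truncation comes last. */
uint32_t bextr64_32_a2_ref(uint64_t val, uint64_t numskipbits,
                           uint32_t numlowbits) {
  uint64_t shifted = val >> numskipbits;
  uint32_t mask32 = (UINT32_C(1) << numlowbits) - 1;
  uint64_t mask = (uint64_t)mask32;        /* zext i32 -> i64 */
  return (uint32_t)(mask & shifted);       /* and i64, trunc i64 -> i32 */
}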
+define i32 @bextr64_32_a2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_a2: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_a2: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: add r12, r3, lr, lsl r12 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: and r0, r12, r0 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_32_a2: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_a2: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %zextmask = zext i32 %mask to i64 + %masked = and i64 %zextmask, %shifted + %truncmasked = trunc i64 %masked to i32 + ret i32 %truncmasked +} + +; ---------------------------------------------------------------------------- ; +; Pattern b. 
32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bextr32_b0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_b0: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_b0: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b0: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: mvns r1, r1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_b1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr32_b1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_b1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: mvns r1, r1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %conv = zext i8 %numlowbits to i32 + %notmask = shl i32 -1, %conv + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_b2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_b2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_b2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #0 +; V6M-NEXT: mvns r3, r3 +; V6M-NEXT: lsls r3, r2 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bics r0, r3 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %shifted = lshr i32 %val, %numskipbits + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr32_b3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: 
bextr32_b3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #0 +; V6M-NEXT: mvns r3, r3 +; V6M-NEXT: lsls r3, r2 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bics r0, r3 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %conv = zext i8 %numlowbits to i32 + %notmask = shl i32 -1, %conv + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_b4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_b4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_b4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: mvns r1, r1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %shifted, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bextr64_b0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_b0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsl.w r3, r2, r12 +; V7M-NEXT: subs.w lr, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r2, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: bics r1, r2 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_b0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: subs lr, r12, #32 +; V7A-NEXT: lsl r2, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: lslpl r3, r3, lr +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r7, lr} +; V7A-T-NEXT: push {r4, r5, r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #16] +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r5, r0, r3 +; V7A-T-NEXT: mov.w r3, #-1 +; 
V7A-T-NEXT: subs.w lr, r12, #32 +; V7A-T-NEXT: lsl.w r0, r3, r12 +; V7A-T-NEXT: itt pl +; V7A-T-NEXT: lslpl.w r3, r3, lr +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r5, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: bic.w r0, r5, r0 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: pop {r4, r5, r7, pc} +; +; V6M-LABEL: bextr64_b0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: ldr r2, [sp, #16] +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r4, r0 +; V6M-NEXT: bics r5, r1 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_b1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr64_b1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: lsr.w r12, r0, r2 +; V7M-NEXT: rsb.w r0, r2, #32 +; V7M-NEXT: lsl.w r0, r1, r0 +; V7M-NEXT: orr.w r12, r12, r0 +; V7M-NEXT: subs.w r0, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r12, r1, r0 +; V7M-NEXT: lsr.w r0, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: subs.w r1, r3, #32 +; V7M-NEXT: lsl.w r3, r2, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r2, r1 +; V7M-NEXT: bic.w r1, r0, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: bic.w r0, r12, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_b1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r12, r0, r2 +; V7A-NEXT: rsb r0, r2, #32 +; V7A-NEXT: orr r12, r12, r1, lsl r0 +; V7A-NEXT: subs r0, r2, #32 +; V7A-NEXT: lsrpl r12, r1, r0 +; V7A-NEXT: lsr r0, r1, r2 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: subs r1, r3, #32 +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: lsl r3, r2, r3 +; V7A-NEXT: lslpl r2, r2, r1 +; V7A-NEXT: bic r1, r0, r2 +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: bic r0, r12, r3 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_b1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsr.w r12, r0, r2 +; V7A-T-NEXT: rsb.w r0, r2, #32 +; V7A-T-NEXT: lsl.w r0, r1, r0 +; V7A-T-NEXT: orr.w r12, r12, r0 +; V7A-T-NEXT: subs.w r0, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r12, r1, r0 +; V7A-T-NEXT: lsr.w r0, r1, r2 +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs.w r1, r3, #32 +; V7A-T-NEXT: lsl.w r3, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r2, r1 +; V7A-T-NEXT: bic.w r1, r0, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: bic.w r0, r12, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_b1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r4, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r6, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r6, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: pop {r4, r5, r6, pc} + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %conv = zext i8 %numlowbits to i64 + %notmask = shl i64 -1, %conv + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, 
%shifted + ret i64 %masked +} + +define i64 @bextr64_b2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_b2_load: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: orrs r0, r1 +; V7M-NEXT: subs.w r1, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r1 +; V7M-NEXT: lsr.w r1, r3, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsl.w r3, r2, r12 +; V7M-NEXT: subs.w lr, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r2, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: bics r1, r2 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_b2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: ldrd r0, r1, [r0] +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: subs lr, r12, #32 +; V7A-NEXT: lsl r2, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: lslpl r3, r3, lr +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_b2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: ldrd r0, r3, [r0] +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: lsl.w r1, r3, r1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: orrs r0, r1 +; V7A-T-NEXT: subs.w r1, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r3, r1 +; V7A-T-NEXT: lsr.w r1, r3, r2 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: lsl.w r2, r3, r12 +; V7A-T-NEXT: subs.w lr, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r3, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bextr64_b2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: ldr r2, [sp, #16] +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r4, r0 +; V6M-NEXT: bics r5, r1 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: pop {r4, r5, r7, pc} + %val = load i64, ptr %w + %shifted = lshr i64 %val, %numskipbits + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr64_b3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: ldrd r12, r0, [r0] +; V7M-NEXT: rsb.w r3, r1, #32 +; V7M-NEXT: lsl.w lr, r0, r3 +; V7M-NEXT: lsr.w r3, r12, r1 +; V7M-NEXT: orr.w r12, r3, lr +; V7M-NEXT: subs.w r3, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r12, r0, r3 +; V7M-NEXT: lsr.w r0, r0, r1 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: subs.w r1, r2, #32 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: it pl +; 
V7M-NEXT: lslpl r3, r1 +; V7M-NEXT: bic.w r1, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: bic.w r0, r12, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_b3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldm r0, {r0, r3} +; V7A-NEXT: lsr r12, r0, r1 +; V7A-NEXT: rsb r0, r1, #32 +; V7A-NEXT: orr r12, r12, r3, lsl r0 +; V7A-NEXT: subs r0, r1, #32 +; V7A-NEXT: lsrpl r12, r3, r0 +; V7A-NEXT: lsr r0, r3, r1 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: subs r1, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r1 +; V7A-NEXT: bic r1, r0, r3 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r0, r12, r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_b3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: ldrd r12, r3, [r0] +; V7A-T-NEXT: rsb.w r0, r1, #32 +; V7A-T-NEXT: lsl.w lr, r3, r0 +; V7A-T-NEXT: lsr.w r0, r12, r1 +; V7A-T-NEXT: orr.w r12, r0, lr +; V7A-T-NEXT: subs.w r0, r1, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r12, r3, r0 +; V7A-T-NEXT: lsr.w r0, r3, r1 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs.w r1, r2, #32 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r1 +; V7A-T-NEXT: bic.w r1, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: bic.w r0, r12, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bextr64_b3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r4, r2 +; V6M-NEXT: mov r2, r1 +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r6, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r6, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: pop {r4, r5, r6, pc} + %val = load i64, ptr %w + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %conv = zext i8 %numlowbits to i64 + %notmask = shl i64 -1, %conv + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_b4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_b4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsl.w r3, r2, r12 +; V7M-NEXT: subs.w lr, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r2, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: bics r1, r2 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_b4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: subs lr, r12, #32 +; V7A-NEXT: lsl r2, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: lslpl r3, 
r3, lr +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_b4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r7, lr} +; V7A-T-NEXT: push {r4, r5, r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #16] +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r5, r0, r3 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: subs.w lr, r12, #32 +; V7A-T-NEXT: lsl.w r0, r3, r12 +; V7A-T-NEXT: itt pl +; V7A-T-NEXT: lslpl.w r3, r3, lr +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r5, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: bic.w r0, r5, r0 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: pop {r4, r5, r7, pc} +; +; V6M-LABEL: bextr64_b4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: ldr r2, [sp, #16] +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r4, r0 +; V6M-NEXT: bics r5, r1 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %shifted, %mask ; swapped order + ret i64 %masked +} + +; 64-bit, but with 32-bit output + +; Everything done in 64-bit, truncation happens last. +define i32 @bextr64_32_b0(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_b0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldrb.w r1, [sp] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsls r2, r1 +; V7M-NEXT: subs r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_b0: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldrb r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: lsl r1, r1, r12 +; V7A-NEXT: subs r2, r12, #32 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: bic r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsr.w r12, r0, r2 +; V7A-T-NEXT: rsb.w r0, r2, #32 +; V7A-T-NEXT: ldrb.w r3, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r0, r1, r0 +; V7A-T-NEXT: orr.w r0, r0, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: mov.w r1, #-1 +; V7A-T-NEXT: lsls r1, r3 +; V7A-T-NEXT: subs.w r2, r3, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_b0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: add r1, sp, #8 +; V6M-NEXT: ldrb r2, [r1] +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r4, r0 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: pop {r4, pc} + %shiftedval = lshr i64 %val, %numskipbits + %widenumlowbits = zext i8 %numlowbits to i64 + %notmask = shl nsw i64 -1, %widenumlowbits + %mask = xor i64 %notmask, -1 + %wideres = and i64 %shiftedval, %mask + %res = trunc 
i64 %wideres to i32 + ret i32 %res +} + +; Shifting happens in 64-bit, then truncation. Masking is 32-bit. +define i32 @bextr64_32_b1(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_b1: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldrb.w r1, [sp] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_b1: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldrb r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r12 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_b1: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldrb.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: mov.w r1, #-1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_b1: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: add r1, sp, #8 +; V6M-NEXT: ldrb r1, [r1] +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: pop {r7, pc} + %shiftedval = lshr i64 %val, %numskipbits + %truncshiftedval = trunc i64 %shiftedval to i32 + %widenumlowbits = zext i8 %numlowbits to i32 + %notmask = shl nsw i32 -1, %widenumlowbits + %mask = xor i32 %notmask, -1 + %res = and i32 %truncshiftedval, %mask + ret i32 %res +} + +; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit. +; Masking is 64-bit. Then truncation. 
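A C sketch of this pattern-b variant (names are illustrative; the implicit integer promotion of the 8-bit count stands in for the zext in the IR, and numlowbits is assumed to be below 32):

#include <stdint.h>

/* Pattern b: the mask is ~(-1 << numlowbits), built in 32 bits,
   zero-extended, and applied to the 64-bit shifted value. */
uint32_t bextr64_32_b2_ref(uint64_t val, uint64_t numskipbits,
                           uint8_t numlowbits) {
  uint64_t shifted = val >> numskipbits;
  uint32_t notmask = (uint32_t)-1 << numlowbits;  /* shl i32 -1, n */
  uint32_t mask32 = ~notmask;                     /* xor ..., -1 */
  return (uint32_t)(shifted & (uint64_t)mask32);  /* zext, and i64, trunc */
}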
+define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_b2: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldrb.w r1, [sp] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_b2: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldrb r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r12 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_b2: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldrb.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: mov.w r1, #-1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_b2: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: add r1, sp, #8 +; V6M-NEXT: ldrb r1, [r1] +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: pop {r7, pc} + %shiftedval = lshr i64 %val, %numskipbits + %widenumlowbits = zext i8 %numlowbits to i32 + %notmask = shl nsw i32 -1, %widenumlowbits + %mask = xor i32 %notmask, -1 + %zextmask = zext i32 %mask to i64 + %wideres = and i64 %shiftedval, %zextmask + %res = trunc i64 %wideres to i32 + ret i32 %res +} + +; ---------------------------------------------------------------------------- ; +; Pattern c. 
32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_c0: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c0: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr32_c1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #32 +; V6M-NEXT: subs r1, r1, r2 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %mask = lshr i32 -1, %sh_prom + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_c2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_c2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 
@bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr32_c3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #32 +; V6M-NEXT: subs r1, r1, r2 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %mask = lshr i32 -1, %sh_prom + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_c4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %shifted, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_c0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: ldr.w r12, [sp] +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsr.w r3, r2, r3 +; V7M-NEXT: rsbs.w r12, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r2, r2, r12 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_c0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r11, lr} +; V7A-NEXT: push {r4, r5, r11, lr} +; V7A-NEXT: ldr r12, [sp, #16] +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r5, r1, r2 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r4, r12, #64 +; V7A-NEXT: rsbs lr, r12, #32 +; V7A-NEXT: lsr r4, r3, r4 +; 
V7A-NEXT: lsrpl r3, r3, lr +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: movwpl r5, #0 +; V7A-NEXT: and r12, r4, r5 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: mov r1, r12 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: pop {r4, r5, r11, pc} +; +; V7A-T-LABEL: bextr64_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #-1 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orrs r0, r3 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r3 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsbs.w r2, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r2 +; V7A-T-NEXT: rsb.w r2, r12, #64 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: lsr.w r2, lr, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bextr64_c0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: ldr r0, [sp, #16] +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r2, r1, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_c1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr64_c1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: lsr.w r12, r0, r2 +; V7M-NEXT: rsb.w r0, r2, #32 +; V7M-NEXT: lsl.w r0, r1, r0 +; V7M-NEXT: orr.w r12, r12, r0 +; V7M-NEXT: subs.w r0, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r12, r1, r0 +; V7M-NEXT: rsb.w r0, r3, #64 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: uxtb r0, r0 +; V7M-NEXT: subs.w lr, r0, #32 +; V7M-NEXT: lsr.w r2, r3, r0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: and.w r0, r3, r12 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_c1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: uxtb r12, r2 +; V7A-NEXT: lsr lr, r0, r12 +; V7A-NEXT: rsb r0, r12, #32 +; V7A-NEXT: orr r4, lr, r1, lsl r0 +; V7A-NEXT: mvn lr, #31 +; V7A-NEXT: uxtab r2, lr, r2 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: lsrpl r4, r1, r2 +; V7A-NEXT: rsb r2, r3, #64 +; V7A-NEXT: lsr r1, r1, r12 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: uxtb r12, r2 +; V7A-NEXT: uxtab r2, lr, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lsr r0, r3, r12 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: and r1, r0, r1 +; V7A-NEXT: lsrpl r3, r3, r2 +; V7A-NEXT: and r0, r3, r4 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_c1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: uxtb.w r12, r2 +; V7A-T-NEXT: lsr.w lr, r0, r12 +; V7A-T-NEXT: rsb.w r0, r12, #32 +; V7A-T-NEXT: lsl.w r0, r1, r0 +; V7A-T-NEXT: orr.w r4, lr, r0 +; 
V7A-T-NEXT: mvn lr, #31 +; V7A-T-NEXT: uxtab r2, lr, r2 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r4, r1, r2 +; V7A-T-NEXT: rsb.w r2, r3, #64 +; V7A-T-NEXT: lsr.w r1, r1, r12 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: uxtb.w r12, r2 +; V7A-T-NEXT: uxtab r2, lr, r2 +; V7A-T-NEXT: lsr.w r0, r3, r12 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: and.w r1, r1, r0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r2 +; V7A-T-NEXT: and.w r0, r3, r4 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_c1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r3 +; V6M-NEXT: uxtb r2, r2 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r0, r0, r5 +; V6M-NEXT: uxtb r2, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r6 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %mask = lshr i64 -1, %sh_prom + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_c2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_c2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: ldr.w r12, [sp] +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: orrs r0, r1 +; V7M-NEXT: subs.w r1, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r1 +; V7M-NEXT: lsr.w r1, r3, r2 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: rsbs.w r12, r12, #32 +; V7M-NEXT: lsr.w r3, r2, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r2, r2, r12 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_c2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r8, lr} +; V7A-NEXT: push {r4, r6, r8, lr} +; V7A-NEXT: ldr r12, [sp, #16] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: rsb r6, r12, #64 +; V7A-NEXT: ldr r8, [r0] +; V7A-NEXT: mvn r0, #0 +; V7A-NEXT: rsbs r1, r12, #32 +; V7A-NEXT: lsr r6, r0, r6 +; V7A-NEXT: lsr r4, r3, r2 +; V7A-NEXT: lsrpl r0, r0, r1 +; V7A-NEXT: movwpl r6, #0 +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: and r1, r6, r4 +; V7A-NEXT: lsr r6, r8, r2 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: orr r2, r6, r3, lsl r2 +; V7A-NEXT: lsrpl r2, r3, r12 +; V7A-NEXT: and r0, r0, r2 +; V7A-NEXT: pop {r4, r6, r8, pc} +; +; V7A-T-LABEL: bextr64_c2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldrd r0, r3, [r0] +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: lsl.w r1, r3, r1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: orrs r0, r1 +; V7A-T-NEXT: subs.w r1, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r3, r1 +; V7A-T-NEXT: lsr.w r1, r3, r2 +; V7A-T-NEXT: rsb.w r2, r12, #64 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsbs.w r12, r12, #32 +; V7A-T-NEXT: lsr.w r2, r3, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r3, r3, r12 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: ands r0, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_c2_load: +; V6M: @ %bb.0: 
+; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: ldr r0, [sp, #16] +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r2, r1, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %val = load i64, ptr %w + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr64_c3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsr.w r12, r0, r1 +; V7M-NEXT: rsb.w r0, r1, #32 +; V7M-NEXT: lsl.w r0, r3, r0 +; V7M-NEXT: orr.w r12, r12, r0 +; V7M-NEXT: subs.w r0, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r12, r3, r0 +; V7M-NEXT: rsb.w r0, r2, #64 +; V7M-NEXT: lsr.w r1, r3, r1 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: uxtb r0, r0 +; V7M-NEXT: subs.w lr, r0, #32 +; V7M-NEXT: lsr.w r2, r3, r0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: and.w r0, r3, r12 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_c3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r4, [r0] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: uxtb r0, r1 +; V7A-NEXT: lsr r12, r4, r0 +; V7A-NEXT: rsb r4, r0, #32 +; V7A-NEXT: lsr r0, r3, r0 +; V7A-NEXT: orr lr, r12, r3, lsl r4 +; V7A-NEXT: mvn r12, #31 +; V7A-NEXT: uxtab r1, r12, r1 +; V7A-NEXT: cmp r1, #0 +; V7A-NEXT: lsrpl lr, r3, r1 +; V7A-NEXT: rsb r1, r2, #64 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: uxtb r2, r1 +; V7A-NEXT: uxtab r4, r12, r1 +; V7A-NEXT: lsr r2, r3, r2 +; V7A-NEXT: cmp r4, #0 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r1, r2, r0 +; V7A-NEXT: lsrpl r3, r3, r4 +; V7A-NEXT: and r0, r3, lr +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_c3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r7, lr} +; V7A-T-NEXT: push {r4, r5, r7, lr} +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: uxtb r0, r1 +; V7A-T-NEXT: rsb.w r3, r0, #32 +; V7A-T-NEXT: lsl.w r4, lr, r3 +; V7A-T-NEXT: lsr.w r3, r12, r0 +; V7A-T-NEXT: orr.w r5, r3, r4 +; V7A-T-NEXT: mvn r12, #31 +; V7A-T-NEXT: uxtab r1, r12, r1 +; V7A-T-NEXT: lsr.w r0, lr, r0 +; V7A-T-NEXT: cmp r1, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r5, lr, r1 +; V7A-T-NEXT: rsb.w r1, r2, #64 +; V7A-T-NEXT: mov.w r4, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: uxtb r2, r1 +; V7A-T-NEXT: uxtab r3, r12, r1 +; V7A-T-NEXT: lsr.w r2, r4, r2 +; V7A-T-NEXT: cmp r3, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: and.w r1, r2, r0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r4, r3 +; V7A-T-NEXT: and.w r0, r4, r5 +; V7A-T-NEXT: pop {r4, r5, r7, pc} +; +; V6M-LABEL: bextr64_c3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r2 +; V6M-NEXT: ldr r4, [r0] +; V6M-NEXT: ldr r3, [r0, #4] +; V6M-NEXT: uxtb r2, r1 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: mov r1, r3 +; 
V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r0, r0, r5 +; V6M-NEXT: uxtb r2, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r6 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %val = load i64, ptr %w + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %mask = lshr i64 -1, %sh_prom + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_c4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_c4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: ldr.w r12, [sp] +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsr.w r3, r2, r3 +; V7M-NEXT: rsbs.w r12, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r2, r2, r12 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_c4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r11, lr} +; V7A-NEXT: push {r4, r5, r11, lr} +; V7A-NEXT: ldr r12, [sp, #16] +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r5, r1, r2 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r4, r12, #64 +; V7A-NEXT: rsbs lr, r12, #32 +; V7A-NEXT: lsr r4, r3, r4 +; V7A-NEXT: lsrpl r3, r3, lr +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: movwpl r5, #0 +; V7A-NEXT: and r12, r5, r4 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: mov r1, r12 +; V7A-NEXT: and r0, r0, r3 +; V7A-NEXT: pop {r4, r5, r11, pc} +; +; V7A-T-LABEL: bextr64_c4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #-1 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orrs r0, r3 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r3 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsbs.w r2, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r2 +; V7A-T-NEXT: rsb.w r2, r12, #64 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: lsr.w r2, lr, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bextr64_c4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: ldr r0, [sp, #16] +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r2, r1, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %shifted, %mask ; swapped order + ret i64 %masked +} + +; 64-bit, but with 32-bit output + +; Everything done in 64-bit, truncation happens last. 
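For the pattern-c shape that follows, a rough C equivalent (illustrative names; assumes 0 < numlowbits <= 64 and numskipbits < 64 so both shifts are in range):

#include <stdint.h>

/* Pattern c: mask = all-ones >> (64 - numlowbits), everything done in
   64 bits and truncated at the very end. */
uint32_t bextr64_32_c0_ref(uint64_t val, uint64_t numskipbits,
                           uint64_t numlowbits) {
  uint64_t shifted = val >> numskipbits;            /* lshr i64 */
  uint64_t mask = UINT64_MAX >> (64 - numlowbits);  /* lshr i64 -1, highbits */
  return (uint32_t)(mask & shifted);                /* and, then trunc */
}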
+define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_c0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: rsbs.w r1, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl r2, r1 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_c0: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r3, [sp] +; V7A-NEXT: rsbs r12, r3, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsrpl r3, r3, r12 +; V7A-NEXT: lsr r12, r0, r2 +; V7A-NEXT: rsb r0, r2, #32 +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r12, r1, lsl r0 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: rsbs.w r1, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r2, r1 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_c0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: ldr r0, [sp, #8] +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r2, r1, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %shifted + %res = trunc i64 %masked to i32 + ret i32 %res +} + +; Shifting happens in 64-bit, then truncation. Masking is 32-bit. 
+define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_c1: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_c1: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: rsb r1, r12, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_c1: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: rsb.w r1, r12, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_c1: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %truncshifted = trunc i64 %shifted to i32 + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %truncshifted + ret i32 %masked +} + +; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit. +; Masking is 64-bit. Then truncation. 
+define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_c2: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_c2: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: rsb r1, r12, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_c2: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: rsb.w r1, r12, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_c2: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %zextmask = zext i32 %mask to i64 + %masked = and i64 %zextmask, %shifted + %truncmasked = trunc i64 %masked to i32 + ret i32 %truncmasked +} + +; ---------------------------------------------------------------------------- ; +; Pattern d. 32-bit. 
+; ---------------------------------------------------------------------------- ; + +define i32 @bextr32_d0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_d0: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_d0: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_d0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %shifted, %numhighbits + %masked = lshr i32 %highbitscleared, %numhighbits + ret i32 %masked +} + +define i32 @bextr32_d1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr32_d1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_d1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_d1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_d1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #32 +; V6M-NEXT: subs r1, r1, r2 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %highbitscleared = shl i32 %shifted, %sh_prom + %masked = lshr i32 %highbitscleared, %sh_prom + ret i32 %masked +} + +define i32 @bextr32_d2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_d2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_d2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_d2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_d2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %shifted, %numhighbits + %masked = lshr i32 
%highbitscleared, %numhighbits + ret i32 %masked +} + +define i32 @bextr32_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr32_d3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_d3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_d3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_d3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #32 +; V6M-NEXT: subs r1, r1, r2 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %highbitscleared = shl i32 %shifted, %sh_prom + %masked = lshr i32 %highbitscleared, %sh_prom + ret i32 %masked +} + +; 64-bit. + +define i64 @bextr64_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_d0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: rsb.w lr, r12, #32 +; V7M-NEXT: rsb.w r12, r3, #32 +; V7M-NEXT: lsls r1, r3 +; V7M-NEXT: cmp.w lr, #0 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orr.w r1, r1, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, lr +; V7M-NEXT: lsl.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r2, r1, r12 +; V7M-NEXT: lsr.w r0, r0, r3 +; V7M-NEXT: orr.w r0, r0, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, lr +; V7M-NEXT: lsr.w r1, r1, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_d0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: lsr r3, r1, r2 +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: rsb r1, r12, #64 +; V7A-NEXT: rsb lr, r1, #32 +; V7A-NEXT: lsr r2, r0, lr +; V7A-NEXT: orr r2, r2, r3, lsl r1 +; V7A-NEXT: rsbs r3, r12, #32 +; V7A-NEXT: lslpl r2, r0, r3 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: lsr r1, r2, r1 +; V7A-NEXT: orr r0, r0, r2, lsl lr +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lsrpl r0, r2, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; 
V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orrs r0, r3 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r3 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: rsb.w r3, r12, #64 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsls r1, r3 +; V7A-T-NEXT: rsbs.w r2, r12, #32 +; V7A-T-NEXT: lsr.w r4, r0, lr +; V7A-T-NEXT: orr.w r1, r1, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: lsr.w r1, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_d0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r2, [sp, #8] +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r4, r3, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %shifted, %numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + ret i64 %masked +} + +define i64 @bextr64_d1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr64_d1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: uxtb.w lr, r2 +; V7M-NEXT: subs.w r2, lr, #32 +; V7M-NEXT: lsr.w r12, r0, lr +; V7M-NEXT: rsb.w r0, lr, #32 +; V7M-NEXT: lsl.w r0, r1, r0 +; V7M-NEXT: orr.w r0, r0, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: rsb.w r2, r3, #64 +; V7M-NEXT: lsr.w r1, r1, lr +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: rsb.w r12, r2, #32 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: sub.w r3, r2, #32 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orrs r1, r4 +; V7M-NEXT: cmp r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, r3 +; V7M-NEXT: lsl.w r0, r0, r2 +; V7M-NEXT: lsl.w r4, r1, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsr.w r0, r0, r2 +; V7M-NEXT: orr.w r0, r0, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_d1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r11, lr} +; V7A-NEXT: push {r4, r5, r11, lr} +; V7A-NEXT: uxtb r12, r2 +; V7A-NEXT: lsr lr, r0, r12 +; V7A-NEXT: rsb r0, r12, #32 +; V7A-NEXT: orr r0, lr, r1, lsl r0 +; V7A-NEXT: mvn lr, #31 +; V7A-NEXT: uxtab r2, lr, r2 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: rsb r2, r3, #64 +; V7A-NEXT: lsr r1, r1, r12 +; V7A-NEXT: uxtb r3, r2 +; V7A-NEXT: rsb r4, r3, #32 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: uxtab r2, lr, r2 +; V7A-NEXT: lsr r5, r0, r4 +; V7A-NEXT: orr r1, r5, r1, lsl r3 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: lslpl r1, r0, r2 +; V7A-NEXT: lsl r0, r0, r3 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r3 +; V7A-NEXT: orr r0, r0, r1, lsl r4 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: lsr r1, r1, r3 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r4, r5, r11, pc} +; +; V7A-T-LABEL: bextr64_d1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r6, r7, lr} +; V7A-T-NEXT: push {r4, r5, r6, r7, lr} +; V7A-T-NEXT: uxtb.w r12, r2 
+; V7A-T-NEXT: rsb.w r6, r12, #32 +; V7A-T-NEXT: rsb.w r3, r3, #64 +; V7A-T-NEXT: lsr.w r0, r0, r12 +; V7A-T-NEXT: mvn r7, #31 +; V7A-T-NEXT: uxtab r2, r7, r2 +; V7A-T-NEXT: lsl.w r6, r1, r6 +; V7A-T-NEXT: lsr.w lr, r1, r12 +; V7A-T-NEXT: orrs r0, r6 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w lr, #0 +; V7A-T-NEXT: uxtb r5, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: rsb.w r1, r5, #32 +; V7A-T-NEXT: uxtab r3, r7, r3 +; V7A-T-NEXT: lsl.w r4, lr, r5 +; V7A-T-NEXT: lsr.w r2, r0, r1 +; V7A-T-NEXT: cmp r3, #0 +; V7A-T-NEXT: orr.w r2, r2, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r2, r0, r3 +; V7A-T-NEXT: lsl.w r0, r0, r5 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: lsr.w r0, r0, r5 +; V7A-T-NEXT: orr.w r0, r0, r1 +; V7A-T-NEXT: lsr.w r1, r2, r5 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, r5, r6, r7, pc} +; +; V6M-LABEL: bextr64_d1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r4, r3 +; V6M-NEXT: uxtb r2, r2 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: movs r2, #64 +; V6M-NEXT: subs r2, r2, r4 +; V6M-NEXT: uxtb r4, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %highbitscleared = shl i64 %shifted, %sh_prom + %masked = lshr i64 %highbitscleared, %sh_prom + ret i64 %masked +} + +define i64 @bextr64_d2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_d2_load: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: rsb.w lr, r12, #32 +; V7M-NEXT: orrs r0, r1 +; V7M-NEXT: subs.w r1, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r1 +; V7M-NEXT: rsb.w r1, r12, #64 +; V7M-NEXT: lsr.w r2, r3, r2 +; V7M-NEXT: rsb.w r12, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: cmp.w lr, #0 +; V7M-NEXT: lsl.w r2, r2, r1 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orr.w r2, r2, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r0, lr +; V7M-NEXT: lsl.w r0, r0, r1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r3, r2, r12 +; V7M-NEXT: lsr.w r0, r0, r1 +; V7M-NEXT: lsr.w r1, r2, r1 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r2, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_d2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: ldrd r0, r1, [r0] +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: lsr r3, r1, r2 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: rsb r1, r12, #64 +; V7A-NEXT: rsb lr, r1, #32 +; V7A-NEXT: lsr r2, r0, lr +; V7A-NEXT: orr r2, r2, r3, lsl r1 +; V7A-NEXT: rsbs r3, r12, #32 +; V7A-NEXT: lslpl r2, r0, r3 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: lsr r1, r2, r1 +; V7A-NEXT: orr r0, r0, r2, lsl lr +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lsrpl r0, r2, r3 +; 
V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_d2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldrd r0, r3, [r0] +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: lsl.w r1, r3, r1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: orrs r0, r1 +; V7A-T-NEXT: subs.w r1, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r3, r1 +; V7A-T-NEXT: lsr.w r2, r3, r2 +; V7A-T-NEXT: rsb.w r1, r12, #64 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: rsb.w lr, r1, #32 +; V7A-T-NEXT: rsbs.w r3, r12, #32 +; V7A-T-NEXT: lsl.w r2, r2, r1 +; V7A-T-NEXT: lsr.w r4, r0, lr +; V7A-T-NEXT: orr.w r2, r2, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r2, r0, r3 +; V7A-T-NEXT: lsl.w r0, r0, r1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r2, lr +; V7A-T-NEXT: lsr.w r0, r0, r1 +; V7A-T-NEXT: lsr.w r1, r2, r1 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_d2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r2, [sp, #8] +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r4, r3, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %shifted, %numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + ret i64 %masked +} + +define i64 @bextr64_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr64_d3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: ldrd r0, lr, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: subs.w r3, r1, #32 +; V7M-NEXT: lsr.w r12, r0, r1 +; V7M-NEXT: rsb.w r0, r1, #32 +; V7M-NEXT: lsr.w r1, lr, r1 +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: lsl.w r0, lr, r0 +; V7M-NEXT: orr.w r0, r0, r12 +; V7M-NEXT: rsb.w r12, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, lr, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: sub.w r3, r2, #32 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orrs r1, r4 +; V7M-NEXT: cmp r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, r3 +; V7M-NEXT: lsl.w r0, r0, r2 +; V7M-NEXT: lsl.w r4, r1, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsr.w r0, r0, r2 +; V7M-NEXT: orr.w r0, r0, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_d3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r11, lr} +; V7A-NEXT: push {r4, r5, r11, lr} +; V7A-NEXT: ldr r4, [r0] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: uxtb r0, r1 +; V7A-NEXT: lsr r12, r4, r0 +; V7A-NEXT: rsb r4, r0, #32 +; V7A-NEXT: lsr r0, r3, r0 +; V7A-NEXT: orr r4, r12, r3, lsl r4 +; V7A-NEXT: mvn r12, #31 +; V7A-NEXT: uxtab r1, r12, r1 +; V7A-NEXT: cmp r1, #0 +; V7A-NEXT: lsrpl r4, r3, r1 +; V7A-NEXT: rsb r1, r2, #64 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: uxtb r2, r1 +; V7A-NEXT: rsb lr, r2, #32 +; V7A-NEXT: uxtab r1, r12, r1 +; V7A-NEXT: lsr r5, r4, lr +; V7A-NEXT: orr r3, r5, r0, lsl r2 +; 
V7A-NEXT: cmp r1, #0 +; V7A-NEXT: lsl r0, r4, r2 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lslpl r3, r4, r1 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: orr r0, r0, r3, lsl lr +; V7A-NEXT: lsrpl r0, r3, r1 +; V7A-NEXT: lsr r1, r3, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r4, r5, r11, pc} +; +; V7A-T-LABEL: bextr64_d3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r6, lr} +; V7A-T-NEXT: push {r4, r5, r6, lr} +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: uxtb r0, r1 +; V7A-T-NEXT: rsb.w r6, r0, #32 +; V7A-T-NEXT: lsr.w r3, lr, r0 +; V7A-T-NEXT: rsb.w r2, r2, #64 +; V7A-T-NEXT: mvn r4, #31 +; V7A-T-NEXT: lsr.w r0, r12, r0 +; V7A-T-NEXT: uxtab r1, r4, r1 +; V7A-T-NEXT: lsl.w r6, lr, r6 +; V7A-T-NEXT: orrs r0, r6 +; V7A-T-NEXT: cmp r1, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: uxtb r5, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, lr, r1 +; V7A-T-NEXT: rsb.w r1, r5, #32 +; V7A-T-NEXT: lsls r3, r5 +; V7A-T-NEXT: uxtab r2, r4, r2 +; V7A-T-NEXT: lsr.w r6, r0, r1 +; V7A-T-NEXT: orrs r3, r6 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r5 +; V7A-T-NEXT: lsl.w r1, r3, r1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsr.w r0, r0, r5 +; V7A-T-NEXT: orr.w r0, r0, r1 +; V7A-T-NEXT: lsr.w r1, r3, r5 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r3, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, r5, r6, pc} +; +; V6M-LABEL: bextr64_d3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r2 +; V6M-NEXT: ldr r5, [r0] +; V6M-NEXT: ldr r3, [r0, #4] +; V6M-NEXT: uxtb r2, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: movs r2, #64 +; V6M-NEXT: subs r2, r2, r4 +; V6M-NEXT: uxtb r4, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, r5, r7, pc} + %val = load i64, ptr %w + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %highbitscleared = shl i64 %shifted, %sh_prom + %masked = lshr i64 %highbitscleared, %sh_prom + ret i64 %masked +} + +; 64-bit, but with 32-bit output + +; Everything done in 64-bit, truncation happens last. 
+define i32 @bextr64_32_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_d0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: rsb.w lr, r12, #32 +; V7M-NEXT: rsb.w r12, r3, #32 +; V7M-NEXT: lsls r1, r3 +; V7M-NEXT: cmp.w lr, #0 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orr.w r1, r1, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, lr +; V7M-NEXT: lsl.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r2, r1, r12 +; V7M-NEXT: lsr.w r0, r0, r3 +; V7M-NEXT: orr.w r0, r0, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, lr +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_32_d0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: lsr r3, r1, r2 +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: rsb r1, r12, #64 +; V7A-NEXT: rsb lr, r1, #32 +; V7A-NEXT: lsr r2, r0, lr +; V7A-NEXT: orr r2, r2, r3, lsl r1 +; V7A-NEXT: rsbs r3, r12, #32 +; V7A-NEXT: lslpl r2, r0, r3 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: orr r0, r0, r2, lsl lr +; V7A-NEXT: lsrpl r0, r2, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_32_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orrs r0, r3 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r3 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: rsb.w r3, r12, #64 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsls r1, r3 +; V7A-T-NEXT: rsbs.w r2, r12, #32 +; V7A-T-NEXT: lsr.w r4, r0, lr +; V7A-T-NEXT: orr.w r1, r1, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_32_d0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r2, [sp, #8] +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r4, r3, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %shifted, %numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + %res = trunc i64 %masked to i32 + ret i32 %res +} + +; Shifting happens in 64-bit, then truncation. Masking is 32-bit. 
+define i32 @bextr64_32_d1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_d1: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_d1: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: rsb r1, r12, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_d1: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: rsb.w r1, r12, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_d1: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %truncshifted = trunc i64 %shifted to i32 + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %truncshifted, %numhighbits + %masked = lshr i32 %highbitscleared, %numhighbits + ret i32 %masked +} + +; ---------------------------------------------------------------------------- ; +; Constant +; ---------------------------------------------------------------------------- ; + +; https://bugs.llvm.org/show_bug.cgi?id=38938 +define void @pr38938(ptr %a0, ptr %a1) nounwind { +; V7M-LABEL: pr38938: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r1, [r1] +; V7M-NEXT: ubfx r1, r1, #21, #10 +; V7M-NEXT: ldr.w r2, [r0, r1, lsl #2] +; V7M-NEXT: adds r2, #1 +; V7M-NEXT: str.w r2, [r0, r1, lsl #2] +; V7M-NEXT: bx lr +; +; V7A-LABEL: pr38938: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r1, [r1] +; V7A-NEXT: ubfx r1, r1, #21, #10 +; V7A-NEXT: ldr r2, [r0, r1, lsl #2] +; V7A-NEXT: add r2, r2, #1 +; V7A-NEXT: str r2, [r0, r1, lsl #2] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: pr38938: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r1, [r1] +; V7A-T-NEXT: ubfx r1, r1, #21, #10 +; V7A-T-NEXT: ldr.w r2, [r0, r1, lsl #2] +; V7A-T-NEXT: adds r2, #1 +; V7A-T-NEXT: str.w r2, [r0, r1, lsl #2] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: pr38938: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r1] +; V6M-NEXT: lsrs r1, r1, #19 +; V6M-NEXT: ldr r2, .LCPI51_0 +; V6M-NEXT: ands r2, r1 +; V6M-NEXT: ldr r1, [r0, r2] +; V6M-NEXT: adds r1, r1, #1 +; V6M-NEXT: str r1, [r0, r2] +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI51_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp = load i64, ptr %a1, align 8 + %tmp1 = lshr i64 %tmp, 21 + %tmp2 = and i64 %tmp1, 1023 + %tmp3 = getelementptr inbounds i32, ptr %a0, i64 %tmp2 + %tmp4 = load i32, ptr %tmp3, align 4 + %tmp5 = add nsw i32 %tmp4, 1 + store i32 %tmp5, ptr %tmp3, align 4 + ret void +} + +; The most canonical variant +define i32 @c0_i32(i32 %arg) nounwind { +; V7M-LABEL: c0_i32: +; V7M: @ %bb.0: +; V7M-NEXT: ubfx r0, r0, #19, #10 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c0_i32: +; V7A: @ %bb.0: +; 
V7A-NEXT: ubfx r0, r0, #19, #10 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c0_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ubfx r0, r0, #19, #10 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c0_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsls r0, r0, #3 +; V6M-NEXT: lsrs r0, r0, #22 +; V6M-NEXT: bx lr + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 1023 + ret i32 %tmp1 +} + +; Should be still fine, but the mask is shifted +define i32 @c1_i32(i32 %arg) nounwind { +; V7M-LABEL: c1_i32: +; V7M: @ %bb.0: +; V7M-NEXT: movw r1, #4092 +; V7M-NEXT: and.w r0, r1, r0, lsr #19 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c1_i32: +; V7A: @ %bb.0: +; V7A-NEXT: movw r1, #4092 +; V7A-NEXT: and r0, r1, r0, lsr #19 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c1_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r1, #4092 +; V7A-T-NEXT: and.w r0, r1, r0, lsr #19 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c1_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r1, r0, #19 +; V6M-NEXT: ldr r0, .LCPI53_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI53_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 4092 + ret i32 %tmp1 +} + +; Should be still fine, but the result is shifted left afterwards +define i32 @c2_i32(i32 %arg) nounwind { +; V7M-LABEL: c2_i32: +; V7M: @ %bb.0: +; V7M-NEXT: movw r1, #4092 +; V7M-NEXT: and.w r0, r1, r0, lsr #17 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c2_i32: +; V7A: @ %bb.0: +; V7A-NEXT: movw r1, #4092 +; V7A-NEXT: and r0, r1, r0, lsr #17 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c2_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r1, #4092 +; V7A-T-NEXT: and.w r0, r1, r0, lsr #17 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c2_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r1, r0, #17 +; V6M-NEXT: ldr r0, .LCPI54_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI54_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 1023 + %tmp2 = shl i32 %tmp1, 2 + ret i32 %tmp2 +} + +; The mask covers newly shifted-in bit +define i32 @c4_i32_bad(i32 %arg) nounwind { +; V7M-LABEL: c4_i32_bad: +; V7M: @ %bb.0: +; V7M-NEXT: mvn r1, #1 +; V7M-NEXT: and.w r0, r1, r0, lsr #19 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c4_i32_bad: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r1, #1 +; V7A-NEXT: and r0, r1, r0, lsr #19 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c4_i32_bad: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mvn r1, #1 +; V7A-T-NEXT: and.w r0, r1, r0, lsr #19 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c4_i32_bad: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r0, #20 +; V6M-NEXT: lsls r0, r0, #1 +; V6M-NEXT: bx lr + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 16382 + ret i32 %tmp1 +} + +; i64 + +; The most canonical variant +define i64 @c0_i64(i64 %arg) nounwind { +; V7M-LABEL: c0_i64: +; V7M: @ %bb.0: +; V7M-NEXT: ubfx r0, r1, #19, #10 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c0_i64: +; V7A: @ %bb.0: +; V7A-NEXT: ubfx r0, r1, #19, #10 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c0_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ubfx r0, r1, #19, #10 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c0_i64: +; V6M: @ %bb.0: +; V6M-NEXT: lsls r0, r1, #3 +; V6M-NEXT: lsrs r0, r0, #22 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 1023 + ret i64 %tmp1 +} + +; Should be still fine, but the mask is shifted +define i64 @c1_i64(i64 %arg) nounwind { +; V7M-LABEL: c1_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movw r0, #4092 +; V7M-NEXT: and.w r0, r0, r1, 
lsr #19 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c1_i64: +; V7A: @ %bb.0: +; V7A-NEXT: movw r0, #4092 +; V7A-NEXT: and r0, r0, r1, lsr #19 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c1_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r0, #4092 +; V7A-T-NEXT: and.w r0, r0, r1, lsr #19 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c1_i64: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r1, r1, #19 +; V6M-NEXT: ldr r0, .LCPI57_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI57_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 4092 + ret i64 %tmp1 +} + +; Should be still fine, but the result is shifted left afterwards +define i64 @c2_i64(i64 %arg) nounwind { +; V7M-LABEL: c2_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movw r0, #4092 +; V7M-NEXT: and.w r0, r0, r1, lsr #17 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c2_i64: +; V7A: @ %bb.0: +; V7A-NEXT: movw r0, #4092 +; V7A-NEXT: and r0, r0, r1, lsr #17 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c2_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r0, #4092 +; V7A-T-NEXT: and.w r0, r0, r1, lsr #17 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c2_i64: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r1, r1, #17 +; V6M-NEXT: ldr r0, .LCPI58_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI58_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 1023 + %tmp2 = shl i64 %tmp1, 2 + ret i64 %tmp2 +} + +; The mask covers newly shifted-in bit +define i64 @c4_i64_bad(i64 %arg) nounwind { +; V7M-LABEL: c4_i64_bad: +; V7M: @ %bb.0: +; V7M-NEXT: mvn r0, #1 +; V7M-NEXT: and.w r0, r0, r1, lsr #19 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c4_i64_bad: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r0, #1 +; V7A-NEXT: and r0, r0, r1, lsr #19 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c4_i64_bad: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mvn r0, #1 +; V7A-T-NEXT: and.w r0, r0, r1, lsr #19 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c4_i64_bad: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1, #20 +; V6M-NEXT: lsls r0, r0, #1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 16382 + ret i64 %tmp1 +} + +; ---------------------------------------------------------------------------- ; +; Constant, storing the result afterwards. 
+; ---------------------------------------------------------------------------- ; + +; i32 + +; The most canonical variant +define void @c5_i32(i32 %arg, ptr %ptr) nounwind { +; V7M-LABEL: c5_i32: +; V7M: @ %bb.0: +; V7M-NEXT: ubfx r0, r0, #19, #10 +; V7M-NEXT: str r0, [r1] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c5_i32: +; V7A: @ %bb.0: +; V7A-NEXT: ubfx r0, r0, #19, #10 +; V7A-NEXT: str r0, [r1] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c5_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ubfx r0, r0, #19, #10 +; V7A-T-NEXT: str r0, [r1] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c5_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsls r0, r0, #3 +; V6M-NEXT: lsrs r0, r0, #22 +; V6M-NEXT: str r0, [r1] +; V6M-NEXT: bx lr + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 1023 + store i32 %tmp1, ptr %ptr + ret void +} + +; Should be still fine, but the mask is shifted +define void @c6_i32(i32 %arg, ptr %ptr) nounwind { +; V7M-LABEL: c6_i32: +; V7M: @ %bb.0: +; V7M-NEXT: ubfx r0, r0, #19, #12 +; V7M-NEXT: str r0, [r1] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c6_i32: +; V7A: @ %bb.0: +; V7A-NEXT: ubfx r0, r0, #19, #12 +; V7A-NEXT: str r0, [r1] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c6_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ubfx r0, r0, #19, #12 +; V7A-T-NEXT: str r0, [r1] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c6_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsls r0, r0, #1 +; V6M-NEXT: lsrs r0, r0, #20 +; V6M-NEXT: str r0, [r1] +; V6M-NEXT: bx lr + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 4095 + store i32 %tmp1, ptr %ptr + ret void +} + +; Should be still fine, but the result is shifted left afterwards +define void @c7_i32(i32 %arg, ptr %ptr) nounwind { +; V7M-LABEL: c7_i32: +; V7M: @ %bb.0: +; V7M-NEXT: movw r2, #4092 +; V7M-NEXT: and.w r0, r2, r0, lsr #17 +; V7M-NEXT: str r0, [r1] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c7_i32: +; V7A: @ %bb.0: +; V7A-NEXT: movw r2, #4092 +; V7A-NEXT: and r0, r2, r0, lsr #17 +; V7A-NEXT: str r0, [r1] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c7_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r2, #4092 +; V7A-T-NEXT: and.w r0, r2, r0, lsr #17 +; V7A-T-NEXT: str r0, [r1] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c7_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r0, #17 +; V6M-NEXT: ldr r2, .LCPI62_0 +; V6M-NEXT: ands r2, r0 +; V6M-NEXT: str r2, [r1] +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI62_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 1023 + %tmp2 = shl i32 %tmp1, 2 + store i32 %tmp2, ptr %ptr + ret void +} + +; i64 + +; The most canonical variant +define void @c5_i64(i64 %arg, ptr %ptr) nounwind { +; V7M-LABEL: c5_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movs r0, #0 +; V7M-NEXT: ubfx r1, r1, #19, #10 +; V7M-NEXT: strd r1, r0, [r2] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c5_i64: +; V7A: @ %bb.0: +; V7A-NEXT: mov r0, #0 +; V7A-NEXT: str r0, [r2, #4] +; V7A-NEXT: ubfx r0, r1, #19, #10 +; V7A-NEXT: str r0, [r2] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c5_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r0, #0 +; V7A-T-NEXT: ubfx r1, r1, #19, #10 +; V7A-T-NEXT: strd r1, r0, [r2] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c5_i64: +; V6M: @ %bb.0: +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: lsls r1, r1, #3 +; V6M-NEXT: lsrs r1, r1, #22 +; V6M-NEXT: str r1, [r2] +; V6M-NEXT: str r0, [r2, #4] +; V6M-NEXT: bx lr + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 1023 + store i64 %tmp1, ptr %ptr + ret void +} + +; Should be still fine, but the mask is shifted +define void @c6_i64(i64 %arg, ptr %ptr) nounwind { +; V7M-LABEL: c6_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movs r0, 
#0 +; V7M-NEXT: ubfx r1, r1, #19, #12 +; V7M-NEXT: strd r1, r0, [r2] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c6_i64: +; V7A: @ %bb.0: +; V7A-NEXT: mov r0, #0 +; V7A-NEXT: str r0, [r2, #4] +; V7A-NEXT: ubfx r0, r1, #19, #12 +; V7A-NEXT: str r0, [r2] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c6_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r0, #0 +; V7A-T-NEXT: ubfx r1, r1, #19, #12 +; V7A-T-NEXT: strd r1, r0, [r2] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c6_i64: +; V6M: @ %bb.0: +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: lsls r1, r1, #1 +; V6M-NEXT: lsrs r1, r1, #20 +; V6M-NEXT: str r1, [r2] +; V6M-NEXT: str r0, [r2, #4] +; V6M-NEXT: bx lr + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 4095 + store i64 %tmp1, ptr %ptr + ret void +} + +; Should be still fine, but the result is shifted left afterwards +define void @c7_i64(i64 %arg, ptr %ptr) nounwind { +; V7M-LABEL: c7_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movs r0, #0 +; V7M-NEXT: movw r3, #4092 +; V7M-NEXT: and.w r1, r3, r1, lsr #17 +; V7M-NEXT: strd r1, r0, [r2] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c7_i64: +; V7A: @ %bb.0: +; V7A-NEXT: movw r0, #4092 +; V7A-NEXT: mov r3, #0 +; V7A-NEXT: and r0, r0, r1, lsr #17 +; V7A-NEXT: stm r2, {r0, r3} +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c7_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r0, #0 +; V7A-T-NEXT: movw r3, #4092 +; V7A-T-NEXT: and.w r1, r3, r1, lsr #17 +; V7A-T-NEXT: strd r1, r0, [r2] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c7_i64: +; V6M: @ %bb.0: +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: lsrs r1, r1, #17 +; V6M-NEXT: ldr r3, .LCPI65_0 +; V6M-NEXT: ands r3, r1 +; V6M-NEXT: str r3, [r2] +; V6M-NEXT: str r0, [r2, #4] +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI65_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 1023 + %tmp2 = shl i64 %tmp1, 2 + store i64 %tmp2, ptr %ptr + ret void +} diff --git a/llvm/test/CodeGen/ARM/extract-lowbits.ll b/llvm/test/CodeGen/ARM/extract-lowbits.ll new file mode 100644 index 0000000..b483793 --- /dev/null +++ b/llvm/test/CodeGen/ARM/extract-lowbits.ll @@ -0,0 +1,2752 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M +; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A +; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T +; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M + +; Patterns: +; a) x & (1 << nbits) - 1 +; b) x & ~(-1 << nbits) +; c) x & (-1 >> (32 - y)) +; d) x << (32 - y) >> (32 - y) +; are equivalent. + +; ---------------------------------------------------------------------------- ; +; Pattern a. 
32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bzhi32_a0(i32 %val, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_a0: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a0: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_a1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi32_a1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %conv = zext i8 %numlowbits to i32 + %onebit = shl i32 1, %conv + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_a2_load(ptr %w, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_a2_load: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a2_load: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi32_a3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: 
movs r2, #1 +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %conv = zext i8 %numlowbits to i32 + %onebit = shl i32 1, %conv + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_a4_commutative(i32 %val, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_a4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %val, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bzhi64_a0(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_a0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: sbc r3, r3, #0 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_a0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: lsr lr, r12, r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsl r2, r12, r2 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl lr, r12, r3 +; V7A-NEXT: subs r2, r2, #1 +; V7A-NEXT: sbc r3, lr, #0 +; V7A-NEXT: and r0, r2, r0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: mov.w r12, #1 +; V7A-T-NEXT: subs.w lr, r2, #32 +; V7A-T-NEXT: lsl.w r2, r12, r2 +; V7A-T-NEXT: lsr.w r3, r12, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r12, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs r2, #1 +; V7A-T-NEXT: sbc r3, r3, #0 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r6, #0 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: sbcs r1, r6 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +; Check that we don't throw 
away the vreg_width-1 mask if not using shifts +define i64 @bzhi64_a0_masked(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_a0_masked: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: and r2, r2, #63 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: sbc r3, r3, #0 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_a0_masked: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: and r2, r2, #63 +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr lr, r12, r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsl r2, r12, r2 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl lr, r12, r3 +; V7A-NEXT: subs r2, r2, #1 +; V7A-NEXT: sbc r3, lr, #0 +; V7A-NEXT: and r0, r2, r0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_a0_masked: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: and r2, r2, #63 +; V7A-T-NEXT: mov.w r12, #1 +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: subs.w lr, r2, #32 +; V7A-T-NEXT: lsl.w r2, r12, r2 +; V7A-T-NEXT: lsr.w r3, r12, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r12, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs r2, #1 +; V7A-T-NEXT: sbc r3, r3, #0 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a0_masked: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #63 +; V6M-NEXT: ands r2, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r6, #0 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: sbcs r1, r6 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %numlowbits.masked = and i64 %numlowbits, 63 + %onebit = shl i64 1, %numlowbits.masked + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_a1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi64_a1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: sbc r3, r3, #0 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_a1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: lsr lr, r12, r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsl r2, r12, r2 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl lr, r12, r3 +; V7A-NEXT: subs r2, r2, #1 +; V7A-NEXT: sbc r3, lr, #0 +; V7A-NEXT: and r0, r2, r0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_a1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: mov.w r12, #1 +; V7A-T-NEXT: subs.w lr, r2, #32 +; V7A-T-NEXT: lsl.w r2, 
r12, r2 +; V7A-T-NEXT: lsr.w r3, r12, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r12, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs r2, #1 +; V7A-T-NEXT: sbc r3, r3, #0 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r6, #0 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: sbcs r1, r6 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %conv = zext i8 %numlowbits to i64 + %onebit = shl i64 1, %conv + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_a2_load(ptr %w, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_a2_load: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: movs r3, #1 +; V7M-NEXT: subs.w r12, r2, #32 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: lsr.w r1, r3, r1 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r3, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: sbc r1, r1, #0 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_a2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r11, lr} +; V7A-NEXT: push {r4, r6, r11, lr} +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: mov r1, #1 +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: rsb r0, r2, #32 +; V7A-NEXT: subs r4, r2, #32 +; V7A-NEXT: lsr r0, r1, r0 +; V7A-NEXT: lslpl r0, r1, r4 +; V7A-NEXT: lsl r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: subs r2, r1, #1 +; V7A-NEXT: sbc r0, r0, #0 +; V7A-NEXT: and r1, r0, r3 +; V7A-NEXT: and r0, r2, r6 +; V7A-NEXT: pop {r4, r6, r11, pc} +; +; V7A-T-LABEL: bzhi64_a2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: subs.w r0, r2, #32 +; V7A-T-NEXT: lsr.w r3, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r1, r0 +; V7A-T-NEXT: lsl.w r0, r1, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs r0, #1 +; V7A-T-NEXT: sbc r1, r3, #0 +; V7A-T-NEXT: and.w r0, r0, r12 +; V7A-T-NEXT: and.w r1, r1, lr +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r5, #0 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r2, r0, #1 +; V6M-NEXT: sbcs r1, r5 +; V6M-NEXT: ldm r4!, {r0, r3} +; V6M-NEXT: ands r1, r3 +; V6M-NEXT: ands r0, r2 +; V6M-NEXT: pop {r4, r5, r7, pc} + %val = load i64, ptr %w + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi64_a3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r2, r1, #32 +; V7M-NEXT: movs r3, #1 +; V7M-NEXT: subs.w r12, r1, #32 +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsr.w r2, r3, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r3, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: subs r3, r1, #1 +; V7M-NEXT: sbc r1, r2, #0 +; V7M-NEXT: ldrd r0, r2, [r0] +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: bx lr 
+; +; V7A-LABEL: bzhi64_a3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r11, lr} +; V7A-NEXT: push {r4, r6, r11, lr} +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: rsb r0, r1, #32 +; V7A-NEXT: subs r4, r1, #32 +; V7A-NEXT: lsl r1, r2, r1 +; V7A-NEXT: lsr r0, r2, r0 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lslpl r0, r2, r4 +; V7A-NEXT: subs r2, r1, #1 +; V7A-NEXT: sbc r0, r0, #0 +; V7A-NEXT: and r1, r0, r3 +; V7A-NEXT: and r0, r2, r6 +; V7A-NEXT: pop {r4, r6, r11, pc} +; +; V7A-T-LABEL: bzhi64_a3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r1, #32 +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: subs.w r0, r1, #32 +; V7A-T-NEXT: lsr.w r3, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r2, r0 +; V7A-T-NEXT: lsl.w r0, r2, r1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs r0, #1 +; V7A-T-NEXT: sbc r1, r3, #0 +; V7A-T-NEXT: and.w r0, r0, r12 +; V7A-T-NEXT: and.w r1, r1, lr +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r2, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r5, #0 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r2, r0, #1 +; V6M-NEXT: sbcs r1, r5 +; V6M-NEXT: ldm r4!, {r0, r3} +; V6M-NEXT: ands r1, r3 +; V6M-NEXT: ands r0, r2 +; V6M-NEXT: pop {r4, r5, r7, pc} + %val = load i64, ptr %w + %conv = zext i8 %numlowbits to i64 + %onebit = shl i64 1, %conv + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_a4_commutative(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_a4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: sbc r3, r3, #0 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_a4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: lsr lr, r12, r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsl r2, r12, r2 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl lr, r12, r3 +; V7A-NEXT: subs r2, r2, #1 +; V7A-NEXT: sbc r3, lr, #0 +; V7A-NEXT: and r0, r0, r2 +; V7A-NEXT: and r1, r1, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_a4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: mov.w r12, #1 +; V7A-T-NEXT: subs.w lr, r2, #32 +; V7A-T-NEXT: lsl.w r2, r12, r2 +; V7A-T-NEXT: lsr.w r3, r12, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r12, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs r2, #1 +; V7A-T-NEXT: sbc r3, r3, #0 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r6, #0 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: bl __aeabi_llsl 
+; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: sbcs r1, r6 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %val, %mask ; swapped order + ret i64 %masked +} + +; ---------------------------------------------------------------------------- ; +; Pattern b. 32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bzhi32_b0(i32 %val, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_b0: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b0: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_b1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi32_b1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %conv = zext i8 %numlowbits to i32 + %notmask = shl i32 -1, %conv + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_b2_load(ptr %w, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_b2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi32_b3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; 
V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %conv = zext i8 %numlowbits to i32 + %notmask = shl i32 -1, %conv + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_b4_commutative(i32 %val, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_b4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %val, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bzhi64_b0(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_b0: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r12, r3, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl.w r12, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r3, r2 +; V7M-NEXT: bic.w r0, r0, r12 +; V7M-NEXT: bics r1, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b0: +; V7A: @ %bb.0: +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r12, r3, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r2 +; V7A-T-NEXT: bic.w r0, r0, r12 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r4, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_b1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi64_b1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r12, r3, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl.w r12, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r3, r2 +; V7M-NEXT: bic.w r0, r0, r12 +; V7M-NEXT: bics r1, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_b1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; 
V7A-T-NEXT: lsl.w r12, r3, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r2 +; V7A-T-NEXT: bic.w r0, r0, r12 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r4, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %conv = zext i8 %numlowbits to i64 + %notmask = shl i64 -1, %conv + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_b2_load(ptr %w, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_b2_load: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r1, #-1 +; V7M-NEXT: subs.w r12, r2, #32 +; V7M-NEXT: lsl.w r3, r1, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: ldrd r0, r2, [r0] +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r1, r12 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: bic.w r1, r2, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r4, [r0] +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: subs r0, r2, #32 +; V7A-NEXT: lsl r2, r1, r2 +; V7A-NEXT: lslpl r1, r1, r0 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r1, r3, r1 +; V7A-NEXT: bic r0, r4, r2 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bzhi64_b2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r1, #-1 +; V7A-T-NEXT: ldrd r0, r12, [r0] +; V7A-T-NEXT: lsl.w r3, r1, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r1, r2 +; V7A-T-NEXT: bics r0, r3 +; V7A-T-NEXT: bic.w r1, r12, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: ldm r4!, {r2, r3} +; V6M-NEXT: bics r2, r0 +; V6M-NEXT: bics r3, r1 +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: mov r1, r3 +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi64_b3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: subs.w r12, r1, #32 +; V7M-NEXT: lsl.w r3, r2, r1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: ldrd r0, r1, [r0] +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r2, r12 +; V7M-NEXT: bics r1, r2 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r11, lr} +; V7A-NEXT: push {r4, r6, r11, lr} +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: subs r0, r1, #32 +; V7A-NEXT: lsl r4, r2, r1 +; V7A-NEXT: lslpl r2, r2, r0 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: bic r1, r3, r2 +; V7A-NEXT: bic r0, r6, r4 +; V7A-NEXT: pop {r4, r6, r11, pc} +; +; V7A-T-LABEL: bzhi64_b3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: ldrd r0, r12, [r0] +; V7A-T-NEXT: lsl.w r3, r2, r1 +; V7A-T-NEXT: subs r1, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl 
r3, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r2, r1 +; V7A-T-NEXT: bics r0, r3 +; V7A-T-NEXT: bic.w r1, r12, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r2, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: ldm r4!, {r2, r3} +; V6M-NEXT: bics r2, r0 +; V6M-NEXT: bics r3, r1 +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: mov r1, r3 +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %conv = zext i8 %numlowbits to i64 + %notmask = shl i64 -1, %conv + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_b4_commutative(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_b4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r12, r3, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl.w r12, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r3, r2 +; V7M-NEXT: bic.w r0, r0, r12 +; V7M-NEXT: bics r1, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_b4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r12, r3, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r2 +; V7A-T-NEXT: bic.w r0, r0, r12 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r4, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %val, %mask ; swapped order + ret i64 %masked +} + +; ---------------------------------------------------------------------------- ; +; Pattern c. 
32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bzhi32_c0(i32 %val, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_c0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c0: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_c1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %mask = lshr i32 -1, %sh_prom + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_c2_load(ptr %w, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_c2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_c3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: 
lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %mask = lshr i32 -1, %sh_prom + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_c4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %val, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bzhi64_c0(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_c0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsbs.w lr, r2, #32 +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: mov.w r12, #-1 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsr.w r2, r12, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_c0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsbs lr, r2, #32 +; V7A-NEXT: rsb r2, r2, #64 +; V7A-NEXT: mvn r12, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r2, r12, r2 +; V7A-NEXT: lsrpl r3, r3, lr +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: and r1, r2, r1 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsbs.w lr, r2, #32 +; V7A-T-NEXT: rsb.w r2, r2, #64 +; V7A-T-NEXT: mov.w r12, #-1 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsr.w r2, r12, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r3, r3, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: ands r0, r3 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_c0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r2, r0, r2 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_c1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: subs.w r12, r2, #32 +; V7M-NEXT: lsr.w r2, r3, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, r12 +; V7M-NEXT: it pl +; 
V7M-NEXT: movpl r2, #0 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_c1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb lr, r2, #64 +; V7A-NEXT: mvn r2, #31 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: uxtb r12, lr +; V7A-NEXT: uxtab r2, r2, lr +; V7A-NEXT: lsr r12, r3, r12 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: movwpl r12, #0 +; V7A-NEXT: lsrpl r3, r3, r2 +; V7A-NEXT: and r1, r12, r1 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_c1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w lr, r2, #64 +; V7A-T-NEXT: mvn r2, #31 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: uxtb.w r12, lr +; V7A-T-NEXT: uxtab r2, r2, lr +; V7A-T-NEXT: lsr.w r12, r3, r12 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r2 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: ands r0, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_c1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r0, r0, r2 +; V6M-NEXT: uxtb r2, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %mask = lshr i64 -1, %sh_prom + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_c2_load(ptr %w, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_c2_load: +; V7M: @ %bb.0: +; V7M-NEXT: rsbs.w r1, r2, #32 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl r3, r1 +; V7M-NEXT: ldrd r0, r1, [r0] +; V7M-NEXT: mov.w r12, #-1 +; V7M-NEXT: lsr.w r2, r12, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_c2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r5, lr} +; V7A-NEXT: push {r5, lr} +; V7A-NEXT: rsbs r1, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: mvn r12, #0 +; V7A-NEXT: ldm r0, {r0, r5} +; V7A-NEXT: lsrpl r3, r3, r1 +; V7A-NEXT: rsb r1, r2, #64 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: lsr r1, r12, r1 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r1, r1, r5 +; V7A-NEXT: pop {r5, pc} +; +; V7A-T-LABEL: bzhi64_c2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsbs.w r1, r2, #32 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: ldrd r0, lr, [r0] +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r1 +; V7A-T-NEXT: rsb.w r1, r2, #64 +; V7A-T-NEXT: mov.w r12, #-1 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: lsr.w r1, r12, r1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, lr +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_c2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r2, r0, r2 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldm r4!, {r2, r3} +; V6M-NEXT: ands r0, r2 +; V6M-NEXT: ands r1, r3 +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %val + ret 
i64 %masked +} + +define i64 @bzhi64_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_c3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #64 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: subs.w r2, r1, #32 +; V7M-NEXT: lsr.w r1, r3, r1 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl r3, r2 +; V7M-NEXT: ldrd r0, r2, [r0] +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_c3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r11, lr} +; V7A-NEXT: push {r4, r6, r11, lr} +; V7A-NEXT: rsb r1, r1, #64 +; V7A-NEXT: mvn r4, #31 +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: uxtb r0, r1 +; V7A-NEXT: uxtab r4, r4, r1 +; V7A-NEXT: lsr r0, r2, r0 +; V7A-NEXT: cmp r4, #0 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: and r1, r0, r3 +; V7A-NEXT: lsrpl r2, r2, r4 +; V7A-NEXT: and r0, r2, r6 +; V7A-NEXT: pop {r4, r6, r11, pc} +; +; V7A-T-LABEL: bzhi64_c3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r1, r1, #64 +; V7A-T-NEXT: mvn r3, #31 +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: uxtb r0, r1 +; V7A-T-NEXT: uxtab r3, r3, r1 +; V7A-T-NEXT: lsr.w r0, r2, r0 +; V7A-T-NEXT: cmp r3, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: and.w r1, r0, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r2, r3 +; V7A-T-NEXT: and.w r0, r2, r12 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_c3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r0, r0, r1 +; V6M-NEXT: uxtb r2, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldm r4!, {r2, r3} +; V6M-NEXT: ands r0, r2 +; V6M-NEXT: ands r1, r3 +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %mask = lshr i64 -1, %sh_prom + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_c4_commutative(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_c4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsbs.w lr, r2, #32 +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: mov.w r12, #-1 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsr.w r2, r12, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_c4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsbs lr, r2, #32 +; V7A-NEXT: rsb r2, r2, #64 +; V7A-NEXT: mvn r12, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r2, r12, r2 +; V7A-NEXT: lsrpl r3, r3, lr +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r0, r0, r3 +; V7A-NEXT: and r1, r1, r2 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_c4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsbs.w lr, r2, #32 +; V7A-T-NEXT: rsb.w r2, r2, #64 +; V7A-T-NEXT: mov.w r12, #-1 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsr.w r2, r12, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r3, r3, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: ands r0, r3 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; 
V6M-LABEL: bzhi64_c4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r2, r0, r2 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %val, %mask ; swapped order + ret i64 %masked +} + +; ---------------------------------------------------------------------------- ; +; Pattern d. 32-bit. +; ---------------------------------------------------------------------------- ; + +define i32 @bzhi32_d0(i32 %val, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_d0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_d0: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_d0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %val, %numhighbits + %masked = lshr i32 %highbitscleared, %numhighbits + ret i32 %masked +} + +define i32 @bzhi32_d1_indexzext(i32 %val, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_d1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_d1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_d1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_d1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %highbitscleared = shl i32 %val, %sh_prom + %masked = lshr i32 %highbitscleared, %sh_prom + ret i32 %masked +} + +define i32 @bzhi32_d2_load(ptr %w, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_d2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_d2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_d2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_d2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %val, %numhighbits + %masked = lshr 
i32 %highbitscleared, %numhighbits + ret i32 %masked +} + +define i32 @bzhi32_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_d3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_d3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_d3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_d3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %highbitscleared = shl i32 %val, %sh_prom + %masked = lshr i32 %highbitscleared, %sh_prom + ret i32 %masked +} + +; 64-bit. + +define i64 @bzhi64_d0(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_d0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #64 +; V7M-NEXT: rsbs.w r2, r2, #32 +; V7M-NEXT: rsb.w lr, r3, #32 +; V7M-NEXT: lsl.w r12, r1, r3 +; V7M-NEXT: lsr.w r1, r0, lr +; V7M-NEXT: orr.w r1, r1, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, r2 +; V7M-NEXT: lsl.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r12, r1, lr +; V7M-NEXT: lsr.w r0, r0, r3 +; V7M-NEXT: orr.w r0, r0, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: lsr.w r1, r1, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_d0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb lr, r2, #64 +; V7A-NEXT: rsbs r2, r2, #32 +; V7A-NEXT: rsb r12, lr, #32 +; V7A-NEXT: lsr r3, r0, r12 +; V7A-NEXT: orr r1, r3, r1, lsl lr +; V7A-NEXT: lslpl r1, r0, r2 +; V7A-NEXT: lsl r0, r0, lr +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, lr +; V7A-NEXT: orr r0, r0, r1, lsl r12 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: lsr r1, r1, lr +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #64 +; V7A-T-NEXT: rsbs.w r2, r2, #32 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsl.w r12, r1, r3 +; V7A-T-NEXT: lsr.w r1, r0, lr +; V7A-T-NEXT: orr.w r1, r1, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r12, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: lsr.w r1, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_d0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r4, r3, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %val, 
%numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + ret i64 %masked +} + +define i64 @bzhi64_d1_indexzext(i64 %val, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_d1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsl.w r12, r1, r2 +; V7M-NEXT: lsr.w r1, r0, r3 +; V7M-NEXT: orr.w r1, r1, r12 +; V7M-NEXT: subs.w r12, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, r12 +; V7M-NEXT: lsl.w r0, r0, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: lsr.w r0, r0, r2 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r12 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_d1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb lr, r2, #64 +; V7A-NEXT: uxtb r3, lr +; V7A-NEXT: rsb r12, r3, #32 +; V7A-NEXT: lsr r2, r0, r12 +; V7A-NEXT: orr r1, r2, r1, lsl r3 +; V7A-NEXT: mvn r2, #31 +; V7A-NEXT: uxtab r2, r2, lr +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: lslpl r1, r0, r2 +; V7A-NEXT: lsl r0, r0, r3 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r3 +; V7A-NEXT: orr r0, r0, r1, lsl r12 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: lsr r1, r1, r3 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_d1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r4, r2, #64 +; V7A-T-NEXT: mvn r2, #31 +; V7A-T-NEXT: uxtb r3, r4 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsl.w r12, r1, r3 +; V7A-T-NEXT: uxtab r2, r2, r4 +; V7A-T-NEXT: lsr.w r1, r0, lr +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: orr.w r1, r1, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: lsr.w r1, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bzhi64_d1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: uxtb r4, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %highbitscleared = shl i64 %val, %sh_prom + %masked = lshr i64 %highbitscleared, %sh_prom + ret i64 %masked +} + +define i64 @bzhi64_d2_load(ptr %w, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_d2_load: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r1, r2, #64 +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: rsb.w lr, r1, #32 +; V7M-NEXT: rsbs.w r2, r2, #32 +; V7M-NEXT: lsl.w r12, r3, r1 +; V7M-NEXT: lsr.w r3, r0, lr +; V7M-NEXT: orr.w r3, r3, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r0, r2 +; V7M-NEXT: lsl.w r0, r0, r1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r12, r3, lr +; V7M-NEXT: lsr.w r0, r0, r1 +; V7M-NEXT: lsr.w r1, r3, r1 +; V7M-NEXT: orr.w r0, r0, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_d2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r5, r7, r11, lr} +; V7A-NEXT: push 
{r5, r7, r11, lr} +; V7A-NEXT: rsb r3, r2, #64 +; V7A-NEXT: ldm r0, {r0, r7} +; V7A-NEXT: rsb r1, r3, #32 +; V7A-NEXT: rsbs r2, r2, #32 +; V7A-NEXT: lsr r5, r0, r1 +; V7A-NEXT: orr r7, r5, r7, lsl r3 +; V7A-NEXT: lslpl r7, r0, r2 +; V7A-NEXT: lsl r0, r0, r3 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r3 +; V7A-NEXT: orr r0, r0, r7, lsl r1 +; V7A-NEXT: lsr r1, r7, r3 +; V7A-NEXT: lsrpl r0, r7, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r5, r7, r11, pc} +; +; V7A-T-LABEL: bzhi64_d2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #64 +; V7A-T-NEXT: ldrd r0, r1, [r0] +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: rsbs.w r2, r2, #32 +; V7A-T-NEXT: lsl.w r12, r1, r3 +; V7A-T-NEXT: lsr.w r1, r0, lr +; V7A-T-NEXT: orr.w r1, r1, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r12, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: lsr.w r1, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_d2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r4, r1, r2 +; V6M-NEXT: ldr r2, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %val, %numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + ret i64 %masked +} + +define i64 @bzhi64_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_d3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #64 +; V7M-NEXT: ldrd r0, r2, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: rsb.w r3, r1, #32 +; V7M-NEXT: lsl.w r12, r2, r1 +; V7M-NEXT: lsr.w r2, r0, r3 +; V7M-NEXT: orr.w r2, r2, r12 +; V7M-NEXT: subs.w r12, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r0, r12 +; V7M-NEXT: lsl.w r0, r0, r1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r3, r2, r3 +; V7M-NEXT: lsr.w r0, r0, r1 +; V7M-NEXT: lsr.w r1, r2, r1 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r2, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_d3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r5, r7, r11, lr} +; V7A-NEXT: push {r5, r7, r11, lr} +; V7A-NEXT: rsb r1, r1, #64 +; V7A-NEXT: ldm r0, {r0, r7} +; V7A-NEXT: uxtb r2, r1 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r5, r0, r3 +; V7A-NEXT: orr r7, r5, r7, lsl r2 +; V7A-NEXT: mvn r5, #31 +; V7A-NEXT: uxtab r1, r5, r1 +; V7A-NEXT: cmp r1, #0 +; V7A-NEXT: lslpl r7, r0, r1 +; V7A-NEXT: lsl r0, r0, r2 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: orr r0, r0, r7, lsl r3 +; V7A-NEXT: lsrpl r0, r7, r1 +; V7A-NEXT: lsr r1, r7, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r5, r7, r11, pc} +; +; V7A-T-LABEL: bzhi64_d3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r4, r1, #64 +; V7A-T-NEXT: ldrd r0, r2, [r0] +; V7A-T-NEXT: mvn r1, #31 +; V7A-T-NEXT: uxtb r3, r4 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsl.w r12, r2, r3 +; V7A-T-NEXT: uxtab r1, r1, r4 +; V7A-T-NEXT: lsr.w r2, r0, lr +; V7A-T-NEXT: 
cmp r1, #0 +; V7A-T-NEXT: orr.w r2, r2, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r2, r0, r1 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r2, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r2, r1 +; V7A-T-NEXT: lsr.w r1, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bzhi64_d3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: movs r2, #64 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r4, r1 +; V6M-NEXT: ldr r2, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %highbitscleared = shl i64 %val, %sh_prom + %masked = lshr i64 %highbitscleared, %sh_prom + ret i64 %masked +} + +; ---------------------------------------------------------------------------- ; +; Constant mask +; ---------------------------------------------------------------------------- ; + +; 32-bit + +define i32 @bzhi32_constant_mask32(i32 %val) nounwind { +; V7M-LABEL: bzhi32_constant_mask32: +; V7M: @ %bb.0: +; V7M-NEXT: bic r0, r0, #-2147483648 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask32: +; V7A: @ %bb.0: +; V7A-NEXT: bic r0, r0, #-2147483648 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bic r0, r0, #-2147483648 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask32: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r1, #31 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %masked = and i32 %val, 2147483647 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask32_load(ptr %val) nounwind { +; V7M-LABEL: bzhi32_constant_mask32_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: bic r0, r0, #-2147483648 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask32_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: bic r0, r0, #-2147483648 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask32_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: bic r0, r0, #-2147483648 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask32_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r1, #31 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %val1 = load i32, ptr %val + %masked = and i32 %val1, 2147483647 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask16(i32 %val) nounwind { +; V7M-LABEL: bzhi32_constant_mask16: +; V7M: @ %bb.0: +; V7M-NEXT: bfc r0, #15, #17 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask16: +; V7A: @ %bb.0: +; V7A-NEXT: bfc r0, #15, #17 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask16: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bfc r0, #15, #17 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask16: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, .LCPI41_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI41_0: +; V6M-NEXT: .long 32767 @ 0x7fff + %masked = and i32 %val, 32767 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask16_load(ptr %val) nounwind { +; V7M-LABEL: bzhi32_constant_mask16_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: bfc r0, #15, #17 +; V7M-NEXT: bx lr +; +; 
V7A-LABEL: bzhi32_constant_mask16_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: bfc r0, #15, #17 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask16_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: bfc r0, #15, #17 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask16_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r0] +; V6M-NEXT: ldr r0, .LCPI42_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI42_0: +; V6M-NEXT: .long 32767 @ 0x7fff + %val1 = load i32, ptr %val + %masked = and i32 %val1, 32767 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask8(i32 %val) nounwind { +; V7M-LABEL: bzhi32_constant_mask8: +; V7M: @ %bb.0: +; V7M-NEXT: and r0, r0, #127 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask8: +; V7A: @ %bb.0: +; V7A-NEXT: and r0, r0, #127 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask8: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: and r0, r0, #127 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask8: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #127 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %masked = and i32 %val, 127 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask8_load(ptr %val) nounwind { +; V7M-LABEL: bzhi32_constant_mask8_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: and r0, r0, #127 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask8_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: and r0, r0, #127 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask8_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: and r0, r0, #127 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask8_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r0] +; V6M-NEXT: movs r0, #127 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %val1 = load i32, ptr %val + %masked = and i32 %val1, 127 + ret i32 %masked +} + +; 64-bit + +define i64 @bzhi64_constant_mask64(i64 %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask64: +; V7M: @ %bb.0: +; V7M-NEXT: bic r1, r1, #-1073741824 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask64: +; V7A: @ %bb.0: +; V7A-NEXT: bic r1, r1, #-1073741824 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bic r1, r1, #-1073741824 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask64: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #3 +; V6M-NEXT: lsls r2, r2, #30 +; V6M-NEXT: bics r1, r2 +; V6M-NEXT: bx lr + %masked = and i64 %val, 4611686018427387903 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask64_load(ptr %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask64_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldrd r0, r1, [r0] +; V7M-NEXT: bic r1, r1, #-1073741824 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask64_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldrd r0, r1, [r0] +; V7A-NEXT: bic r1, r1, #-1073741824 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask64_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldrd r0, r1, [r0] +; V7A-T-NEXT: bic r1, r1, #-1073741824 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask64_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #3 +; V6M-NEXT: lsls r3, r1, #30 +; V6M-NEXT: ldr r2, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: bics r1, r3 +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: bx lr + %val1 = load i64, ptr %val + %masked = and i64 %val1, 4611686018427387903 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask32(i64 %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask32: +; V7M: @ %bb.0: +; V7M-NEXT: bic r0, 
r0, #-2147483648 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask32: +; V7A: @ %bb.0: +; V7A-NEXT: bic r0, r0, #-2147483648 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bic r0, r0, #-2147483648 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask32: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r1, #31 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %masked = and i64 %val, 2147483647 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask32_load(ptr %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask32_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bic r0, r0, #-2147483648 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask32_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bic r0, r0, #-2147483648 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask32_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bic r0, r0, #-2147483648 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask32_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r1, #31 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %val1 = load i64, ptr %val + %masked = and i64 %val1, 2147483647 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask16(i64 %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask16: +; V7M: @ %bb.0: +; V7M-NEXT: bfc r0, #15, #17 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask16: +; V7A: @ %bb.0: +; V7A-NEXT: bfc r0, #15, #17 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask16: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bfc r0, #15, #17 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask16: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, .LCPI49_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI49_0: +; V6M-NEXT: .long 32767 @ 0x7fff + %masked = and i64 %val, 32767 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask16_load(ptr %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask16_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bfc r0, #15, #17 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask16_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bfc r0, #15, #17 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask16_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bfc r0, #15, #17 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask16_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r0] +; V6M-NEXT: ldr r0, .LCPI50_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI50_0: +; V6M-NEXT: .long 32767 @ 0x7fff + %val1 = load i64, ptr %val + %masked = and i64 %val1, 32767 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask8(i64 %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask8: +; V7M: @ %bb.0: +; V7M-NEXT: and r0, r0, #127 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask8: +; V7A: @ %bb.0: +; V7A-NEXT: and r0, r0, #127 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask8: +; 
V7A-T: @ %bb.0: +; V7A-T-NEXT: and r0, r0, #127 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask8: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #127 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %masked = and i64 %val, 127 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask8_load(ptr %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask8_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: and r0, r0, #127 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask8_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: and r0, r0, #127 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask8_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: and r0, r0, #127 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask8_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r0] +; V6M-NEXT: movs r0, #127 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %val1 = load i64, ptr %val + %masked = and i64 %val1, 127 + ret i64 %masked +} diff --git a/llvm/test/CodeGen/X86/pr161693.ll b/llvm/test/CodeGen/X86/pr161693.ll new file mode 100644 index 0000000..de8188f --- /dev/null +++ b/llvm/test/CodeGen/X86/pr161693.ll @@ -0,0 +1,40 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s + +define void @PR161693() #0 { +; CHECK-LABEL: PR161693: +; CHECK: # %bb.0: # %start +; CHECK-NEXT: movzbl (%rax), %eax +; CHECK-NEXT: andb $-33, %al +; CHECK-NEXT: addb $-71, %al +; CHECK-NEXT: .p2align 4 +; CHECK-NEXT: .LBB0_1: # %loop +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: cmpb $-6, %al +; CHECK-NEXT: setb %cl +; CHECK-NEXT: leal (%rcx,%rcx), %edx +; CHECK-NEXT: orb %cl, %dl +; CHECK-NEXT: leal (,%rdx,4), %ecx +; CHECK-NEXT: orb %dl, %cl +; CHECK-NEXT: je .LBB0_1 +; CHECK-NEXT: # %bb.2: # %exit +; CHECK-NEXT: retq +start: + br label %loop + +loop: + %.val.i.i89 = load <16 x i8>, ptr poison, align 1 + %.not49.i = icmp ult <16 x i8> zeroinitializer, splat (i8 -10) + %i = and <16 x i8> %.val.i.i89, splat (i8 -33) + %i1 = add <16 x i8> %i, splat (i8 -71) + %.not51.i = icmp ult <16 x i8> %i1, splat (i8 -6) + %.not46.i = and <16 x i1> %.not49.i, %.not51.i + %i2 = bitcast <16 x i1> %.not46.i to i16 + %_0.i = icmp eq i16 %i2, 0 + br i1 %_0.i, label %loop, label %exit + +exit: + ret void +} + +attributes #0 = { "target-features"="+soft-float" } diff --git a/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s b/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s index 9296f04..ed76a28 100644 --- a/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s +++ b/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s @@ -22,7 +22,7 @@ # CHECK-OBJ: Contents of section .rodata: # CHECK-OBJ: 0000 48310048 32004833 00 H1.H2.H3. 
-# CHECK-LG: Starting link phase 1 for graph +# CHECK-LG: Starting link phase 1 # CHECK-LG: section .rodata: # CHECK-LG: block 0x0 size = 0x00000009, align = 1, alignment-offset = 0 diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-0.s b/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-0.s new file mode 100644 index 0000000..557e403 --- /dev/null +++ b/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-0.s @@ -0,0 +1,7 @@ + .section __DATA,__data + .globl x + .p2align 2, 0x0 +x: + .long 0 + +.subsections_via_symbols diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-1.s b/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-1.s new file mode 100644 index 0000000..711c8a0 --- /dev/null +++ b/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-1.s @@ -0,0 +1,7 @@ + .section __DATA,__data + .globl x + .p2align 2, 0x0 +x: + .long 1 + +.subsections_via_symbols diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_universal_slice_selection.s b/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_universal_slice_selection.s new file mode 100644 index 0000000..c58f84e --- /dev/null +++ b/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_universal_slice_selection.s @@ -0,0 +1,32 @@ +# RUN: rm -rf %t && mkdir -p %t +# RUN: llvm-mc -triple=arm64e-apple-darwin -filetype=obj -o %t/main.o %s +# RUN: llvm-mc -triple=arm64-apple-darwin -filetype=obj -o %t/x.arm64.o \ +# RUN: %S/Inputs/x-1.s +# RUN: llvm-ar crs %t/libX.arm64.a %t/x.arm64.o +# RUN: llvm-mc -triple=arm64e-apple-darwin -filetype=obj -o %t/x.arm64e.o \ +# RUN: %S/Inputs/x-0.s +# RUN: llvm-ar crs %t/libX.arm64e.a %t/x.arm64e.o +# RUN: llvm-lipo --create --output %t/libX.a %t/libX.arm64.a %t/libX.arm64e.a +# RUN: llvm-jitlink -noexec -check=%s %t/main.o -L%t -lX +# +# Create a universal archive with two slices (arm64e, arm64) each containing +# a definition of X: in arm64e X = 0, in arm64 X = 1. +# Check that if we load an arm64e object file then we link the arm64e slice +# of the archive by verifying that X = 0. +# + +# jitlink-check: *{4}x = 0 + + .section __TEXT,__text,regular,pure_instructions + .globl _main + .p2align 2 +_main: + mov w0, #0 + ret + + .section __DATA,__data + .globl p +p: + .quad x + +.subsections_via_symbols diff --git a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s index 2b5c9e3..5f6babf 100644 --- a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s +++ b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s @@ -102,7 +102,7 @@ p: call o .size p, .-p -# CHECK: Link graph "{{.*}}" before copy-and-fixup: +# CHECK: Link graph before copy-and-fixup: # CHECK: section .text: # CHECK: block 0x1000 # CHECK: symbols: diff --git a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s index 3bbfd55..c31250b 100644 --- a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s +++ b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s @@ -131,7 +131,7 @@ p: call o .size p, .-p -# CHECK: Link graph "{{.*}}" before copy-and-fixup: +# CHECK: Link graph before copy-and-fixup: # CHECK: section .text: # CHECK: block 0x1000 # CHECK: symbols: |