; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVBB
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVBB

define void @bitreverse_v8i16(ptr %x, ptr %y) {
; CHECK-LABEL: bitreverse_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    lui a1, 1
; CHECK-NEXT:    addi a1, a1, -241
; CHECK-NEXT:    vsrl.vi v9, v8, 8
; CHECK-NEXT:    vsll.vi v8, v8, 8
; CHECK-NEXT:    vor.vv v8, v8, v9
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vand.vx v9, v9, a1
; CHECK-NEXT:    lui a1, 3
; CHECK-NEXT:    addi a1, a1, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vand.vx v9, v9, a1
; CHECK-NEXT:    lui a1, 5
; CHECK-NEXT:    addi a1, a1, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vand.vx v9, v9, a1
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: bitreverse_v8i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVBB-NEXT:    vle16.v v8, (a0)
; ZVBB-NEXT:    vbrev.v v8, v8
; ZVBB-NEXT:    vse16.v v8, (a0)
; ZVBB-NEXT:    ret
  %a = load <8 x i16>, ptr %x
  %b = load <8 x i16>, ptr %y
  %c = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %a)
  store <8 x i16> %c, ptr %x
  ret void
}
declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>)

define void @bitreverse_v4i32(ptr %x, ptr %y) {
; CHECK-LABEL: bitreverse_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    lui a1, 16
; CHECK-NEXT:    addi a1, a1, -256
; CHECK-NEXT:    vsrl.vi v9, v8, 8
; CHECK-NEXT:    vsrl.vi v10, v8, 24
; CHECK-NEXT:    vand.vx v9, v9, a1
; CHECK-NEXT:    vor.vv v9, v9, v10
; CHECK-NEXT:    vand.vx v10, v8, a1
; CHECK-NEXT:    lui a1, 61681
; CHECK-NEXT:    addi a1, a1, -241
; CHECK-NEXT:    vsll.vi v8, v8, 24
; CHECK-NEXT:    vsll.vi v10, v10, 8
; CHECK-NEXT:    vor.vv v8, v8, v10
; CHECK-NEXT:    vor.vv v8, v8, v9
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vand.vx v9, v9, a1
; CHECK-NEXT:    lui a1, 209715
; CHECK-NEXT:    addi a1, a1, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vand.vx v9, v9, a1
; CHECK-NEXT:    lui a1, 349525
; CHECK-NEXT:    addi a1, a1, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vand.vx v9, v9, a1
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: bitreverse_v4i32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; ZVBB-NEXT:    vle32.v v8, (a0)
; ZVBB-NEXT:    vbrev.v v8, v8
; ZVBB-NEXT:    vse32.v v8, (a0)
; ZVBB-NEXT:    ret
  %a = load <4 x i32>, ptr %x
  %b = load <4 x i32>, ptr %y
  %c = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %a)
  store <4 x i32> %c, ptr %x
  ret void
}
declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>)

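; Note on the i64 element tests below: without Zvbb, the expected expansion
; differs by target. The RV32 check lines build the 64-bit byte mask through a
; stack slot (sw + vlse64.v) and splat the nibble/2-bit/1-bit masks with
; vmv.v.x at e32, while the RV64 check lines materialize the same masks in
; scalar registers with lui/addi/slli/add. With Zvbb, both targets lower the
; whole operation to a single vbrev.v.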
define void @bitreverse_v2i64(ptr %x, ptr %y) {
; RV32-LABEL: bitreverse_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    lui a2, 1044480
; RV32-NEXT:    li a3, 56
; RV32-NEXT:    li a4, 40
; RV32-NEXT:    lui a5, 16
; RV32-NEXT:    lui a1, 4080
; RV32-NEXT:    addi a6, sp, 8
; RV32-NEXT:    sw a2, 8(sp)
; RV32-NEXT:    sw zero, 12(sp)
; RV32-NEXT:    addi a2, a5, -256
; RV32-NEXT:    vlse64.v v9, (a6), zero
; RV32-NEXT:    vsrl.vx v10, v8, a3
; RV32-NEXT:    vsrl.vx v11, v8, a4
; RV32-NEXT:    vsrl.vi v12, v8, 24
; RV32-NEXT:    vsll.vx v13, v8, a3
; RV32-NEXT:    vand.vx v11, v11, a2
; RV32-NEXT:    vor.vv v10, v11, v10
; RV32-NEXT:    vand.vx v11, v8, a2
; RV32-NEXT:    vsll.vx v11, v11, a4
; RV32-NEXT:    vor.vv v11, v13, v11
; RV32-NEXT:    vsrl.vi v13, v8, 8
; RV32-NEXT:    vand.vx v12, v12, a1
; RV32-NEXT:    vand.vv v13, v13, v9
; RV32-NEXT:    vor.vv v12, v13, v12
; RV32-NEXT:    lui a2, 61681
; RV32-NEXT:    lui a3, 209715
; RV32-NEXT:    lui a4, 349525
; RV32-NEXT:    addi a2, a2, -241
; RV32-NEXT:    addi a3, a3, 819
; RV32-NEXT:    addi a4, a4, 1365
; RV32-NEXT:    vor.vv v10, v12, v10
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v12, a2
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT:    vand.vv v9, v8, v9
; RV32-NEXT:    vand.vx v8, v8, a1
; RV32-NEXT:    vsll.vi v8, v8, 24
; RV32-NEXT:    vsll.vi v9, v9, 8
; RV32-NEXT:    vor.vv v8, v8, v9
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v9, a3
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT:    vor.vv v8, v11, v8
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v11, a4
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT:    vor.vv v8, v8, v10
; RV32-NEXT:    vsrl.vi v10, v8, 4
; RV32-NEXT:    vand.vv v8, v8, v12
; RV32-NEXT:    vand.vv v10, v10, v12
; RV32-NEXT:    vsll.vi v8, v8, 4
; RV32-NEXT:    vor.vv v8, v10, v8
; RV32-NEXT:    vsrl.vi v10, v8, 2
; RV32-NEXT:    vand.vv v8, v8, v9
; RV32-NEXT:    vand.vv v9, v10, v9
; RV32-NEXT:    vsll.vi v8, v8, 2
; RV32-NEXT:    vor.vv v8, v9, v8
; RV32-NEXT:    vsrl.vi v9, v8, 1
; RV32-NEXT:    vand.vv v8, v8, v11
; RV32-NEXT:    vand.vv v9, v9, v11
; RV32-NEXT:    vadd.vv v8, v8, v8
; RV32-NEXT:    vor.vv v8, v9, v8
; RV32-NEXT:    vse64.v v8, (a0)
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: bitreverse_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    li a1, 56
; RV64-NEXT:    li a2, 40
; RV64-NEXT:    lui a3, 16
; RV64-NEXT:    lui a4, 4080
; RV64-NEXT:    li a5, 255
; RV64-NEXT:    addi a3, a3, -256
; RV64-NEXT:    slli a5, a5, 24
; RV64-NEXT:    vsrl.vx v9, v8, a1
; RV64-NEXT:    vsrl.vx v10, v8, a2
; RV64-NEXT:    vsrl.vi v11, v8, 24
; RV64-NEXT:    vsrl.vi v12, v8, 8
; RV64-NEXT:    vand.vx v10, v10, a3
; RV64-NEXT:    vor.vv v9, v10, v9
; RV64-NEXT:    vand.vx v10, v8, a5
; RV64-NEXT:    vand.vx v11, v11, a4
; RV64-NEXT:    vand.vx v12, v12, a5
; RV64-NEXT:    vor.vv v11, v12, v11
; RV64-NEXT:    vand.vx v12, v8, a4
; RV64-NEXT:    vsll.vi v10, v10, 8
; RV64-NEXT:    vsll.vi v12, v12, 24
; RV64-NEXT:    vor.vv v10, v12, v10
; RV64-NEXT:    vsll.vx v12, v8, a1
; RV64-NEXT:    vand.vx v8, v8, a3
; RV64-NEXT:    vsll.vx v8, v8, a2
; RV64-NEXT:    vor.vv v8, v12, v8
; RV64-NEXT:    lui a1, 61681
; RV64-NEXT:    lui a2, 209715
; RV64-NEXT:    lui a3, 349525
; RV64-NEXT:    addi a1, a1, -241
; RV64-NEXT:    addi a2, a2, 819
; RV64-NEXT:    addi a3, a3, 1365
; RV64-NEXT:    slli a4, a1, 32
; RV64-NEXT:    slli a5, a2, 32
; RV64-NEXT:    add a1, a1, a4
; RV64-NEXT:    slli a4, a3, 32
; RV64-NEXT:    add a2, a2, a5
; RV64-NEXT:    add a3, a3, a4
; RV64-NEXT:    vor.vv v9, v11, v9
; RV64-NEXT:    vor.vv v8, v8, v10
; RV64-NEXT:    vor.vv v8, v8, v9
; RV64-NEXT:    vsrl.vi v9, v8, 4
; RV64-NEXT:    vand.vx v8, v8, a1
; RV64-NEXT:    vand.vx v9, v9, a1
; RV64-NEXT:    vsll.vi v8, v8, 4
; RV64-NEXT:    vor.vv v8, v9, v8
; RV64-NEXT:    vsrl.vi v9, v8, 2
; RV64-NEXT:    vand.vx v8, v8, a2
; RV64-NEXT:    vand.vx v9, v9, a2
; RV64-NEXT:    vsll.vi v8, v8, 2
; RV64-NEXT:    vor.vv v8, v9, v8
; RV64-NEXT:    vsrl.vi v9, v8, 1
; RV64-NEXT:    vand.vx v8, v8, a3
; RV64-NEXT:    vand.vx v9, v9, a3
; RV64-NEXT:    vadd.vv v8, v8, v8
; RV64-NEXT:    vor.vv v8, v9, v8
; RV64-NEXT:    vse64.v v8, (a0)
; RV64-NEXT:    ret
;
; ZVBB-LABEL: bitreverse_v2i64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; ZVBB-NEXT:    vle64.v v8, (a0)
; ZVBB-NEXT:    vbrev.v v8, v8
; ZVBB-NEXT:    vse64.v v8, (a0)
; ZVBB-NEXT:    ret
  %a = load <2 x i64>, ptr %x
  %b = load <2 x i64>, ptr %y
  %c = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %a)
  store <2 x i64> %c, ptr %x
  ret void
}
declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>)

define void @bitreverse_v16i16(ptr %x, ptr %y) {
; CHECK-LABEL: bitreverse_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    lui a1, 1
; CHECK-NEXT:    addi a1, a1, -241
; CHECK-NEXT:    vsrl.vi v10, v8, 8
; CHECK-NEXT:    vsll.vi v8, v8, 8
; CHECK-NEXT:    vor.vv v8, v8, v10
; CHECK-NEXT:    vsrl.vi v10, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vand.vx v10, v10, a1
; CHECK-NEXT:    lui a1, 3
; CHECK-NEXT:    addi a1, a1, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    vsrl.vi v10, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vand.vx v10, v10, a1
; CHECK-NEXT:    lui a1, 5
; CHECK-NEXT:    addi a1, a1, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    vsrl.vi v10, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vand.vx v10, v10, a1
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: bitreverse_v16i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZVBB-NEXT:    vle16.v v8, (a0)
; ZVBB-NEXT:    vbrev.v v8, v8
; ZVBB-NEXT:    vse16.v v8, (a0)
; ZVBB-NEXT:    ret
  %a = load <16 x i16>, ptr %x
  %b = load <16 x i16>, ptr %y
  %c = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %a)
  store <16 x i16> %c, ptr %x
  ret void
}
declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>)

define void @bitreverse_v8i32(ptr %x, ptr %y) {
; CHECK-LABEL: bitreverse_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    lui a1, 16
; CHECK-NEXT:    addi a1, a1, -256
; CHECK-NEXT:    vsrl.vi v10, v8, 8
; CHECK-NEXT:    vsrl.vi v12, v8, 24
; CHECK-NEXT:    vand.vx v10, v10, a1
; CHECK-NEXT:    vor.vv v10, v10, v12
; CHECK-NEXT:    vand.vx v12, v8, a1
; CHECK-NEXT:    lui a1, 61681
; CHECK-NEXT:    addi a1, a1, -241
; CHECK-NEXT:    vsll.vi v8, v8, 24
; CHECK-NEXT:    vsll.vi v12, v12, 8
; CHECK-NEXT:    vor.vv v8, v8, v12
; CHECK-NEXT:    vor.vv v8, v8, v10
; CHECK-NEXT:    vsrl.vi v10, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vand.vx v10, v10, a1
; CHECK-NEXT:    lui a1, 209715
; CHECK-NEXT:    addi a1, a1, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    vsrl.vi v10, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vand.vx v10, v10, a1
; CHECK-NEXT:    lui a1, 349525
; CHECK-NEXT:    addi a1, a1, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    vsrl.vi v10, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vand.vx v10, v10, a1
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: bitreverse_v8i32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; ZVBB-NEXT:    vle32.v v8, (a0)
; ZVBB-NEXT:    vbrev.v v8, v8
; ZVBB-NEXT:    vse32.v v8, (a0)
; ZVBB-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %b = load <8 x i32>, ptr %y
  %c = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %a)
  store <8 x i32> %c, ptr %x
  ret void
}
declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>)

define void @bitreverse_v4i64(ptr %x, ptr %y) {
; RV32-LABEL: bitreverse_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    lui a2, 1044480
; RV32-NEXT:    li a3, 56
; RV32-NEXT:    li a4, 40
; RV32-NEXT:    lui a5, 16
; RV32-NEXT:    lui a1, 4080
; RV32-NEXT:    addi a6, sp, 8
; RV32-NEXT:    sw a2, 8(sp)
; RV32-NEXT:    sw zero, 12(sp)
; RV32-NEXT:    addi a2, a5, -256
; RV32-NEXT:    vlse64.v v10, (a6), zero
; RV32-NEXT:    vsrl.vx v12, v8, a3
; RV32-NEXT:    vsrl.vx v14, v8, a4
; RV32-NEXT:    vsrl.vi v16, v8, 24
; RV32-NEXT:    vsll.vx v18, v8, a3
; RV32-NEXT:    vand.vx v14, v14, a2
; RV32-NEXT:    vor.vv v14, v14, v12
; RV32-NEXT:    vand.vx v12, v8, a2
; RV32-NEXT:    vsll.vx v12, v12, a4
; RV32-NEXT:    vor.vv v12, v18, v12
; RV32-NEXT:    vsrl.vi v18, v8, 8
; RV32-NEXT:    vand.vx v16, v16, a1
; RV32-NEXT:    vand.vv v18, v18, v10
; RV32-NEXT:    vor.vv v16, v18, v16
; RV32-NEXT:    lui a2, 61681
; RV32-NEXT:    lui a3, 209715
; RV32-NEXT:    lui a4, 349525
; RV32-NEXT:    addi a2, a2, -241
; RV32-NEXT:    addi a3, a3, 819
; RV32-NEXT:    addi a4, a4, 1365
; RV32-NEXT:    vor.vv v14, v16, v14
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v16, a2
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT:    vand.vv v10, v8, v10
; RV32-NEXT:    vand.vx v8, v8, a1
; RV32-NEXT:    vsll.vi v8, v8, 24
; RV32-NEXT:    vsll.vi v10, v10, 8
; RV32-NEXT:    vor.vv v8, v8, v10
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v10, a3
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT:    vor.vv v8, v12, v8
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v12, a4
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT:    vor.vv v8, v8, v14
; RV32-NEXT:    vsrl.vi v14, v8, 4
; RV32-NEXT:    vand.vv v8, v8, v16
; RV32-NEXT:    vand.vv v14, v14, v16
; RV32-NEXT:    vsll.vi v8, v8, 4
; RV32-NEXT:    vor.vv v8, v14, v8
; RV32-NEXT:    vsrl.vi v14, v8, 2
; RV32-NEXT:    vand.vv v8, v8, v10
; RV32-NEXT:    vand.vv v10, v14, v10
; RV32-NEXT:    vsll.vi v8, v8, 2
; RV32-NEXT:    vor.vv v8, v10, v8
; RV32-NEXT:    vsrl.vi v10, v8, 1
; RV32-NEXT:    vand.vv v8, v8, v12
; RV32-NEXT:    vand.vv v10, v10, v12
; RV32-NEXT:    vadd.vv v8, v8, v8
; RV32-NEXT:    vor.vv v8, v10, v8
; RV32-NEXT:    vse64.v v8, (a0)
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: bitreverse_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT:    vle64.v v14, (a0)
; RV64-NEXT:    li a1, 56
; RV64-NEXT:    li a2, 40
; RV64-NEXT:    lui a3, 16
; RV64-NEXT:    lui a4, 4080
; RV64-NEXT:    li a5, 255
; RV64-NEXT:    addi a3, a3, -256
; RV64-NEXT:    slli a5, a5, 24
; RV64-NEXT:    vsrl.vx v8, v14, a1
; RV64-NEXT:    vsrl.vx v10, v14, a2
; RV64-NEXT:    vsrl.vi v12, v14, 24
; RV64-NEXT:    vsrl.vi v16, v14, 8
; RV64-NEXT:    vand.vx v10, v10, a3
; RV64-NEXT:    vor.vv v8, v10, v8
; RV64-NEXT:    vand.vx v18, v14, a5
; RV64-NEXT:    vand.vx v10, v12, a4
; RV64-NEXT:    vand.vx v12, v16, a5
; RV64-NEXT:    vor.vv v10, v12, v10
; RV64-NEXT:    vand.vx v12, v14, a4
; RV64-NEXT:    vsll.vi v16, v18, 8
; RV64-NEXT:    vsll.vi v12, v12, 24
; RV64-NEXT:    vor.vv v12, v12, v16
; RV64-NEXT:    vsll.vx v16, v14, a1
; RV64-NEXT:    vand.vx v14, v14, a3
; RV64-NEXT:    vsll.vx v14, v14, a2
; RV64-NEXT:    vor.vv v14, v16, v14
; RV64-NEXT:    lui a1, 61681
; RV64-NEXT:    lui a2, 209715
; RV64-NEXT:    lui a3, 349525
; RV64-NEXT:    addi a1, a1, -241
; RV64-NEXT:    addi a2, a2, 819
; RV64-NEXT:    addi a3, a3, 1365
; RV64-NEXT:    slli a4, a1, 32
; RV64-NEXT:    slli a5, a2, 32
; RV64-NEXT:    add a1, a1, a4
; RV64-NEXT:    slli a4, a3, 32
; RV64-NEXT:    add a2, a2, a5
; RV64-NEXT:    add a3, a3, a4
; RV64-NEXT:    vor.vv v8, v10, v8
; RV64-NEXT:    vor.vv v10, v14, v12
; RV64-NEXT:    vor.vv v8, v10, v8
; RV64-NEXT:    vsrl.vi v10, v8, 4
; RV64-NEXT:    vand.vx v8, v8, a1
; RV64-NEXT:    vand.vx v10, v10, a1
; RV64-NEXT:    vsll.vi v8, v8, 4
; RV64-NEXT:    vor.vv v8, v10, v8
; RV64-NEXT:    vsrl.vi v10, v8, 2
; RV64-NEXT:    vand.vx v8, v8, a2
; RV64-NEXT:    vand.vx v10, v10, a2
; RV64-NEXT:    vsll.vi v8, v8, 2
; RV64-NEXT:    vor.vv v8, v10, v8
; RV64-NEXT:    vsrl.vi v10, v8, 1
; RV64-NEXT:    vand.vx v8, v8, a3
; RV64-NEXT:    vand.vx v10, v10, a3
; RV64-NEXT:    vadd.vv v8, v8, v8
; RV64-NEXT:    vor.vv v8, v10, v8
; RV64-NEXT:    vse64.v v8, (a0)
; RV64-NEXT:    ret
;
; ZVBB-LABEL: bitreverse_v4i64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; ZVBB-NEXT:    vle64.v v8, (a0)
; ZVBB-NEXT:    vbrev.v v8, v8
; ZVBB-NEXT:    vse64.v v8, (a0)
; ZVBB-NEXT:    ret
  %a = load <4 x i64>, ptr %x
  %b = load <4 x i64>, ptr %y
  %c = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %a)
  store <4 x i64> %c, ptr %x
  ret void
}
declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>)