; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 -mattr=+sha3 < %s | FileCheck --check-prefix=SHA3 %s
; RUN: llc -mtriple=aarch64 -mattr=-sha3 < %s | FileCheck --check-prefix=NOSHA3 %s
; RUN: llc -mtriple=aarch64 -mattr=+sve2 < %s | FileCheck --check-prefix=SVE2 %s

; 128-bit vectors

define <2 x i64> @xar(<2 x i64> %x, <2 x i64> %y) {
; SHA3-LABEL: xar:
; SHA3:       // %bb.0:
; SHA3-NEXT:    xar v0.2d, v0.2d, v1.2d, #54
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: xar:
; NOSHA3:       // %bb.0:
; NOSHA3-NEXT:    eor v1.16b, v0.16b, v1.16b
; NOSHA3-NEXT:    shl v0.2d, v1.2d, #10
; NOSHA3-NEXT:    usra v0.2d, v1.2d, #54
; NOSHA3-NEXT:    ret
;
; SVE2-LABEL: xar:
; SVE2:       // %bb.0:
; SVE2-NEXT:    // kill: def $q0 killed $q0 def $z0
; SVE2-NEXT:    // kill: def $q1 killed $q1 def $z1
; SVE2-NEXT:    xar z0.d, z0.d, z1.d, #54
; SVE2-NEXT:    // kill: def $q0 killed $q0 killed $z0
; SVE2-NEXT:    ret
  %a = xor <2 x i64> %x, %y
  %b = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %a, <2 x i64> %a, <2 x i64> <i64 10, i64 10>)
  ret <2 x i64> %b
}

define <1 x i64> @xar_v1i64(<1 x i64> %a, <1 x i64> %b) {
; SHA3-LABEL: xar_v1i64:
; SHA3:       // %bb.0:
; SHA3-NEXT:    // kill: def $d0 killed $d0 def $q0
; SHA3-NEXT:    // kill: def $d1 killed $d1 def $q1
; SHA3-NEXT:    xar v0.2d, v0.2d, v1.2d, #63
; SHA3-NEXT:    // kill: def $d0 killed $d0 killed $q0
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: xar_v1i64:
; NOSHA3:       // %bb.0:
; NOSHA3-NEXT:    eor v1.8b, v0.8b, v1.8b
; NOSHA3-NEXT:    shl d0, d1, #1
; NOSHA3-NEXT:    usra d0, d1, #63
; NOSHA3-NEXT:    ret
;
; SVE2-LABEL: xar_v1i64:
; SVE2:       // %bb.0:
; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
; SVE2-NEXT:    // kill: def $d1 killed $d1 def $z1
; SVE2-NEXT:    xar z0.d, z0.d, z1.d, #63
; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
; SVE2-NEXT:    ret
  %v.val = xor <1 x i64> %a, %b
  %fshl = tail call <1 x i64> @llvm.fshl.v1i64(<1 x i64> %v.val, <1 x i64> %v.val, <1 x i64> splat (i64 1))
  ret <1 x i64> %fshl
}

define <2 x i64> @xar_instead_of_or_v2i64(<2 x i64> %r) {
; SHA3-LABEL: xar_instead_of_or_v2i64:
; SHA3:       // %bb.0: // %entry
; SHA3-NEXT:    movi v1.2d, #0000000000000000
; SHA3-NEXT:    xar v0.2d, v0.2d, v1.2d, #39
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: xar_instead_of_or_v2i64:
; NOSHA3:       // %bb.0: // %entry
; NOSHA3-NEXT:    shl v1.2d, v0.2d, #25
; NOSHA3-NEXT:    usra v1.2d, v0.2d, #39
; NOSHA3-NEXT:    mov v0.16b, v1.16b
; NOSHA3-NEXT:    ret
;
; SVE2-LABEL: xar_instead_of_or_v2i64:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    movi v1.2d, #0000000000000000
; SVE2-NEXT:    // kill: def $q0 killed $q0 def $z0
; SVE2-NEXT:    xar z0.d, z0.d, z1.d, #39
; SVE2-NEXT:    // kill: def $q0 killed $q0 killed $z0
; SVE2-NEXT:    ret
entry:
  %or = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %r, <2 x i64> %r, <2 x i64> splat (i64 25))
  ret <2 x i64> %or
}

define <1 x i64> @xar_instead_of_or_v1i64(<1 x i64> %v.val) {
; SHA3-LABEL: xar_instead_of_or_v1i64:
; SHA3:       // %bb.0:
; SHA3-NEXT:    movi v1.2d, #0000000000000000
; SHA3-NEXT:    // kill: def $d0 killed $d0 def $q0
; SHA3-NEXT:    xar v0.2d, v0.2d, v1.2d, #63
; SHA3-NEXT:    // kill: def $d0 killed $d0 killed $q0
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: xar_instead_of_or_v1i64:
; NOSHA3:       // %bb.0:
; NOSHA3-NEXT:    shl d1, d0, #1
; NOSHA3-NEXT:    usra d1, d0, #63
; NOSHA3-NEXT:    fmov d0, d1
; NOSHA3-NEXT:    ret
;
; SVE2-LABEL: xar_instead_of_or_v1i64:
; SVE2:       // %bb.0:
; SVE2-NEXT:    movi v1.2d, #0000000000000000
; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
; SVE2-NEXT:    xar z0.d, z0.d, z1.d, #63
; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
; SVE2-NEXT:    ret
  %fshl = tail call <1 x i64> @llvm.fshl.v1i64(<1 x i64> %v.val, <1 x i64> %v.val, <1 x i64> splat (i64 1))
  ret <1 x i64> %fshl
}
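
; NEON provides XAR only for the .2d arrangement, so for the smaller element
; sizes below the rotate is lowered to shl+usra (or add+usra for a rotate by
; one) even with +sha3, while SVE2's XAR accepts .b, .h, .s and .d elements,
; as the checks show.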
define <4 x i32> @xar_instead_of_or_v4i32(<4 x i32> %r) {
; SHA3-LABEL: xar_instead_of_or_v4i32:
; SHA3:       // %bb.0: // %entry
; SHA3-NEXT:    shl v1.4s, v0.4s, #25
; SHA3-NEXT:    usra v1.4s, v0.4s, #7
; SHA3-NEXT:    mov v0.16b, v1.16b
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: xar_instead_of_or_v4i32:
; NOSHA3:       // %bb.0: // %entry
; NOSHA3-NEXT:    shl v1.4s, v0.4s, #25
; NOSHA3-NEXT:    usra v1.4s, v0.4s, #7
; NOSHA3-NEXT:    mov v0.16b, v1.16b
; NOSHA3-NEXT:    ret
;
; SVE2-LABEL: xar_instead_of_or_v4i32:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    movi v1.2d, #0000000000000000
; SVE2-NEXT:    // kill: def $q0 killed $q0 def $z0
; SVE2-NEXT:    xar z0.s, z0.s, z1.s, #7
; SVE2-NEXT:    // kill: def $q0 killed $q0 killed $z0
; SVE2-NEXT:    ret
entry:
  %or = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %r, <4 x i32> %r, <4 x i32> splat (i32 25))
  ret <4 x i32> %or
}

define <8 x i16> @xar_instead_of_or_v8i16(<8 x i16> %r) {
; SHA3-LABEL: xar_instead_of_or_v8i16:
; SHA3:       // %bb.0: // %entry
; SHA3-NEXT:    shl v1.8h, v0.8h, #9
; SHA3-NEXT:    usra v1.8h, v0.8h, #7
; SHA3-NEXT:    mov v0.16b, v1.16b
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: xar_instead_of_or_v8i16:
; NOSHA3:       // %bb.0: // %entry
; NOSHA3-NEXT:    shl v1.8h, v0.8h, #9
; NOSHA3-NEXT:    usra v1.8h, v0.8h, #7
; NOSHA3-NEXT:    mov v0.16b, v1.16b
; NOSHA3-NEXT:    ret
;
; SVE2-LABEL: xar_instead_of_or_v8i16:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    movi v1.2d, #0000000000000000
; SVE2-NEXT:    // kill: def $q0 killed $q0 def $z0
; SVE2-NEXT:    xar z0.h, z0.h, z1.h, #7
; SVE2-NEXT:    // kill: def $q0 killed $q0 killed $z0
; SVE2-NEXT:    ret
entry:
  %or = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %r, <8 x i16> %r, <8 x i16> splat (i16 25))
  ret <8 x i16> %or
}

define <16 x i8> @xar_instead_of_or_v16i8(<16 x i8> %r) {
; SHA3-LABEL: xar_instead_of_or_v16i8:
; SHA3:       // %bb.0: // %entry
; SHA3-NEXT:    add v1.16b, v0.16b, v0.16b
; SHA3-NEXT:    usra v1.16b, v0.16b, #7
; SHA3-NEXT:    mov v0.16b, v1.16b
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: xar_instead_of_or_v16i8:
; NOSHA3:       // %bb.0: // %entry
; NOSHA3-NEXT:    add v1.16b, v0.16b, v0.16b
; NOSHA3-NEXT:    usra v1.16b, v0.16b, #7
; NOSHA3-NEXT:    mov v0.16b, v1.16b
; NOSHA3-NEXT:    ret
;
; SVE2-LABEL: xar_instead_of_or_v16i8:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    movi v1.2d, #0000000000000000
; SVE2-NEXT:    // kill: def $q0 killed $q0 def $z0
; SVE2-NEXT:    xar z0.b, z0.b, z1.b, #7
; SVE2-NEXT:    // kill: def $q0 killed $q0 killed $z0
; SVE2-NEXT:    ret
entry:
  %or = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %r, <16 x i8> %r, <16 x i8> splat (i8 25))
  ret <16 x i8> %or
}

; 64-bit vectors
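
; No XAR arrangement exists for 64-bit NEON vectors either, so with and
; without +sha3 the rotate lowers to eor followed by shl+usra (add+usra for
; a rotate by one); only SVE2 selects XAR here.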
define <2 x i32> @xar_v2i32(<2 x i32> %x, <2 x i32> %y) {
; SHA3-LABEL: xar_v2i32:
; SHA3:       // %bb.0: // %entry
; SHA3-NEXT:    eor v1.8b, v0.8b, v1.8b
; SHA3-NEXT:    shl v0.2s, v1.2s, #25
; SHA3-NEXT:    usra v0.2s, v1.2s, #7
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: xar_v2i32:
; NOSHA3:       // %bb.0: // %entry
; NOSHA3-NEXT:    eor v1.8b, v0.8b, v1.8b
; NOSHA3-NEXT:    shl v0.2s, v1.2s, #25
; NOSHA3-NEXT:    usra v0.2s, v1.2s, #7
; NOSHA3-NEXT:    ret
;
; SVE2-LABEL: xar_v2i32:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
; SVE2-NEXT:    // kill: def $d1 killed $d1 def $z1
; SVE2-NEXT:    xar z0.s, z0.s, z1.s, #7
; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
; SVE2-NEXT:    ret
entry:
  %a = xor <2 x i32> %x, %y
  %b = call <2 x i32> @llvm.fshl(<2 x i32> %a, <2 x i32> %a, <2 x i32> <i32 25, i32 25>)
  ret <2 x i32> %b
}

define <2 x i32> @xar_instead_of_or_v2i32(<2 x i32> %r) {
; SHA3-LABEL: xar_instead_of_or_v2i32:
; SHA3:       // %bb.0: // %entry
; SHA3-NEXT:    shl v1.2s, v0.2s, #25
; SHA3-NEXT:    usra v1.2s, v0.2s, #7
; SHA3-NEXT:    fmov d0, d1
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: xar_instead_of_or_v2i32:
; NOSHA3:       // %bb.0: // %entry
; NOSHA3-NEXT:    shl v1.2s, v0.2s, #25
; NOSHA3-NEXT:    usra v1.2s, v0.2s, #7
; NOSHA3-NEXT:    fmov d0, d1
; NOSHA3-NEXT:    ret
;
; SVE2-LABEL: xar_instead_of_or_v2i32:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    movi v1.2d, #0000000000000000
; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
; SVE2-NEXT:    xar z0.s, z0.s, z1.s, #7
; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
; SVE2-NEXT:    ret
entry:
  %or = call <2 x i32> @llvm.fshl(<2 x i32> %r, <2 x i32> %r, <2 x i32> splat (i32 25))
  ret <2 x i32> %or
}

define <4 x i16> @xar_v4i16(<4 x i16> %x, <4 x i16> %y) {
; SHA3-LABEL: xar_v4i16:
; SHA3:       // %bb.0: // %entry
; SHA3-NEXT:    eor v1.8b, v0.8b, v1.8b
; SHA3-NEXT:    shl v0.4h, v1.4h, #9
; SHA3-NEXT:    usra v0.4h, v1.4h, #7
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: xar_v4i16:
; NOSHA3:       // %bb.0: // %entry
; NOSHA3-NEXT:    eor v1.8b, v0.8b, v1.8b
; NOSHA3-NEXT:    shl v0.4h, v1.4h, #9
; NOSHA3-NEXT:    usra v0.4h, v1.4h, #7
; NOSHA3-NEXT:    ret
;
; SVE2-LABEL: xar_v4i16:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
; SVE2-NEXT:    // kill: def $d1 killed $d1 def $z1
; SVE2-NEXT:    xar z0.h, z0.h, z1.h, #7
; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
; SVE2-NEXT:    ret
entry:
  %a = xor <4 x i16> %x, %y
  %b = call <4 x i16> @llvm.fshl(<4 x i16> %a, <4 x i16> %a, <4 x i16> splat (i16 25))
  ret <4 x i16> %b
}

define <4 x i16> @xar_instead_of_or_v4i16(<4 x i16> %r) {
; SHA3-LABEL: xar_instead_of_or_v4i16:
; SHA3:       // %bb.0: // %entry
; SHA3-NEXT:    shl v1.4h, v0.4h, #9
; SHA3-NEXT:    usra v1.4h, v0.4h, #7
; SHA3-NEXT:    fmov d0, d1
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: xar_instead_of_or_v4i16:
; NOSHA3:       // %bb.0: // %entry
; NOSHA3-NEXT:    shl v1.4h, v0.4h, #9
; NOSHA3-NEXT:    usra v1.4h, v0.4h, #7
; NOSHA3-NEXT:    fmov d0, d1
; NOSHA3-NEXT:    ret
;
; SVE2-LABEL: xar_instead_of_or_v4i16:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    movi v1.2d, #0000000000000000
; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
; SVE2-NEXT:    xar z0.h, z0.h, z1.h, #7
; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
; SVE2-NEXT:    ret
entry:
  %or = call <4 x i16> @llvm.fshl(<4 x i16> %r, <4 x i16> %r, <4 x i16> splat (i16 25))
  ret <4 x i16> %or
}

define <8 x i8> @xar_v8i8(<8 x i8> %x, <8 x i8> %y) {
; SHA3-LABEL: xar_v8i8:
; SHA3:       // %bb.0: // %entry
; SHA3-NEXT:    eor v1.8b, v0.8b, v1.8b
; SHA3-NEXT:    add v0.8b, v1.8b, v1.8b
; SHA3-NEXT:    usra v0.8b, v1.8b, #7
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: xar_v8i8:
; NOSHA3:       // %bb.0: // %entry
; NOSHA3-NEXT:    eor v1.8b, v0.8b, v1.8b
; NOSHA3-NEXT:    add v0.8b, v1.8b, v1.8b
; NOSHA3-NEXT:    usra v0.8b, v1.8b, #7
; NOSHA3-NEXT:    ret
;
; SVE2-LABEL: xar_v8i8:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
; SVE2-NEXT:    // kill: def $d1 killed $d1 def $z1
; SVE2-NEXT:    xar z0.b, z0.b, z1.b, #7
; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
; SVE2-NEXT:    ret
entry:
  %a = xor <8 x i8> %x, %y
  %b = call <8 x i8> @llvm.fshl(<8 x i8> %a, <8 x i8> %a, <8 x i8> splat (i8 25))
  ret <8 x i8> %b
}
define <8 x i8> @xar_instead_of_or_v8i8(<8 x i8> %r) {
; SHA3-LABEL: xar_instead_of_or_v8i8:
; SHA3:       // %bb.0: // %entry
; SHA3-NEXT:    add v1.8b, v0.8b, v0.8b
; SHA3-NEXT:    usra v1.8b, v0.8b, #7
; SHA3-NEXT:    fmov d0, d1
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: xar_instead_of_or_v8i8:
; NOSHA3:       // %bb.0: // %entry
; NOSHA3-NEXT:    add v1.8b, v0.8b, v0.8b
; NOSHA3-NEXT:    usra v1.8b, v0.8b, #7
; NOSHA3-NEXT:    fmov d0, d1
; NOSHA3-NEXT:    ret
;
; SVE2-LABEL: xar_instead_of_or_v8i8:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    movi v1.2d, #0000000000000000
; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
; SVE2-NEXT:    xar z0.b, z0.b, z1.b, #7
; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
; SVE2-NEXT:    ret
entry:
  %or = call <8 x i8> @llvm.fshl(<8 x i8> %r, <8 x i8> %r, <8 x i8> splat (i8 25))
  ret <8 x i8> %or
}

declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
declare <16 x i8> @llvm.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)