; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+a,+no-trailing-seq-cst-fence -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV32IA,RV32IA-WMO %s
; RUN: llc -mtriple=riscv32 -mattr=+a,+ztso,+no-trailing-seq-cst-fence -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV32IA,RV32IA-TSO %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+no-trailing-seq-cst-fence -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV64IA,RV64IA-WMO %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+ztso,+no-trailing-seq-cst-fence -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV64IA,RV64IA-TSO %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV32IA,RV32IA-WMO-TRAILING-FENCE %s
; RUN: llc -mtriple=riscv32 -mattr=+a,+ztso -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV32IA,RV32IA-TSO-TRAILING-FENCE %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV64IA,RV64IA-WMO-TRAILING-FENCE %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+ztso -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV64IA,RV64IA-TSO-TRAILING-FENCE %s
; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-zalasr -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV32IA,RV32IA-ZALASR,RV32IA-ZALASR-WMO %s
; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-zalasr,+ztso -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV32IA,RV32IA-ZALASR,RV32IA-ZALASR-TSO %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zalasr -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV64IA,RV64IA-ZALASR,RV64IA-ZALASR-WMO %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zalasr,+ztso -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV64IA,RV64IA-ZALASR,RV64IA-ZALASR-TSO %s

define zeroext i1 @atomic_load_i1_unordered(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i1_unordered:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_1
; RV32I-NEXT:    andi a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i1_unordered:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lb a0, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i1_unordered:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_1
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i1_unordered:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lb a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i8, ptr %a unordered, align 1, !range !0, !noundef !1
  %2 = trunc nuw i8 %1 to i1
  ret i1 %2
}
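
; The i1 tests load an i8; !range !0 (defined at the end of the file as the
; half-open range [0, 2)) together with !noundef guarantees the loaded byte is
; already 0 or 1, so the inline lowerings can return it unmasked. Unordered
; and monotonic loads need no fences: with +a they are a plain load, and
; without it they become an __atomic_load_1 libcall (ordering argument 0 in
; a1).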

define zeroext i1 @atomic_load_i1_monotonic(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i1_monotonic:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_1
; RV32I-NEXT:    andi a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i1_monotonic:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lb a0, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i1_monotonic:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_1
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i1_monotonic:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lb a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i8, ptr %a monotonic, align 1, !range !0, !noundef !1
  %2 = trunc nuw i8 %1 to i1
  ret i1 %2
}

define zeroext i1 @atomic_load_i1_acquire(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i1_acquire:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 2
; RV32I-NEXT:    call __atomic_load_1
; RV32I-NEXT:    andi a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-WMO-LABEL: atomic_load_i1_acquire:
; RV32IA-WMO:       # %bb.0:
; RV32IA-WMO-NEXT:    lb a0, 0(a0)
; RV32IA-WMO-NEXT:    fence r, rw
; RV32IA-WMO-NEXT:    ret
;
; RV32IA-TSO-LABEL: atomic_load_i1_acquire:
; RV32IA-TSO:       # %bb.0:
; RV32IA-TSO-NEXT:    lb a0, 0(a0)
; RV32IA-TSO-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i1_acquire:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    call __atomic_load_1
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-WMO-LABEL: atomic_load_i1_acquire:
; RV64IA-WMO:       # %bb.0:
; RV64IA-WMO-NEXT:    lb a0, 0(a0)
; RV64IA-WMO-NEXT:    fence r, rw
; RV64IA-WMO-NEXT:    ret
;
; RV64IA-TSO-LABEL: atomic_load_i1_acquire:
; RV64IA-TSO:       # %bb.0:
; RV64IA-TSO-NEXT:    lb a0, 0(a0)
; RV64IA-TSO-NEXT:    ret
;
; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i1_acquire:
; RV32IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV32IA-WMO-TRAILING-FENCE-NEXT:    lb a0, 0(a0)
; RV32IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i1_acquire:
; RV32IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV32IA-TSO-TRAILING-FENCE-NEXT:    lb a0, 0(a0)
; RV32IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i1_acquire:
; RV64IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV64IA-WMO-TRAILING-FENCE-NEXT:    lb a0, 0(a0)
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i1_acquire:
; RV64IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT:    lb a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-ZALASR-WMO-LABEL: atomic_load_i1_acquire:
; RV32IA-ZALASR-WMO:       # %bb.0:
; RV32IA-ZALASR-WMO-NEXT:    lb.aq a0, (a0)
; RV32IA-ZALASR-WMO-NEXT:    ret
;
; RV32IA-ZALASR-TSO-LABEL: atomic_load_i1_acquire:
; RV32IA-ZALASR-TSO:       # %bb.0:
; RV32IA-ZALASR-TSO-NEXT:    lb a0, 0(a0)
; RV32IA-ZALASR-TSO-NEXT:    ret
;
; RV64IA-ZALASR-WMO-LABEL: atomic_load_i1_acquire:
; RV64IA-ZALASR-WMO:       # %bb.0:
; RV64IA-ZALASR-WMO-NEXT:    lb.aq a0, (a0)
; RV64IA-ZALASR-WMO-NEXT:    ret
;
; RV64IA-ZALASR-TSO-LABEL: atomic_load_i1_acquire:
; RV64IA-ZALASR-TSO:       # %bb.0:
; RV64IA-ZALASR-TSO-NEXT:    lb a0, 0(a0)
; RV64IA-ZALASR-TSO-NEXT:    ret
  %1 = load atomic i8, ptr %a acquire, align 1, !range !0, !noundef !1
  %2 = trunc nuw i8 %1 to i1
  ret i1 %2
}
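
; seq_cst loads are ordered with a leading 'fence rw, rw'; under WMO a
; trailing 'fence r, rw' is also required, while Ztso keeps only the leading
; fence. With Zalasr, a load-acquire (lb.aq) is strong enough on its own.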

define zeroext i1 @atomic_load_i1_seq_cst(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i1_seq_cst:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __atomic_load_1
; RV32I-NEXT:    andi a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-WMO-LABEL: atomic_load_i1_seq_cst:
; RV32IA-WMO:       # %bb.0:
; RV32IA-WMO-NEXT:    fence rw, rw
; RV32IA-WMO-NEXT:    lb a0, 0(a0)
; RV32IA-WMO-NEXT:    fence r, rw
; RV32IA-WMO-NEXT:    ret
;
; RV32IA-TSO-LABEL: atomic_load_i1_seq_cst:
; RV32IA-TSO:       # %bb.0:
; RV32IA-TSO-NEXT:    fence rw, rw
; RV32IA-TSO-NEXT:    lb a0, 0(a0)
; RV32IA-TSO-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i1_seq_cst:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __atomic_load_1
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-WMO-LABEL: atomic_load_i1_seq_cst:
; RV64IA-WMO:       # %bb.0:
; RV64IA-WMO-NEXT:    fence rw, rw
; RV64IA-WMO-NEXT:    lb a0, 0(a0)
; RV64IA-WMO-NEXT:    fence r, rw
; RV64IA-WMO-NEXT:    ret
;
; RV64IA-TSO-LABEL: atomic_load_i1_seq_cst:
; RV64IA-TSO:       # %bb.0:
; RV64IA-TSO-NEXT:    fence rw, rw
; RV64IA-TSO-NEXT:    lb a0, 0(a0)
; RV64IA-TSO-NEXT:    ret
;
; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i1_seq_cst:
; RV32IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV32IA-WMO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT:    lb a0, 0(a0)
; RV32IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i1_seq_cst:
; RV32IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV32IA-TSO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV32IA-TSO-TRAILING-FENCE-NEXT:    lb a0, 0(a0)
; RV32IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i1_seq_cst:
; RV64IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    lb a0, 0(a0)
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i1_seq_cst:
; RV64IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV64IA-TSO-TRAILING-FENCE-NEXT:    lb a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-ZALASR-LABEL: atomic_load_i1_seq_cst:
; RV32IA-ZALASR:       # %bb.0:
; RV32IA-ZALASR-NEXT:    lb.aq a0, (a0)
; RV32IA-ZALASR-NEXT:    ret
;
; RV64IA-ZALASR-LABEL: atomic_load_i1_seq_cst:
; RV64IA-ZALASR:       # %bb.0:
; RV64IA-ZALASR-NEXT:    lb.aq a0, (a0)
; RV64IA-ZALASR-NEXT:    ret
  %1 = load atomic i8, ptr %a seq_cst, align 1, !range !0, !noundef !1
  %2 = trunc nuw i8 %1 to i1
  ret i1 %2
}
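
; For i8 the zeroext return is free with +a: lbu already zero-extends. The
; libcall path masks the result with zext.b instead.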

define zeroext i8 @atomic_load_i8_unordered(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i8_unordered:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_1
; RV32I-NEXT:    zext.b a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i8_unordered:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lbu a0, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i8_unordered:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_1
; RV64I-NEXT:    zext.b a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i8_unordered:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lbu a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i8, ptr %a unordered, align 1
  ret i8 %1
}

define zeroext i8 @atomic_load_i8_monotonic(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i8_monotonic:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_1
; RV32I-NEXT:    zext.b a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i8_monotonic:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lbu a0, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i8_monotonic:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_1
; RV64I-NEXT:    zext.b a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i8_monotonic:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lbu a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i8, ptr %a monotonic, align 1
  ret i8 %1
}
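
; lb.aq, like lb, sign-extends, so the Zalasr WMO lowering needs a zext.b to
; satisfy the zeroext ABI; the Ztso variant can keep using a plain lbu.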

define zeroext i8 @atomic_load_i8_acquire(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i8_acquire:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 2
; RV32I-NEXT:    call __atomic_load_1
; RV32I-NEXT:    zext.b a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-WMO-LABEL: atomic_load_i8_acquire:
; RV32IA-WMO:       # %bb.0:
; RV32IA-WMO-NEXT:    lbu a0, 0(a0)
; RV32IA-WMO-NEXT:    fence r, rw
; RV32IA-WMO-NEXT:    ret
;
; RV32IA-TSO-LABEL: atomic_load_i8_acquire:
; RV32IA-TSO:       # %bb.0:
; RV32IA-TSO-NEXT:    lbu a0, 0(a0)
; RV32IA-TSO-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i8_acquire:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    call __atomic_load_1
; RV64I-NEXT:    zext.b a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-WMO-LABEL: atomic_load_i8_acquire:
; RV64IA-WMO:       # %bb.0:
; RV64IA-WMO-NEXT:    lbu a0, 0(a0)
; RV64IA-WMO-NEXT:    fence r, rw
; RV64IA-WMO-NEXT:    ret
;
; RV64IA-TSO-LABEL: atomic_load_i8_acquire:
; RV64IA-TSO:       # %bb.0:
; RV64IA-TSO-NEXT:    lbu a0, 0(a0)
; RV64IA-TSO-NEXT:    ret
;
; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire:
; RV32IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV32IA-WMO-TRAILING-FENCE-NEXT:    lbu a0, 0(a0)
; RV32IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire:
; RV32IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV32IA-TSO-TRAILING-FENCE-NEXT:    lbu a0, 0(a0)
; RV32IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire:
; RV64IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV64IA-WMO-TRAILING-FENCE-NEXT:    lbu a0, 0(a0)
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire:
; RV64IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT:    lbu a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-ZALASR-WMO-LABEL: atomic_load_i8_acquire:
; RV32IA-ZALASR-WMO:       # %bb.0:
; RV32IA-ZALASR-WMO-NEXT:    lb.aq a0, (a0)
; RV32IA-ZALASR-WMO-NEXT:    zext.b a0, a0
; RV32IA-ZALASR-WMO-NEXT:    ret
;
; RV32IA-ZALASR-TSO-LABEL: atomic_load_i8_acquire:
; RV32IA-ZALASR-TSO:       # %bb.0:
; RV32IA-ZALASR-TSO-NEXT:    lbu a0, 0(a0)
; RV32IA-ZALASR-TSO-NEXT:    ret
;
; RV64IA-ZALASR-WMO-LABEL: atomic_load_i8_acquire:
; RV64IA-ZALASR-WMO:       # %bb.0:
; RV64IA-ZALASR-WMO-NEXT:    lb.aq a0, (a0)
; RV64IA-ZALASR-WMO-NEXT:    zext.b a0, a0
; RV64IA-ZALASR-WMO-NEXT:    ret
;
; RV64IA-ZALASR-TSO-LABEL: atomic_load_i8_acquire:
; RV64IA-ZALASR-TSO:       # %bb.0:
; RV64IA-ZALASR-TSO-NEXT:    lbu a0, 0(a0)
; RV64IA-ZALASR-TSO-NEXT:    ret
  %1 = load atomic i8, ptr %a acquire, align 1
  ret i8 %1
}

define zeroext i8 @atomic_load_i8_seq_cst(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i8_seq_cst:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __atomic_load_1
; RV32I-NEXT:    zext.b a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-WMO-LABEL: atomic_load_i8_seq_cst:
; RV32IA-WMO:       # %bb.0:
; RV32IA-WMO-NEXT:    fence rw, rw
; RV32IA-WMO-NEXT:    lbu a0, 0(a0)
; RV32IA-WMO-NEXT:    fence r, rw
; RV32IA-WMO-NEXT:    ret
;
; RV32IA-TSO-LABEL: atomic_load_i8_seq_cst:
; RV32IA-TSO:       # %bb.0:
; RV32IA-TSO-NEXT:    fence rw, rw
; RV32IA-TSO-NEXT:    lbu a0, 0(a0)
; RV32IA-TSO-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i8_seq_cst:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __atomic_load_1
; RV64I-NEXT:    zext.b a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-WMO-LABEL: atomic_load_i8_seq_cst:
; RV64IA-WMO:       # %bb.0:
; RV64IA-WMO-NEXT:    fence rw, rw
; RV64IA-WMO-NEXT:    lbu a0, 0(a0)
; RV64IA-WMO-NEXT:    fence r, rw
; RV64IA-WMO-NEXT:    ret
;
; RV64IA-TSO-LABEL: atomic_load_i8_seq_cst:
; RV64IA-TSO:       # %bb.0:
; RV64IA-TSO-NEXT:    fence rw, rw
; RV64IA-TSO-NEXT:    lbu a0, 0(a0)
; RV64IA-TSO-NEXT:    ret
;
; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst:
; RV32IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV32IA-WMO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT:    lbu a0, 0(a0)
; RV32IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst:
; RV32IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV32IA-TSO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV32IA-TSO-TRAILING-FENCE-NEXT:    lbu a0, 0(a0)
; RV32IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst:
; RV64IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    lbu a0, 0(a0)
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst:
; RV64IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV64IA-TSO-TRAILING-FENCE-NEXT:    lbu a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-ZALASR-LABEL: atomic_load_i8_seq_cst:
; RV32IA-ZALASR:       # %bb.0:
; RV32IA-ZALASR-NEXT:    lb.aq a0, (a0)
; RV32IA-ZALASR-NEXT:    zext.b a0, a0
; RV32IA-ZALASR-NEXT:    ret
;
; RV64IA-ZALASR-LABEL: atomic_load_i8_seq_cst:
; RV64IA-ZALASR:       # %bb.0:
; RV64IA-ZALASR-NEXT:    lb.aq a0, (a0)
; RV64IA-ZALASR-NEXT:    zext.b a0, a0
; RV64IA-ZALASR-NEXT:    ret
  %1 = load atomic i8, ptr %a seq_cst, align 1
  ret i8 %1
}
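
; For i16 the inline lowering uses lhu, which zero-extends; the libcall path
; and the sign-extending lh.aq clear the upper bits with an slli/srli pair.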

define zeroext i16 @atomic_load_i16_unordered(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i16_unordered:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_2
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i16_unordered:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lhu a0, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i16_unordered:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_2
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 48
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i16_unordered:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lhu a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i16, ptr %a unordered, align 2
  ret i16 %1
}

define zeroext i16 @atomic_load_i16_monotonic(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i16_monotonic:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_2
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i16_monotonic:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lhu a0, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i16_monotonic:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_2
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 48
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i16_monotonic:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lhu a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i16, ptr %a monotonic, align 2
  ret i16 %1
}

define zeroext i16 @atomic_load_i16_acquire(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i16_acquire:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 2
; RV32I-NEXT:    call __atomic_load_2
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-WMO-LABEL: atomic_load_i16_acquire:
; RV32IA-WMO:       # %bb.0:
; RV32IA-WMO-NEXT:    lhu a0, 0(a0)
; RV32IA-WMO-NEXT:    fence r, rw
; RV32IA-WMO-NEXT:    ret
;
; RV32IA-TSO-LABEL: atomic_load_i16_acquire:
; RV32IA-TSO:       # %bb.0:
; RV32IA-TSO-NEXT:    lhu a0, 0(a0)
; RV32IA-TSO-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i16_acquire:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    call __atomic_load_2
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 48
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-WMO-LABEL: atomic_load_i16_acquire:
; RV64IA-WMO:       # %bb.0:
; RV64IA-WMO-NEXT:    lhu a0, 0(a0)
; RV64IA-WMO-NEXT:    fence r, rw
; RV64IA-WMO-NEXT:    ret
;
; RV64IA-TSO-LABEL: atomic_load_i16_acquire:
; RV64IA-TSO:       # %bb.0:
; RV64IA-TSO-NEXT:    lhu a0, 0(a0)
; RV64IA-TSO-NEXT:    ret
;
; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i16_acquire:
; RV32IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV32IA-WMO-TRAILING-FENCE-NEXT:    lhu a0, 0(a0)
; RV32IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i16_acquire:
; RV32IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV32IA-TSO-TRAILING-FENCE-NEXT:    lhu a0, 0(a0)
; RV32IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i16_acquire:
; RV64IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV64IA-WMO-TRAILING-FENCE-NEXT:    lhu a0, 0(a0)
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i16_acquire:
; RV64IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT:    lhu a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-ZALASR-WMO-LABEL: atomic_load_i16_acquire:
; RV32IA-ZALASR-WMO:       # %bb.0:
; RV32IA-ZALASR-WMO-NEXT:    lh.aq a0, (a0)
; RV32IA-ZALASR-WMO-NEXT:    slli a0, a0, 16
; RV32IA-ZALASR-WMO-NEXT:    srli a0, a0, 16
; RV32IA-ZALASR-WMO-NEXT:    ret
;
; RV32IA-ZALASR-TSO-LABEL: atomic_load_i16_acquire:
; RV32IA-ZALASR-TSO:       # %bb.0:
; RV32IA-ZALASR-TSO-NEXT:    lhu a0, 0(a0)
; RV32IA-ZALASR-TSO-NEXT:    ret
;
; RV64IA-ZALASR-WMO-LABEL: atomic_load_i16_acquire:
; RV64IA-ZALASR-WMO:       # %bb.0:
; RV64IA-ZALASR-WMO-NEXT:    lh.aq a0, (a0)
; RV64IA-ZALASR-WMO-NEXT:    slli a0, a0, 48
; RV64IA-ZALASR-WMO-NEXT:    srli a0, a0, 48
; RV64IA-ZALASR-WMO-NEXT:    ret
;
; RV64IA-ZALASR-TSO-LABEL: atomic_load_i16_acquire:
; RV64IA-ZALASR-TSO:       # %bb.0:
; RV64IA-ZALASR-TSO-NEXT:    lhu a0, 0(a0)
; RV64IA-ZALASR-TSO-NEXT:    ret
  %1 = load atomic i16, ptr %a acquire, align 2
  ret i16 %1
}

define zeroext i16 @atomic_load_i16_seq_cst(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i16_seq_cst:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __atomic_load_2
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-WMO-LABEL: atomic_load_i16_seq_cst:
; RV32IA-WMO:       # %bb.0:
; RV32IA-WMO-NEXT:    fence rw, rw
; RV32IA-WMO-NEXT:    lhu a0, 0(a0)
; RV32IA-WMO-NEXT:    fence r, rw
; RV32IA-WMO-NEXT:    ret
;
; RV32IA-TSO-LABEL: atomic_load_i16_seq_cst:
; RV32IA-TSO:       # %bb.0:
; RV32IA-TSO-NEXT:    fence rw, rw
; RV32IA-TSO-NEXT:    lhu a0, 0(a0)
; RV32IA-TSO-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i16_seq_cst:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __atomic_load_2
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 48
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-WMO-LABEL: atomic_load_i16_seq_cst:
; RV64IA-WMO:       # %bb.0:
; RV64IA-WMO-NEXT:    fence rw, rw
; RV64IA-WMO-NEXT:    lhu a0, 0(a0)
; RV64IA-WMO-NEXT:    fence r, rw
; RV64IA-WMO-NEXT:    ret
;
; RV64IA-TSO-LABEL: atomic_load_i16_seq_cst:
; RV64IA-TSO:       # %bb.0:
; RV64IA-TSO-NEXT:    fence rw, rw
; RV64IA-TSO-NEXT:    lhu a0, 0(a0)
; RV64IA-TSO-NEXT:    ret
;
; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i16_seq_cst:
; RV32IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV32IA-WMO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT:    lhu a0, 0(a0)
; RV32IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i16_seq_cst:
; RV32IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV32IA-TSO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV32IA-TSO-TRAILING-FENCE-NEXT:    lhu a0, 0(a0)
; RV32IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i16_seq_cst:
; RV64IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    lhu a0, 0(a0)
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i16_seq_cst:
; RV64IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV64IA-TSO-TRAILING-FENCE-NEXT:    lhu a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-ZALASR-LABEL: atomic_load_i16_seq_cst:
; RV32IA-ZALASR:       # %bb.0:
; RV32IA-ZALASR-NEXT:    lh.aq a0, (a0)
; RV32IA-ZALASR-NEXT:    slli a0, a0, 16
; RV32IA-ZALASR-NEXT:    srli a0, a0, 16
; RV32IA-ZALASR-NEXT:    ret
;
; RV64IA-ZALASR-LABEL: atomic_load_i16_seq_cst:
; RV64IA-ZALASR:       # %bb.0:
; RV64IA-ZALASR-NEXT:    lh.aq a0, (a0)
; RV64IA-ZALASR-NEXT:    slli a0, a0, 48
; RV64IA-ZALASR-NEXT:    srli a0, a0, 48
; RV64IA-ZALASR-NEXT:    ret
  %1 = load atomic i16, ptr %a seq_cst, align 2
  ret i16 %1
}
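
; For i32 the RV32 lowerings return the loaded word as-is (lw), since zeroext
; i32 occupies the full register; RV64 must zero-extend, using lwu with +a or
; slli/srli after the libcall.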

define zeroext i32 @atomic_load_i32_unordered(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i32_unordered:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_4
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i32_unordered:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lw a0, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i32_unordered:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_4
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i32_unordered:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lwu a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i32, ptr %a unordered, align 4
  ret i32 %1
}

define zeroext i32 @atomic_load_i32_monotonic(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i32_monotonic:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_4
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i32_monotonic:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    lw a0, 0(a0)
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i32_monotonic:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_4
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i32_monotonic:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    lwu a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i32, ptr %a monotonic, align 4
  ret i32 %1
}
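
; There is no zero-extending load-acquire: lw.aq sign-extends, so the RV64
; Zalasr WMO lowering pairs it with slli/srli, while the Ztso variant can use
; a plain lwu.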

define zeroext i32 @atomic_load_i32_acquire(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i32_acquire:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 2
; RV32I-NEXT:    call __atomic_load_4
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-WMO-LABEL: atomic_load_i32_acquire:
; RV32IA-WMO:       # %bb.0:
; RV32IA-WMO-NEXT:    lw a0, 0(a0)
; RV32IA-WMO-NEXT:    fence r, rw
; RV32IA-WMO-NEXT:    ret
;
; RV32IA-TSO-LABEL: atomic_load_i32_acquire:
; RV32IA-TSO:       # %bb.0:
; RV32IA-TSO-NEXT:    lw a0, 0(a0)
; RV32IA-TSO-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i32_acquire:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    call __atomic_load_4
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-WMO-LABEL: atomic_load_i32_acquire:
; RV64IA-WMO:       # %bb.0:
; RV64IA-WMO-NEXT:    lwu a0, 0(a0)
; RV64IA-WMO-NEXT:    fence r, rw
; RV64IA-WMO-NEXT:    ret
;
; RV64IA-TSO-LABEL: atomic_load_i32_acquire:
; RV64IA-TSO:       # %bb.0:
; RV64IA-TSO-NEXT:    lwu a0, 0(a0)
; RV64IA-TSO-NEXT:    ret
;
; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i32_acquire:
; RV32IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV32IA-WMO-TRAILING-FENCE-NEXT:    lw a0, 0(a0)
; RV32IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i32_acquire:
; RV32IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV32IA-TSO-TRAILING-FENCE-NEXT:    lw a0, 0(a0)
; RV32IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i32_acquire:
; RV64IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV64IA-WMO-TRAILING-FENCE-NEXT:    lwu a0, 0(a0)
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i32_acquire:
; RV64IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT:    lwu a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-ZALASR-WMO-LABEL: atomic_load_i32_acquire:
; RV32IA-ZALASR-WMO:       # %bb.0:
; RV32IA-ZALASR-WMO-NEXT:    lw.aq a0, (a0)
; RV32IA-ZALASR-WMO-NEXT:    ret
;
; RV32IA-ZALASR-TSO-LABEL: atomic_load_i32_acquire:
; RV32IA-ZALASR-TSO:       # %bb.0:
; RV32IA-ZALASR-TSO-NEXT:    lw a0, 0(a0)
; RV32IA-ZALASR-TSO-NEXT:    ret
;
; RV64IA-ZALASR-WMO-LABEL: atomic_load_i32_acquire:
; RV64IA-ZALASR-WMO:       # %bb.0:
; RV64IA-ZALASR-WMO-NEXT:    lw.aq a0, (a0)
; RV64IA-ZALASR-WMO-NEXT:    slli a0, a0, 32
; RV64IA-ZALASR-WMO-NEXT:    srli a0, a0, 32
; RV64IA-ZALASR-WMO-NEXT:    ret
;
; RV64IA-ZALASR-TSO-LABEL: atomic_load_i32_acquire:
; RV64IA-ZALASR-TSO:       # %bb.0:
; RV64IA-ZALASR-TSO-NEXT:    lwu a0, 0(a0)
; RV64IA-ZALASR-TSO-NEXT:    ret
  %1 = load atomic i32, ptr %a acquire, align 4
  ret i32 %1
}

define zeroext i32 @atomic_load_i32_seq_cst(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i32_seq_cst:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __atomic_load_4
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-WMO-LABEL: atomic_load_i32_seq_cst:
; RV32IA-WMO:       # %bb.0:
; RV32IA-WMO-NEXT:    fence rw, rw
; RV32IA-WMO-NEXT:    lw a0, 0(a0)
; RV32IA-WMO-NEXT:    fence r, rw
; RV32IA-WMO-NEXT:    ret
;
; RV32IA-TSO-LABEL: atomic_load_i32_seq_cst:
; RV32IA-TSO:       # %bb.0:
; RV32IA-TSO-NEXT:    fence rw, rw
; RV32IA-TSO-NEXT:    lw a0, 0(a0)
; RV32IA-TSO-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i32_seq_cst:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __atomic_load_4
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-WMO-LABEL: atomic_load_i32_seq_cst:
; RV64IA-WMO:       # %bb.0:
; RV64IA-WMO-NEXT:    fence rw, rw
; RV64IA-WMO-NEXT:    lwu a0, 0(a0)
; RV64IA-WMO-NEXT:    fence r, rw
; RV64IA-WMO-NEXT:    ret
;
; RV64IA-TSO-LABEL: atomic_load_i32_seq_cst:
; RV64IA-TSO:       # %bb.0:
; RV64IA-TSO-NEXT:    fence rw, rw
; RV64IA-TSO-NEXT:    lwu a0, 0(a0)
; RV64IA-TSO-NEXT:    ret
;
; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i32_seq_cst:
; RV32IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV32IA-WMO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT:    lw a0, 0(a0)
; RV32IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i32_seq_cst:
; RV32IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV32IA-TSO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV32IA-TSO-TRAILING-FENCE-NEXT:    lw a0, 0(a0)
; RV32IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i32_seq_cst:
; RV64IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    lwu a0, 0(a0)
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i32_seq_cst:
; RV64IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV64IA-TSO-TRAILING-FENCE-NEXT:    lwu a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV32IA-ZALASR-LABEL: atomic_load_i32_seq_cst:
; RV32IA-ZALASR:       # %bb.0:
; RV32IA-ZALASR-NEXT:    lw.aq a0, (a0)
; RV32IA-ZALASR-NEXT:    ret
;
; RV64IA-ZALASR-LABEL: atomic_load_i32_seq_cst:
; RV64IA-ZALASR:       # %bb.0:
; RV64IA-ZALASR-NEXT:    lw.aq a0, (a0)
; RV64IA-ZALASR-NEXT:    slli a0, a0, 32
; RV64IA-ZALASR-NEXT:    srli a0, a0, 32
; RV64IA-ZALASR-NEXT:    ret
  %1 = load atomic i32, ptr %a seq_cst, align 4
  ret i32 %1
}
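
; i64 atomic loads cannot be inlined on RV32 (there is no 8-byte load in the
; base ISA), so even RV32IA calls __atomic_load_8. On RV64 with +a, ld already
; fills the whole register, so no extension is needed.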

define zeroext i64 @atomic_load_i64_unordered(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i64_unordered:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_8
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i64_unordered:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    addi sp, sp, -16
; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT:    li a1, 0
; RV32IA-NEXT:    call __atomic_load_8
; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT:    addi sp, sp, 16
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i64_unordered:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_8
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i64_unordered:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    ld a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i64, ptr %a unordered, align 8
  ret i64 %1
}

define zeroext i64 @atomic_load_i64_monotonic(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i64_monotonic:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __atomic_load_8
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i64_monotonic:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    addi sp, sp, -16
; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT:    li a1, 0
; RV32IA-NEXT:    call __atomic_load_8
; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT:    addi sp, sp, 16
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i64_monotonic:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __atomic_load_8
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-LABEL: atomic_load_i64_monotonic:
; RV64IA:       # %bb.0:
; RV64IA-NEXT:    ld a0, 0(a0)
; RV64IA-NEXT:    ret
  %1 = load atomic i64, ptr %a monotonic, align 8
  ret i64 %1
}

define zeroext i64 @atomic_load_i64_acquire(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i64_acquire:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 2
; RV32I-NEXT:    call __atomic_load_8
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i64_acquire:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    addi sp, sp, -16
; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT:    li a1, 2
; RV32IA-NEXT:    call __atomic_load_8
; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT:    addi sp, sp, 16
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i64_acquire:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    call __atomic_load_8
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-WMO-LABEL: atomic_load_i64_acquire:
; RV64IA-WMO:       # %bb.0:
; RV64IA-WMO-NEXT:    ld a0, 0(a0)
; RV64IA-WMO-NEXT:    fence r, rw
; RV64IA-WMO-NEXT:    ret
;
; RV64IA-TSO-LABEL: atomic_load_i64_acquire:
; RV64IA-TSO:       # %bb.0:
; RV64IA-TSO-NEXT:    ld a0, 0(a0)
; RV64IA-TSO-NEXT:    ret
;
; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i64_acquire:
; RV64IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV64IA-WMO-TRAILING-FENCE-NEXT:    ld a0, 0(a0)
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i64_acquire:
; RV64IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT:    ld a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-ZALASR-WMO-LABEL: atomic_load_i64_acquire:
; RV64IA-ZALASR-WMO:       # %bb.0:
; RV64IA-ZALASR-WMO-NEXT:    ld.aq a0, (a0)
; RV64IA-ZALASR-WMO-NEXT:    ret
;
; RV64IA-ZALASR-TSO-LABEL: atomic_load_i64_acquire:
; RV64IA-ZALASR-TSO:       # %bb.0:
; RV64IA-ZALASR-TSO-NEXT:    ld a0, 0(a0)
; RV64IA-ZALASR-TSO-NEXT:    ret
  %1 = load atomic i64, ptr %a acquire, align 8
  ret i64 %1
}

define zeroext i64 @atomic_load_i64_seq_cst(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i64_seq_cst:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 5
; RV32I-NEXT:    call __atomic_load_8
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IA-LABEL: atomic_load_i64_seq_cst:
; RV32IA:       # %bb.0:
; RV32IA-NEXT:    addi sp, sp, -16
; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT:    li a1, 5
; RV32IA-NEXT:    call __atomic_load_8
; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT:    addi sp, sp, 16
; RV32IA-NEXT:    ret
;
; RV64I-LABEL: atomic_load_i64_seq_cst:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __atomic_load_8
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IA-WMO-LABEL: atomic_load_i64_seq_cst:
; RV64IA-WMO:       # %bb.0:
; RV64IA-WMO-NEXT:    fence rw, rw
; RV64IA-WMO-NEXT:    ld a0, 0(a0)
; RV64IA-WMO-NEXT:    fence r, rw
; RV64IA-WMO-NEXT:    ret
;
; RV64IA-TSO-LABEL: atomic_load_i64_seq_cst:
; RV64IA-TSO:       # %bb.0:
; RV64IA-TSO-NEXT:    fence rw, rw
; RV64IA-TSO-NEXT:    ld a0, 0(a0)
; RV64IA-TSO-NEXT:    ret
;
; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i64_seq_cst:
; RV64IA-WMO-TRAILING-FENCE:       # %bb.0:
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    ld a0, 0(a0)
; RV64IA-WMO-TRAILING-FENCE-NEXT:    fence r, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i64_seq_cst:
; RV64IA-TSO-TRAILING-FENCE:       # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT:    fence rw, rw
; RV64IA-TSO-TRAILING-FENCE-NEXT:    ld a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT:    ret
;
; RV64IA-ZALASR-LABEL: atomic_load_i64_seq_cst:
; RV64IA-ZALASR:       # %bb.0:
; RV64IA-ZALASR-NEXT:    ld.aq a0, (a0)
; RV64IA-ZALASR-NEXT:    ret
  %1 = load atomic i64, ptr %a seq_cst, align 8
  ret i64 %1
}

!0 = !{i8 0, i8 2}
!1 = !{}