-rw-r--r-- clang/include/clang/Basic/DiagnosticDriverKinds.td | 8
-rw-r--r-- clang/lib/CodeGen/CGDebugInfo.cpp | 21
-rw-r--r-- clang/lib/CodeGen/CGDebugInfo.h | 7
-rw-r--r-- clang/lib/Frontend/CompilerInvocation.cpp | 26
-rw-r--r-- clang/test/CodeGenCXX/debug-info-dtor-implicit-args.cpp | 24
-rw-r--r-- clang/test/Preprocessor/print-header-json.c | 12
-rw-r--r-- clang/tools/driver/driver.cpp | 5
-rw-r--r-- libc/config/linux/x86_64/entrypoints.txt | 2
-rw-r--r-- libc/hdr/CMakeLists.txt | 9
-rw-r--r-- libc/hdr/offsetof_macros.h | 23
-rw-r--r-- libc/include/llvm-libc-types/CMakeLists.txt | 2
-rw-r--r-- libc/include/llvm-libc-types/jmp_buf.h | 15
-rw-r--r-- libc/include/setjmp.yaml | 16
-rw-r--r-- libc/src/setjmp/CMakeLists.txt | 27
-rw-r--r-- libc/src/setjmp/linux/CMakeLists.txt | 12
-rw-r--r-- libc/src/setjmp/linux/sigsetjmp_epilogue.cpp | 25
-rw-r--r-- libc/src/setjmp/setjmp_impl.h | 3
-rw-r--r-- libc/src/setjmp/siglongjmp.cpp | 23
-rw-r--r-- libc/src/setjmp/siglongjmp.h | 25
-rw-r--r-- libc/src/setjmp/sigsetjmp.h | 26
-rw-r--r-- libc/src/setjmp/sigsetjmp_epilogue.h | 19
-rw-r--r-- libc/src/setjmp/x86_64/CMakeLists.txt | 20
-rw-r--r-- libc/src/setjmp/x86_64/setjmp.cpp | 2
-rw-r--r-- libc/src/setjmp/x86_64/sigsetjmp.cpp | 68
-rw-r--r-- libc/test/src/setjmp/CMakeLists.txt | 17
-rw-r--r-- libc/test/src/setjmp/sigsetjmp_test.cpp | 88
-rw-r--r-- lldb/include/lldb/Interpreter/CommandObject.h | 11
-rw-r--r-- lldb/include/lldb/Utility/CompletionRequest.h | 24
-rw-r--r-- lldb/packages/Python/lldbsuite/test/lldbtest.py | 4
-rw-r--r-- lldb/source/API/SBCommandInterpreter.cpp | 15
-rw-r--r-- lldb/source/Commands/CommandCompletions.cpp | 19
-rw-r--r-- lldb/source/Commands/CommandObjectProcess.cpp | 13
-rw-r--r-- lldb/source/Commands/Options.td | 8
-rw-r--r-- lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp | 2
-rw-r--r-- lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp | 2
-rw-r--r-- lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp | 2
-rw-r--r-- lldb/test/API/commands/expression/completion/TestExprCompletion.py | 31
-rw-r--r-- lldb/test/API/commands/process/reverse-continue/Makefile | 3
-rw-r--r-- lldb/test/API/commands/process/reverse-continue/TestReverseContinue.py | 66
-rw-r--r-- lldb/test/API/commands/process/reverse-continue/TestReverseContinueNotSupported.py | 51
-rw-r--r-- lldb/test/API/commands/process/reverse-continue/main.c | 12
-rw-r--r-- lldb/tools/lldb-dap/Handler/NextRequestHandler.cpp | 3
-rw-r--r-- lldb/tools/lldb-dap/Handler/RequestHandler.h | 7
-rw-r--r-- lldb/tools/lldb-dap/Handler/StepInRequestHandler.cpp | 111
-rw-r--r-- lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp | 9
-rw-r--r-- lldb/tools/lldb-dap/Protocol/ProtocolRequests.h | 22
-rw-r--r-- llvm/docs/ReleaseNotes.md | 4
-rw-r--r-- llvm/include/llvm/ProfileData/IndexedMemProfData.h | 23
-rw-r--r-- llvm/lib/ProfileData/CMakeLists.txt | 1
-rw-r--r-- llvm/lib/ProfileData/IndexedMemProfData.cpp | 300
-rw-r--r-- llvm/lib/ProfileData/InstrProfWriter.cpp | 283
-rw-r--r-- llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp | 11
-rw-r--r-- llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp | 4
-rw-r--r-- llvm/lib/Target/RISCV/RISCVFrameLowering.cpp | 11
-rw-r--r-- llvm/lib/Target/RISCV/RISCVInstrInfoA.td | 33
-rw-r--r-- llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td | 26
-rw-r--r-- llvm/lib/Transforms/IPO/FunctionAttrs.cpp | 9
-rw-r--r-- llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll | 8
-rw-r--r-- llvm/test/CodeGen/NVPTX/and-or-setcc.ll | 4
-rw-r--r-- llvm/test/CodeGen/NVPTX/atomics.ll | 14
-rw-r--r-- llvm/test/CodeGen/NVPTX/bf16-instructions.ll | 122
-rw-r--r-- llvm/test/CodeGen/NVPTX/bf16x2-instructions-approx.ll | 4
-rw-r--r-- llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll | 26
-rw-r--r-- llvm/test/CodeGen/NVPTX/convert-fp-i8.ll | 16
-rw-r--r-- llvm/test/CodeGen/NVPTX/convert-sm100.ll | 8
-rw-r--r-- llvm/test/CodeGen/NVPTX/convert-sm100a.ll | 16
-rw-r--r-- llvm/test/CodeGen/NVPTX/convert-sm80.ll | 30
-rw-r--r-- llvm/test/CodeGen/NVPTX/convert-sm90.ll | 8
-rw-r--r-- llvm/test/CodeGen/NVPTX/copysign.ll | 12
-rw-r--r-- llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll | 4
-rw-r--r-- llvm/test/CodeGen/NVPTX/div.ll | 2
-rw-r--r-- llvm/test/CodeGen/NVPTX/f16-abs.ll | 2
-rw-r--r-- llvm/test/CodeGen/NVPTX/f16x2-instructions.ll | 98
-rw-r--r-- llvm/test/CodeGen/NVPTX/f32-ex2.ll | 4
-rw-r--r-- llvm/test/CodeGen/NVPTX/f32-lg2.ll | 4
-rw-r--r-- llvm/test/CodeGen/NVPTX/fabs-intrinsics.ll | 6
-rw-r--r-- llvm/test/CodeGen/NVPTX/fexp2.ll | 32
-rw-r--r-- llvm/test/CodeGen/NVPTX/flog2.ll | 18
-rw-r--r-- llvm/test/CodeGen/NVPTX/fma-relu-contract.ll | 24
-rw-r--r-- llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll | 20
-rw-r--r-- llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll | 40
-rw-r--r-- llvm/test/CodeGen/NVPTX/fp-contract.ll | 12
-rw-r--r-- llvm/test/CodeGen/NVPTX/frem.ll | 32
-rw-r--r-- llvm/test/CodeGen/NVPTX/i8x4-instructions.ll | 4
-rw-r--r-- llvm/test/CodeGen/NVPTX/intrinsics.ll | 8
-rw-r--r-- llvm/test/CodeGen/NVPTX/ldg-invariant.ll | 6
-rw-r--r-- llvm/test/CodeGen/NVPTX/ldu-ldg.ll | 8
-rw-r--r-- llvm/test/CodeGen/NVPTX/load-store-scalars.ll | 128
-rw-r--r-- llvm/test/CodeGen/NVPTX/load-store-vectors.ll | 48
-rw-r--r-- llvm/test/CodeGen/NVPTX/math-intrins.ll | 158
-rw-r--r-- llvm/test/CodeGen/NVPTX/misched_func_call.ll | 2
-rw-r--r-- llvm/test/CodeGen/NVPTX/param-add.ll | 2
-rw-r--r-- llvm/test/CodeGen/NVPTX/rcp-opt.ll | 6
-rw-r--r-- llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll | 36
-rw-r--r-- llvm/test/CodeGen/NVPTX/redux-sync-f32.ll | 16
-rw-r--r-- llvm/test/CodeGen/NVPTX/reg-types.ll | 4
-rw-r--r-- llvm/test/CodeGen/NVPTX/st-param-imm.ll | 36
-rw-r--r-- llvm/test/CodeGen/NVPTX/surf-read-cuda.ll | 4
-rw-r--r-- llvm/test/CodeGen/NVPTX/tex-read-cuda.ll | 6
-rw-r--r-- llvm/test/CodeGen/NVPTX/variadics-backend.ll | 2
-rw-r--r-- llvm/test/CodeGen/RISCV/callee-saved-gprs.ll | 236
-rw-r--r-- llvm/test/CodeGen/RISCV/push-pop-popret.ll | 1024
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll | 36
-rw-r--r-- llvm/test/CodeGen/RISCV/xqccmp-additional-stack.ll | 6
-rw-r--r-- llvm/test/CodeGen/RISCV/xqccmp-callee-saved-gprs.ll | 506
-rw-r--r-- llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll | 1442
-rw-r--r-- llvm/test/CodeGen/RISCV/xqccmp-with-float.ll | 36
-rw-r--r-- llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll | 6
-rw-r--r-- llvm/test/CodeGen/RISCV/zcmp-with-float.ll | 36
-rw-r--r-- llvm/test/CodeGen/X86/extractelement-load.ll | 67
-rw-r--r-- llvm/test/DebugInfo/NVPTX/debug-info.ll | 2
-rw-r--r-- llvm/test/Transforms/FunctionAttrs/initializes.ll | 14
-rw-r--r-- llvm/utils/gn/secondary/llvm/lib/ProfileData/BUILD.gn | 1
-rw-r--r-- llvm/utils/vim/syntax/llvm.vim | 22
-rw-r--r-- mlir/include/mlir/Dialect/GPU/Transforms/Passes.h | 15
-rw-r--r-- mlir/lib/Dialect/GPU/Transforms/SubgroupReduceLowering.cpp | 177
-rw-r--r-- mlir/test/Dialect/GPU/subgroup-reduce-lowering.mlir | 152
-rw-r--r-- mlir/test/lib/Dialect/GPU/TestGpuRewrite.cpp | 22
-rw-r--r-- utils/bazel/llvm-project-overlay/mlir/BUILD.bazel | 2
119 files changed, 3420 insertions, 3067 deletions
diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td
index c69ad3a..b15cba6 100644
--- a/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -396,8 +396,14 @@ def err_drv_print_header_env_var : Error<
"environment variable CC_PRINT_HEADERS_%select{FORMAT|FILTERING}0 has invalid value %1">;
def err_drv_print_header_env_var_combination : Error<
"unsupported combination: CC_PRINT_HEADERS_FORMAT=%0 and CC_PRINT_HEADERS_FILTERING=%1">;
-def err_drv_print_header_env_var_combination_cc1 : Error<
+def err_drv_print_header_env_var_invalid_format : Error<
+ "environment variable CC_PRINT_HEADERS_FORMAT=%0 requires a compatible value for CC_PRINT_HEADERS_FILTERING">;
+def err_drv_print_header_cc1_invalid_combination : Error<
"unsupported combination: -header-include-format=%0 and -header-include-filtering=%1">;
+def err_drv_print_header_cc1_invalid_filtering : Error<
+ "-header-include-filtering=%0 requires a compatible value for -header-include-format">;
+def err_drv_print_header_cc1_invalid_format : Error<
+ "-header-include-format=%0 requires a compatible value for -header-include-filtering">;
def warn_O4_is_O3 : Warning<"-O4 is equivalent to -O3">, InGroup<Deprecated>;
def warn_drv_optimization_value : Warning<"optimization level '%0' is not supported; using '%1%2' instead">,
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index 1582d91..f3ec498 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -2018,17 +2018,8 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
return getOrCreateInstanceMethodType(ThisType, Func, Unit);
}
-llvm::DISubroutineType *CGDebugInfo::getOrCreateMethodTypeForDestructor(
- const CXXMethodDecl *Method, llvm::DIFile *Unit, QualType FNType) {
- const FunctionProtoType *Func = FNType->getAs<FunctionProtoType>();
- // skip the first param since it is also this
- return getOrCreateInstanceMethodType(Method->getThisType(), Func, Unit, true);
-}
-
-llvm::DISubroutineType *
-CGDebugInfo::getOrCreateInstanceMethodType(QualType ThisPtr,
- const FunctionProtoType *Func,
- llvm::DIFile *Unit, bool SkipFirst) {
+llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType(
+ QualType ThisPtr, const FunctionProtoType *Func, llvm::DIFile *Unit) {
FunctionProtoType::ExtProtoInfo EPI = Func->getExtProtoInfo();
Qualifiers &Qc = EPI.TypeQuals;
Qc.removeConst();
@@ -2068,7 +2059,7 @@ CGDebugInfo::getOrCreateInstanceMethodType(QualType ThisPtr,
}
// Copy rest of the arguments.
- for (unsigned i = (SkipFirst ? 2 : 1), e = Args.size(); i != e; ++i)
+ for (unsigned i = 1, e = Args.size(); i != e; ++i)
Elts.push_back(Args[i]);
// Attach FlagObjectPointer to the explicit "this" parameter.
@@ -4381,12 +4372,6 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
// subprogram DIE will miss DW_AT_decl_file and DW_AT_decl_line fields.
return DBuilder.createSubroutineType(DBuilder.getOrCreateTypeArray({}));
- if (const auto *Method = dyn_cast<CXXDestructorDecl>(D)) {
- // Read method type from 'FnType' because 'D.getType()' does not cover
- // implicit arguments for destructors.
- return getOrCreateMethodTypeForDestructor(Method, F, FnType);
- }
-
if (const auto *Method = dyn_cast<CXXMethodDecl>(D))
return getOrCreateMethodType(Method, F);
diff --git a/clang/lib/CodeGen/CGDebugInfo.h b/clang/lib/CodeGen/CGDebugInfo.h
index 771c129..b287ce7 100644
--- a/clang/lib/CodeGen/CGDebugInfo.h
+++ b/clang/lib/CodeGen/CGDebugInfo.h
@@ -249,14 +249,9 @@ class CGDebugInfo {
/// to get a method type which includes \c this pointer.
llvm::DISubroutineType *getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DIFile *F);
-
- llvm::DISubroutineType *
- getOrCreateMethodTypeForDestructor(const CXXMethodDecl *Method,
- llvm::DIFile *F, QualType FNType);
-
llvm::DISubroutineType *
getOrCreateInstanceMethodType(QualType ThisPtr, const FunctionProtoType *Func,
- llvm::DIFile *Unit, bool SkipFirst = false);
+ llvm::DIFile *Unit);
llvm::DISubroutineType *
getOrCreateFunctionType(const Decl *D, QualType FnType, llvm::DIFile *F);
/// \return debug info descriptor for vtable.
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index 9e9eed4..1df5038 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -2435,13 +2435,25 @@ static bool ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
// Check for invalid combinations of header-include-format
// and header-include-filtering.
- if ((Opts.HeaderIncludeFormat == HIFMT_Textual &&
- Opts.HeaderIncludeFiltering != HIFIL_None) ||
- (Opts.HeaderIncludeFormat == HIFMT_JSON &&
- Opts.HeaderIncludeFiltering != HIFIL_Only_Direct_System))
- Diags.Report(diag::err_drv_print_header_env_var_combination_cc1)
- << Args.getLastArg(OPT_header_include_format_EQ)->getValue()
- << Args.getLastArg(OPT_header_include_filtering_EQ)->getValue();
+ if (Opts.HeaderIncludeFormat == HIFMT_Textual &&
+ Opts.HeaderIncludeFiltering != HIFIL_None) {
+ if (Args.hasArg(OPT_header_include_format_EQ))
+ Diags.Report(diag::err_drv_print_header_cc1_invalid_combination)
+ << headerIncludeFormatKindToString(Opts.HeaderIncludeFormat)
+ << headerIncludeFilteringKindToString(Opts.HeaderIncludeFiltering);
+ else
+ Diags.Report(diag::err_drv_print_header_cc1_invalid_filtering)
+ << headerIncludeFilteringKindToString(Opts.HeaderIncludeFiltering);
+ } else if (Opts.HeaderIncludeFormat == HIFMT_JSON &&
+ Opts.HeaderIncludeFiltering == HIFIL_None) {
+ if (Args.hasArg(OPT_header_include_filtering_EQ))
+ Diags.Report(diag::err_drv_print_header_cc1_invalid_combination)
+ << headerIncludeFormatKindToString(Opts.HeaderIncludeFormat)
+ << headerIncludeFilteringKindToString(Opts.HeaderIncludeFiltering);
+ else
+ Diags.Report(diag::err_drv_print_header_cc1_invalid_format)
+ << headerIncludeFormatKindToString(Opts.HeaderIncludeFormat);
+ }
return Diags.getNumErrors() == NumErrorsBefore;
}
diff --git a/clang/test/CodeGenCXX/debug-info-dtor-implicit-args.cpp b/clang/test/CodeGenCXX/debug-info-dtor-implicit-args.cpp
deleted file mode 100644
index 4bb51dc..0000000
--- a/clang/test/CodeGenCXX/debug-info-dtor-implicit-args.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-// RUN: %clang_cc1 -triple x86_64-none-linux-gnu -emit-llvm -debug-info-kind=limited %s -o - | FileCheck %s
-// RUN: %clang_cc1 -triple x86_64-pc-windows-msvc -emit-llvm -debug-info-kind=limited %s -o - | FileCheck --check-prefix MSVC %s
-
-struct B {
- virtual ~B() {}
-};
-
-struct A : virtual B {
-};
-
-A a;
-
-
-// CHECK-DAG: !{{[0-9]+}} = !DILocalVariable(name: "vtt", arg: 2, scope: ![[destructor:[0-9]+]], type: ![[vtttype:[0-9]+]], flags: DIFlagArtificial)
-// CHECK-DAG: ![[destructor]] = distinct !DISubprogram(name: "~A", {{.*}}, type: ![[subroutinetype:[0-9]+]]
-// CHECK-DAG: ![[subroutinetype]] = !DISubroutineType(types: ![[types:[0-9]+]])
-// CHECK-DAG: [[types]] = !{null, !{{[0-9]+}}, ![[vtttype]]}
-
-// MSVC-DAG: ![[inttype:[0-9]+]] = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
-// MSVC-DAG: ![[voidpointertype:[0-9]+]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: null, size: 64)
-// MSVC-DAG: ![[destructor:[0-9]+]] = distinct !DISubprogram(name: "~A", linkageName: "??_GA@@UEAAPEAXI@Z", {{.*}}, type: ![[subroutinetype:[0-9]+]]
-// MSVC-DAG: !{{[0-9]+}} = !DILocalVariable(name: "should_call_delete", arg: 2, scope: ![[destructor]], type: ![[inttype]], flags: DIFlagArtificial)
-// MSVC-DAG: ![[subroutinetype]] = !DISubroutineType(types: ![[types:[0-9]+]])
-// MSVC-DAG: [[types]] = !{![[voidpointertype]], !{{[0-9]+}}, ![[inttype]]}
diff --git a/clang/test/Preprocessor/print-header-json.c b/clang/test/Preprocessor/print-header-json.c
index d0d5e6b..1ba63dd 100644
--- a/clang/test/Preprocessor/print-header-json.c
+++ b/clang/test/Preprocessor/print-header-json.c
@@ -1,11 +1,16 @@
// RUN: %clang_cc1 -E -header-include-format=json -header-include-filtering=only-direct-system -header-include-file %t.txt -I %S/Inputs/print-header-json -isystem %S/Inputs/print-header-json/system %s
// RUN: cat %t.txt | FileCheck %s --check-prefix=SUPPORTED
+
// RUN: not %clang_cc1 -E -header-include-format=textual -header-include-filtering=only-direct-system -header-include-file %t.txt -I %S/Inputs/print-header-json -isystem %S/Inputs/print-header-json/system %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=UNSUPPORTED0
// RUN: not %clang_cc1 -E -header-include-format=json -header-include-filtering=none -header-include-file %t.txt -I %S/Inputs/print-header-json -isystem %S/Inputs/print-header-json/system %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=UNSUPPORTED1
-// RUN: rm %t.txt
-// RUN: env CC_PRINT_HEADERS_FORMAT=json CC_PRINT_HEADERS_FILTERING=only-direct-system CC_PRINT_HEADERS_FILE=%t.txt %clang -fsyntax-only -I %S/Inputs/print-header-json -isystem %S/Inputs/print-header-json/system %s -o /dev/null
// RUN: env CC_PRINT_HEADERS_FORMAT=textual CC_PRINT_HEADERS_FILTERING=only-direct-system CC_PRINT_HEADERS_FILE=%t.txt not %clang -fsyntax-only -I %S/Inputs/print-header-json -isystem %S/Inputs/print-header-json/system %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=UNSUPPORTED2
// RUN: env CC_PRINT_HEADERS_FORMAT=json CC_PRINT_HEADERS_FILTERING=none CC_PRINT_HEADERS_FILE=%t.txt not %clang -fsyntax-only -I %S/Inputs/print-header-json -isystem %S/Inputs/print-header-json/system %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=UNSUPPORTED3
+// RUN: env CC_PRINT_HEADERS_FORMAT=json CC_PRINT_HEADERS_FILE=%t.txt not %clang -fsyntax-only -I %S/Inputs/print-header-json -isystem %S/Inputs/print-header-json/system %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=UNSUPPORTED4
+// RUN: not %clang_cc1 -E -header-include-filtering=only-direct-system -header-include-file %t.txt -I %S/Inputs/print-header-json -isystem %S/Inputs/print-header-json/system %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=UNSUPPORTED5
+// RUN: not %clang_cc1 -E -header-include-format=json -header-include-file %t.txt -I %S/Inputs/print-header-json -isystem %S/Inputs/print-header-json/system %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=UNSUPPORTED6
+
+// RUN: rm %t.txt
+// RUN: env CC_PRINT_HEADERS_FORMAT=json CC_PRINT_HEADERS_FILTERING=only-direct-system CC_PRINT_HEADERS_FILE=%t.txt %clang -fsyntax-only -I %S/Inputs/print-header-json -isystem %S/Inputs/print-header-json/system %s -o /dev/null
// RUN: cat %t.txt | FileCheck %s --check-prefix=SUPPORTED
#include "system0.h"
@@ -18,3 +23,6 @@
// UNSUPPORTED1: error: unsupported combination: -header-include-format=json and -header-include-filtering=none
// UNSUPPORTED2: error: unsupported combination: CC_PRINT_HEADERS_FORMAT=textual and CC_PRINT_HEADERS_FILTERING=only-direct-system
// UNSUPPORTED3: error: unsupported combination: CC_PRINT_HEADERS_FORMAT=json and CC_PRINT_HEADERS_FILTERING=none
+// UNSUPPORTED4: error: environment variable CC_PRINT_HEADERS_FORMAT=json requires a compatible value for CC_PRINT_HEADERS_FILTERING
+// UNSUPPORTED5: error: -header-include-filtering=only-direct-system requires a compatible value for -header-include-format
+// UNSUPPORTED6: error: -header-include-format=json requires a compatible value for -header-include-filtering
diff --git a/clang/tools/driver/driver.cpp b/clang/tools/driver/driver.cpp
index 00c00ce..db72b4a 100644
--- a/clang/tools/driver/driver.cpp
+++ b/clang/tools/driver/driver.cpp
@@ -156,6 +156,11 @@ static bool SetBackdoorDriverOutputsFromEnvVars(Driver &TheDriver) {
}
const char *FilteringStr = ::getenv("CC_PRINT_HEADERS_FILTERING");
+ if (!FilteringStr) {
+ TheDriver.Diag(clang::diag::err_drv_print_header_env_var_invalid_format)
+ << EnvVar;
+ return false;
+ }
HeaderIncludeFilteringKind Filtering;
if (!stringToHeaderIncludeFiltering(FilteringStr, Filtering)) {
TheDriver.Diag(clang::diag::err_drv_print_header_env_var)
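Note (illustration, not part of the diff): the clang changes above split the old single "unsupported combination" diagnostic so that a missing flag and an incompatible flag are reported differently. The underlying compatibility rule, visible in the CompilerInvocation.cpp hunk, is that textual output pairs with filtering "none" and JSON output pairs with "only-direct-system". The sketch below encodes just that rule; the enum and function names are invented for the sketch and do not exist in the tree.

// Sketch only: the format/filtering compatibility rule enforced by the new
// diagnostics. Real logic lives in ParseDependencyOutputArgs and driver.cpp.
#include <cassert>

enum class HeaderFormat { Textual, JSON };
enum class HeaderFiltering { None, OnlyDirectSystem };

static bool isCompatible(HeaderFormat Fmt, HeaderFiltering Filter) {
  if (Fmt == HeaderFormat::Textual)
    return Filter == HeaderFiltering::None;
  return Filter == HeaderFiltering::OnlyDirectSystem; // JSON
}

int main() {
  assert(isCompatible(HeaderFormat::Textual, HeaderFiltering::None));
  assert(isCompatible(HeaderFormat::JSON, HeaderFiltering::OnlyDirectSystem));
  assert(!isCompatible(HeaderFormat::JSON, HeaderFiltering::None));
  assert(!isCompatible(HeaderFormat::Textual, HeaderFiltering::OnlyDirectSystem));
  return 0;
}

When only one side is specified (for example CC_PRINT_HEADERS_FORMAT=json with no CC_PRINT_HEADERS_FILTERING, as handled in driver.cpp above), the new "requires a compatible value" diagnostics fire instead of the combination error.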
diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index e3a96da..73dfeae 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -1049,8 +1049,6 @@ if(LLVM_LIBC_FULL_BUILD)
# setjmp.h entrypoints
libc.src.setjmp.longjmp
libc.src.setjmp.setjmp
- libc.src.setjmp.siglongjmp
- libc.src.setjmp.sigsetjmp
# stdio.h entrypoints
libc.src.stdio.clearerr
diff --git a/libc/hdr/CMakeLists.txt b/libc/hdr/CMakeLists.txt
index 209fcb9..db2dac9 100644
--- a/libc/hdr/CMakeLists.txt
+++ b/libc/hdr/CMakeLists.txt
@@ -223,14 +223,5 @@ add_proxy_header_library(
libc.include.wchar
)
-# offsetof is a macro inside compiler resource header stddef.h
-add_proxy_header_library(
- offsetof_macros
- HDRS
- offsetof_macros.h
- FULL_BUILD_DEPENDS
- libc.include.llvm-libc-macros.offsetof_macro
-)
-
add_subdirectory(types)
add_subdirectory(func)
diff --git a/libc/hdr/offsetof_macros.h b/libc/hdr/offsetof_macros.h
deleted file mode 100644
index 42e853f..0000000
--- a/libc/hdr/offsetof_macros.h
+++ /dev/null
@@ -1,23 +0,0 @@
-//===-- Definition of macros for offsetof ---------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_HDR_OFFSETOF_MACROS_H
-#define LLVM_LIBC_HDR_OFFSETOF_MACROS_H
-
-#ifdef LIBC_FULL_BUILD
-
-#include "include/llvm-libc-macros/offsetof-macro.h"
-
-#else // Overlay mode
-
-#define __need_offsetof
-#include <stddef.h>
-
-#endif // LLVM_LIBC_FULL_BUILD
-
-#endif // LLVM_LIBC_HDR_OFFSETOF_MACROS_H
diff --git a/libc/include/llvm-libc-types/CMakeLists.txt b/libc/include/llvm-libc-types/CMakeLists.txt
index 26a3ed0..861b983 100644
--- a/libc/include/llvm-libc-types/CMakeLists.txt
+++ b/libc/include/llvm-libc-types/CMakeLists.txt
@@ -39,6 +39,7 @@ add_header(gid_t HDR gid_t.h)
add_header(uid_t HDR uid_t.h)
add_header(imaxdiv_t HDR imaxdiv_t.h)
add_header(ino_t HDR ino_t.h)
+add_header(jmp_buf HDR jmp_buf.h)
add_header(mbstate_t HDR mbstate_t.h)
add_header(mode_t HDR mode_t.h)
add_header(mtx_t HDR mtx_t.h DEPENDS .__futex_word .__mutex_type)
@@ -82,7 +83,6 @@ add_header(union_sigval HDR union_sigval.h)
add_header(siginfo_t HDR siginfo_t.h DEPENDS .union_sigval .pid_t .uid_t .clock_t)
add_header(sig_atomic_t HDR sig_atomic_t.h)
add_header(sigset_t HDR sigset_t.h DEPENDS libc.include.llvm-libc-macros.signal_macros)
-add_header(jmp_buf HDR jmp_buf.h DEPENDS .sigset_t)
add_header(struct_sigaction HDR struct_sigaction.h DEPENDS .sigset_t .siginfo_t)
add_header(struct_timespec HDR struct_timespec.h DEPENDS .time_t)
add_header(
diff --git a/libc/include/llvm-libc-types/jmp_buf.h b/libc/include/llvm-libc-types/jmp_buf.h
index 1e77916..f246e64 100644
--- a/libc/include/llvm-libc-types/jmp_buf.h
+++ b/libc/include/llvm-libc-types/jmp_buf.h
@@ -9,8 +9,6 @@
#ifndef LLVM_LIBC_TYPES_JMP_BUF_H
#define LLVM_LIBC_TYPES_JMP_BUF_H
-#include "sigset_t.h"
-
typedef struct {
#ifdef __x86_64__
__UINT64_TYPE__ rbx;
@@ -52,21 +50,8 @@ typedef struct {
#else
#error "__jmp_buf not available for your target architecture."
#endif
- // TODO: implement sigjmp_buf related functions for other architectures
- // Issue: https://github.com/llvm/llvm-project/issues/136358
-#if defined(__i386__) || defined(__x86_64__)
- // return address
- void *sig_retaddr;
- // extra register buffer to avoid indefinite stack growth in sigsetjmp
- void *sig_extra;
- // signal masks
- sigset_t sigmask;
-#endif
} __jmp_buf;
typedef __jmp_buf jmp_buf[1];
-#if defined(__i386__) || defined(__x86_64__)
-typedef __jmp_buf sigjmp_buf[1];
-#endif
#endif // LLVM_LIBC_TYPES_JMP_BUF_H
diff --git a/libc/include/setjmp.yaml b/libc/include/setjmp.yaml
index 00049e5..5fbb9eb 100644
--- a/libc/include/setjmp.yaml
+++ b/libc/include/setjmp.yaml
@@ -21,19 +21,3 @@ functions:
- _Returns_twice
arguments:
- type: jmp_buf
- - name: sigsetjmp
- standards:
- - POSIX
- return_type: int
- attributes:
- - _Returns_twice
- arguments:
- - type: sigjmp_buf
- - type: int
- - name: siglongjmp
- standards:
- - POSIX
- return_type: _Noreturn void
- arguments:
- - type: sigjmp_buf
- - type: int
diff --git a/libc/src/setjmp/CMakeLists.txt b/libc/src/setjmp/CMakeLists.txt
index 2591319..d85c532 100644
--- a/libc/src/setjmp/CMakeLists.txt
+++ b/libc/src/setjmp/CMakeLists.txt
@@ -1,13 +1,3 @@
-if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${LIBC_TARGET_OS})
- add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/${LIBC_TARGET_OS})
- add_object_library(
- sigsetjmp_epilogue
- ALIAS
- DEPENDS
- .${LIBC_TARGET_OS}.sigsetjmp_epilogue
- )
-endif()
-
if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${LIBC_TARGET_ARCHITECTURE})
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/${LIBC_TARGET_ARCHITECTURE})
endif()
@@ -25,20 +15,3 @@ add_entrypoint_object(
DEPENDS
.${LIBC_TARGET_ARCHITECTURE}.longjmp
)
-
-add_entrypoint_object(
- siglongjmp
- SRCS
- siglongjmp.cpp
- HDRS
- siglongjmp.h
- DEPENDS
- .longjmp
-)
-
-add_entrypoint_object(
- sigsetjmp
- ALIAS
- DEPENDS
- .${LIBC_TARGET_ARCHITECTURE}.sigsetjmp
-)
diff --git a/libc/src/setjmp/linux/CMakeLists.txt b/libc/src/setjmp/linux/CMakeLists.txt
deleted file mode 100644
index b844c8c..0000000
--- a/libc/src/setjmp/linux/CMakeLists.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-add_object_library(
- sigsetjmp_epilogue
- HDRS
- ../sigsetjmp_epilogue.h
- SRCS
- sigsetjmp_epilogue.cpp
- DEPENDS
- libc.src.__support.common
- libc.src.__support.OSUtil.osutil
- libc.hdr.types.jmp_buf
- libc.hdr.types.sigset_t
-)
diff --git a/libc/src/setjmp/linux/sigsetjmp_epilogue.cpp b/libc/src/setjmp/linux/sigsetjmp_epilogue.cpp
deleted file mode 100644
index 4718623..0000000
--- a/libc/src/setjmp/linux/sigsetjmp_epilogue.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-//===-- Implementation of sigsetjmp_epilogue ------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "src/setjmp/sigsetjmp_epilogue.h"
-#include "src/__support/OSUtil/syscall.h"
-#include "src/__support/common.h"
-#include <sys/syscall.h> // For syscall numbers.
-
-namespace LIBC_NAMESPACE_DECL {
-[[gnu::returns_twice]] int sigsetjmp_epilogue(jmp_buf buffer, int retval) {
- // If set is NULL, then the signal mask is unchanged (i.e., how is
- // ignored), but the current value of the signal mask is nevertheless
- // returned in oldset (if it is not NULL).
- syscall_impl<long>(SYS_rt_sigprocmask, SIG_SETMASK,
- /* set= */ retval ? &buffer->sigmask : nullptr,
- /* old_set= */ retval ? nullptr : &buffer->sigmask,
- sizeof(sigset_t));
- return retval;
-}
-} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/setjmp/setjmp_impl.h b/libc/src/setjmp/setjmp_impl.h
index c89d6bc..669f720 100644
--- a/libc/src/setjmp/setjmp_impl.h
+++ b/libc/src/setjmp/setjmp_impl.h
@@ -29,8 +29,7 @@ namespace LIBC_NAMESPACE_DECL {
#ifdef LIBC_COMPILER_IS_GCC
[[gnu::nothrow]]
#endif
-[[gnu::returns_twice]] int
-setjmp(jmp_buf buf);
+__attribute__((returns_twice)) int setjmp(jmp_buf buf);
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/setjmp/siglongjmp.cpp b/libc/src/setjmp/siglongjmp.cpp
deleted file mode 100644
index e372a6f..0000000
--- a/libc/src/setjmp/siglongjmp.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-//===-- Implementation of siglongjmp --------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "src/setjmp/siglongjmp.h"
-#include "src/__support/common.h"
-#include "src/setjmp/longjmp.h"
-
-namespace LIBC_NAMESPACE_DECL {
-
-// siglongjmp is the same as longjmp. The additional recovery work is done in
-// the epilogue of the sigsetjmp function.
-// TODO: move this inside the TU of longjmp and making it an alias after
-// sigsetjmp is implemented for all architectures.
-LLVM_LIBC_FUNCTION(void, siglongjmp, (jmp_buf buf, int val)) {
- return LIBC_NAMESPACE::longjmp(buf, val);
-}
-
-} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/setjmp/siglongjmp.h b/libc/src/setjmp/siglongjmp.h
deleted file mode 100644
index ea5bbb9..0000000
--- a/libc/src/setjmp/siglongjmp.h
+++ /dev/null
@@ -1,25 +0,0 @@
-//===-- Implementation header for siglongjmp --------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SRC_SETJMP_SIGLONGJMP_H
-#define LLVM_LIBC_SRC_SETJMP_SIGLONGJMP_H
-
-#include "hdr/types/jmp_buf.h"
-#include "src/__support/macros/config.h"
-#include "src/__support/macros/properties/compiler.h"
-
-namespace LIBC_NAMESPACE_DECL {
-
-#ifdef LIBC_COMPILER_IS_GCC
-[[gnu::nothrow]]
-#endif
-void siglongjmp(jmp_buf buf, int val);
-
-} // namespace LIBC_NAMESPACE_DECL
-
-#endif // LLVM_LIBC_SRC_SETJMP_SIGLONGJMP_H
diff --git a/libc/src/setjmp/sigsetjmp.h b/libc/src/setjmp/sigsetjmp.h
deleted file mode 100644
index ef060c8..0000000
--- a/libc/src/setjmp/sigsetjmp.h
+++ /dev/null
@@ -1,26 +0,0 @@
-//===-- Implementation header for sigsetjmp ---------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SRC_SETJMP_SIGSETJMP_H
-#define LLVM_LIBC_SRC_SETJMP_SIGSETJMP_H
-
-#include "hdr/types/jmp_buf.h"
-#include "src/__support/macros/config.h"
-#include "src/__support/macros/properties/compiler.h"
-
-namespace LIBC_NAMESPACE_DECL {
-
-#ifdef LIBC_COMPILER_IS_GCC
-[[gnu::nothrow]]
-#endif
-[[gnu::returns_twice]] int
-sigsetjmp(sigjmp_buf buf, int savesigs);
-
-} // namespace LIBC_NAMESPACE_DECL
-
-#endif // LLVM_LIBC_SRC_SETJMP_SIGSETJMP_H
diff --git a/libc/src/setjmp/sigsetjmp_epilogue.h b/libc/src/setjmp/sigsetjmp_epilogue.h
deleted file mode 100644
index 88702b7..0000000
--- a/libc/src/setjmp/sigsetjmp_epilogue.h
+++ /dev/null
@@ -1,19 +0,0 @@
-//===-- Implementation header for sigsetjmp epilogue ------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SRC_SETJMP_SIGSETJMP_EPILOGUE_H
-#define LLVM_LIBC_SRC_SETJMP_SIGSETJMP_EPILOGUE_H
-
-#include "hdr/types/jmp_buf.h"
-#include "src/__support/common.h"
-
-namespace LIBC_NAMESPACE_DECL {
-[[gnu::returns_twice]] int sigsetjmp_epilogue(jmp_buf buffer, int retval);
-} // namespace LIBC_NAMESPACE_DECL
-
-#endif // LLVM_LIBC_SRC_SETJMP_SIGSETJMP_EPILOGUE_H
diff --git a/libc/src/setjmp/x86_64/CMakeLists.txt b/libc/src/setjmp/x86_64/CMakeLists.txt
index 0090e81..96d5751 100644
--- a/libc/src/setjmp/x86_64/CMakeLists.txt
+++ b/libc/src/setjmp/x86_64/CMakeLists.txt
@@ -5,22 +5,9 @@ add_entrypoint_object(
HDRS
../setjmp_impl.h
DEPENDS
- libc.hdr.offsetof_macros
libc.hdr.types.jmp_buf
-)
-
-add_entrypoint_object(
- sigsetjmp
- SRCS
- sigsetjmp.cpp
- HDRS
- ../sigsetjmp.h
- DEPENDS
- libc.hdr.types.jmp_buf
- libc.hdr.types.sigset_t
- libc.hdr.offsetof_macros
- libc.src.setjmp.sigsetjmp_epilogue
- libc.src.setjmp.setjmp
+ COMPILE_OPTIONS
+ ${libc_opt_high_flag}
)
add_entrypoint_object(
@@ -31,4 +18,7 @@ add_entrypoint_object(
../longjmp.h
DEPENDS
libc.hdr.types.jmp_buf
+ COMPILE_OPTIONS
+ ${libc_opt_high_flag}
+ -fomit-frame-pointer
)
diff --git a/libc/src/setjmp/x86_64/setjmp.cpp b/libc/src/setjmp/x86_64/setjmp.cpp
index 28e5271..5ac10fa 100644
--- a/libc/src/setjmp/x86_64/setjmp.cpp
+++ b/libc/src/setjmp/x86_64/setjmp.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-#include "hdr/offsetof_macros.h"
+#include "include/llvm-libc-macros/offsetof-macro.h"
#include "src/__support/common.h"
#include "src/__support/macros/config.h"
#include "src/setjmp/setjmp_impl.h"
diff --git a/libc/src/setjmp/x86_64/sigsetjmp.cpp b/libc/src/setjmp/x86_64/sigsetjmp.cpp
deleted file mode 100644
index 4c97a01..0000000
--- a/libc/src/setjmp/x86_64/sigsetjmp.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-//===-- Implementation of sigsetjmp ---------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "src/setjmp/sigsetjmp.h"
-#include "hdr/offsetof_macros.h"
-#include "src/__support/common.h"
-#include "src/__support/macros/config.h"
-#include "src/setjmp/setjmp_impl.h"
-#include "src/setjmp/sigsetjmp_epilogue.h"
-
-#if !defined(LIBC_TARGET_ARCH_IS_X86)
-#error "Invalid file include"
-#endif
-namespace LIBC_NAMESPACE_DECL {
-#ifdef __i386__
-[[gnu::naked]]
-LLVM_LIBC_FUNCTION(int, sigsetjmp, (sigjmp_buf buf)) {
- asm(R"(
- mov 8(%%esp), %%ecx
- jecxz .Lnosave
-
- mov 4(%%esp), %%eax
- pop %c[retaddr](%%eax)
- mov %%ebx, %c[extra](%%eax)
- mov %%eax, %%ebx
- call %P[setjmp]
- push %c[retaddr](%%ebx)
- mov %%ebx,4(%%esp)
- mov %%eax,8(%%esp)
- mov %c[extra](%%ebx), %%ebx
- jmp %P[epilogue]
-
-.Lnosave:
- jmp %P[setjmp])" ::[retaddr] "i"(offsetof(__jmp_buf, sig_retaddr)),
- [extra] "i"(offsetof(__jmp_buf, sig_extra)), [setjmp] "X"(setjmp),
- [epilogue] "X"(sigsetjmp_epilogue)
- : "eax", "ebx", "ecx");
-}
-#endif
-[[gnu::naked]]
-LLVM_LIBC_FUNCTION(int, sigsetjmp, (sigjmp_buf, int)) {
- asm(R"(
- test %%esi, %%esi
- jz .Lnosave
-
- pop %c[retaddr](%%rdi)
- mov %%rbx, %c[extra](%%rdi)
- mov %%rdi, %%rbx
- call %P[setjmp]
- push %c[retaddr](%%rbx)
- mov %%rbx, %%rdi
- mov %%eax, %%esi
- mov %c[extra](%%rdi), %%rbx
- jmp %P[epilogue]
-
-.Lnosave:
- jmp %P[setjmp])" ::[retaddr] "i"(offsetof(__jmp_buf, sig_retaddr)),
- [extra] "i"(offsetof(__jmp_buf, sig_extra)), [setjmp] "X"(setjmp),
- [epilogue] "X"(sigsetjmp_epilogue)
- : "rax", "rbx");
-}
-
-} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/test/src/setjmp/CMakeLists.txt b/libc/test/src/setjmp/CMakeLists.txt
index e95476e..3922307 100644
--- a/libc/test/src/setjmp/CMakeLists.txt
+++ b/libc/test/src/setjmp/CMakeLists.txt
@@ -17,20 +17,3 @@ add_libc_unittest(
libc.src.setjmp.longjmp
libc.src.setjmp.setjmp
)
-
-add_libc_unittest(
- sigsetjmp_test
- SUITE
- libc_setjmp_unittests
- SRCS
- sigsetjmp_test.cpp
- CXX_STANDARD
- 20
- DEPENDS
- libc.src.setjmp.sigsetjmp
- libc.src.setjmp.siglongjmp
- libc.src.signal.sigprocmask
- libc.src.string.memset
- libc.src.string.memcmp
- libc.hdr.types.sigset_t
-)
diff --git a/libc/test/src/setjmp/sigsetjmp_test.cpp b/libc/test/src/setjmp/sigsetjmp_test.cpp
deleted file mode 100644
index cf8d2f2..0000000
--- a/libc/test/src/setjmp/sigsetjmp_test.cpp
+++ /dev/null
@@ -1,88 +0,0 @@
-//===-- Unittests for sigsetjmp and siglongjmp ----------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "src/setjmp/siglongjmp.h"
-#include "src/setjmp/sigsetjmp.h"
-#include "src/signal/sigprocmask.h"
-#include "src/string/memcmp.h"
-#include "src/string/memset.h"
-#include "test/UnitTest/Test.h"
-
-constexpr int MAX_LOOP = 123;
-int longjmp_called = 0;
-
-void jump_back(jmp_buf buf, int n) {
- longjmp_called++;
- LIBC_NAMESPACE::siglongjmp(buf, n); // Will return |n| out of setjmp
-}
-
-TEST(LlvmLibcSetJmpTest, SigSetAndJumpBackSaveSigs) {
- jmp_buf buf;
- longjmp_called = 0;
- volatile int n = 0;
- sigset_t old;
- sigset_t mask_all;
- sigset_t recovered;
- LIBC_NAMESPACE::memset(&mask_all, 0xFF, sizeof(mask_all));
- LIBC_NAMESPACE::memset(&old, 0, sizeof(old));
- LIBC_NAMESPACE::memset(&recovered, 0, sizeof(recovered));
- LIBC_NAMESPACE::sigprocmask(0, nullptr, &old);
- if (LIBC_NAMESPACE::sigsetjmp(buf, 1) <= MAX_LOOP) {
- LIBC_NAMESPACE::sigprocmask(0, nullptr, &recovered);
- ASSERT_EQ(0, LIBC_NAMESPACE::memcmp(&old, &recovered, sizeof(old)));
- n = n + 1;
- LIBC_NAMESPACE::sigprocmask(SIG_BLOCK, &mask_all, nullptr);
- jump_back(buf, n);
- }
- ASSERT_EQ(longjmp_called, n);
- ASSERT_EQ(n, MAX_LOOP + 1);
-}
-
-TEST(LlvmLibcSetJmpTest, SigSetAndJumpBackValOneSaveSigs) {
- jmp_buf buf;
- longjmp_called = 0;
- sigset_t old;
- sigset_t mask_all;
- sigset_t recovered;
- LIBC_NAMESPACE::memset(&mask_all, 0xFF, sizeof(mask_all));
- LIBC_NAMESPACE::memset(&old, 0, sizeof(old));
- LIBC_NAMESPACE::memset(&recovered, 0, sizeof(recovered));
- LIBC_NAMESPACE::sigprocmask(0, nullptr, &old);
- int val = LIBC_NAMESPACE::sigsetjmp(buf, 1);
- if (val == 0) {
- LIBC_NAMESPACE::sigprocmask(SIG_BLOCK, &mask_all, nullptr);
- jump_back(buf, val);
- }
- LIBC_NAMESPACE::sigprocmask(0, nullptr, &recovered);
- ASSERT_EQ(0, LIBC_NAMESPACE::memcmp(&old, &recovered, sizeof(old)));
- ASSERT_EQ(longjmp_called, 1);
- ASSERT_EQ(val, 1);
-}
-
-TEST(LlvmLibcSetJmpTest, SigSetAndJumpBackNoSaveSigs) {
- jmp_buf buf;
- longjmp_called = 0;
- volatile int n = 0;
- if (LIBC_NAMESPACE::sigsetjmp(buf, 0) <= MAX_LOOP) {
- n = n + 1;
- jump_back(buf, n);
- }
- ASSERT_EQ(longjmp_called, n);
- ASSERT_EQ(n, MAX_LOOP + 1);
-}
-
-TEST(LlvmLibcSetJmpTest, SigSetAndJumpBackValOneNoSaveSigs) {
- jmp_buf buf;
- longjmp_called = 0;
- int val = LIBC_NAMESPACE::sigsetjmp(buf, 0);
- if (val == 0) {
- jump_back(buf, val);
- }
- ASSERT_EQ(longjmp_called, 1);
- ASSERT_EQ(val, 1);
-}
diff --git a/lldb/include/lldb/Interpreter/CommandObject.h b/lldb/include/lldb/Interpreter/CommandObject.h
index e6fea9e..8e33edb 100644
--- a/lldb/include/lldb/Interpreter/CommandObject.h
+++ b/lldb/include/lldb/Interpreter/CommandObject.h
@@ -40,14 +40,13 @@ int AddNamesMatchingPartialString(
StringList *descriptions = nullptr) {
int number_added = 0;
- const bool add_all = cmd_str.empty();
-
- for (auto iter = in_map.begin(), end = in_map.end(); iter != end; iter++) {
- if (add_all || (iter->first.find(std::string(cmd_str), 0) == 0)) {
+ for (const auto &[name, cmd] : in_map) {
+ llvm::StringRef cmd_name = name;
+ if (cmd_name.starts_with(cmd_str)) {
++number_added;
- matches.AppendString(iter->first.c_str());
+ matches.AppendString(name);
if (descriptions)
- descriptions->AppendString(iter->second->GetHelp());
+ descriptions->AppendString(cmd->GetHelp());
}
}
diff --git a/lldb/include/lldb/Utility/CompletionRequest.h b/lldb/include/lldb/Utility/CompletionRequest.h
index 865d6db..4d3d440 100644
--- a/lldb/include/lldb/Utility/CompletionRequest.h
+++ b/lldb/include/lldb/Utility/CompletionRequest.h
@@ -115,6 +115,11 @@ public:
CompletionRequest(llvm::StringRef command_line, unsigned raw_cursor_pos,
CompletionResult &result);
+ /// Sets the maximum number of completions that should be returned.
+ void SetMaxReturnElements(size_t max_return_elements) {
+ m_max_return_elements = max_return_elements;
+ }
+
/// Returns the raw user input used to create this CompletionRequest cut off
/// at the cursor position. The cursor will be at the end of the raw line.
llvm::StringRef GetRawLine() const {
@@ -157,6 +162,23 @@ public:
size_t GetCursorIndex() const { return m_cursor_index; }
+ size_t GetMaxReturnElements() const { return m_max_return_elements; }
+
+ /// Returns true if the maximum number of completions has not been reached
+ /// yet, hence we should keep adding completions.
+ bool ShouldAddCompletions() const {
+ return GetMaxNumberOfCompletionsToAdd() > 0;
+ }
+
+ /// Returns the maximum number of completions that need to be added
+ /// until reaching the maximum
+ size_t GetMaxNumberOfCompletionsToAdd() const {
+ const size_t number_of_results = m_result.GetNumberOfResults();
+ if (number_of_results >= m_max_return_elements)
+ return 0;
+ return m_max_return_elements - number_of_results;
+ }
+
/// Adds a possible completion string. If the completion was already
/// suggested before, it will not be added to the list of results. A copy of
/// the suggested completion is stored, so the given string can be free'd
@@ -231,6 +253,8 @@ private:
size_t m_cursor_index;
/// The cursor position in the argument indexed by m_cursor_index.
size_t m_cursor_char_position;
+ /// The maximum number of completions that should be returned.
+ size_t m_max_return_elements = std::numeric_limits<size_t>::max();
/// The result this request is supposed to fill out.
/// We keep this object private to ensure that no backend can in any way
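Note (illustration, not part of the diff): a minimal sketch of how a completion backend can honor the limit API added above. ShouldAddCompletions(), GetCursorArgumentPrefix(), and AddCompletion() are existing or newly added CompletionRequest members; the helper name and candidate list are hypothetical.

// Sketch only: stop feeding candidates once the caller-imposed cap is reached.
#include "lldb/Utility/CompletionRequest.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"

static void AddCandidates(lldb_private::CompletionRequest &request,
                          llvm::ArrayRef<llvm::StringRef> candidates) {
  for (llvm::StringRef candidate : candidates) {
    // Bail out as soon as the maximum number of results has been added.
    if (!request.ShouldAddCompletions())
      return;
    if (candidate.starts_with(request.GetCursorArgumentPrefix()))
      request.AddCompletion(candidate);
  }
}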
diff --git a/lldb/packages/Python/lldbsuite/test/lldbtest.py b/lldb/packages/Python/lldbsuite/test/lldbtest.py
index db15a1d..763e061 100644
--- a/lldb/packages/Python/lldbsuite/test/lldbtest.py
+++ b/lldb/packages/Python/lldbsuite/test/lldbtest.py
@@ -2257,12 +2257,12 @@ class TestBase(Base, metaclass=LLDBTestCaseFactory):
substrs=[p],
)
- def completions_match(self, command, completions):
+ def completions_match(self, command, completions, max_completions=-1):
"""Checks that the completions for the given command are equal to the
given list of completions"""
interp = self.dbg.GetCommandInterpreter()
match_strings = lldb.SBStringList()
- interp.HandleCompletion(command, len(command), 0, -1, match_strings)
+ interp.HandleCompletion(command, len(command), 0, max_completions, match_strings)
# match_strings is a 1-indexed list, so we have to slice...
self.assertCountEqual(
completions, list(match_strings)[1:], "List of returned completion is wrong"
diff --git a/lldb/source/API/SBCommandInterpreter.cpp b/lldb/source/API/SBCommandInterpreter.cpp
index de22a9d..4ea79d3 100644
--- a/lldb/source/API/SBCommandInterpreter.cpp
+++ b/lldb/source/API/SBCommandInterpreter.cpp
@@ -263,13 +263,26 @@ int SBCommandInterpreter::HandleCompletionWithDescriptions(
if (!IsValid())
return 0;
+ if (max_return_elements == 0)
+ return 0;
+
lldb_private::StringList lldb_matches, lldb_descriptions;
CompletionResult result;
CompletionRequest request(current_line, cursor - current_line, result);
+ if (max_return_elements > 0)
+ request.SetMaxReturnElements(max_return_elements);
m_opaque_ptr->HandleCompletion(request);
result.GetMatches(lldb_matches);
result.GetDescriptions(lldb_descriptions);
+ // limit the matches to the max_return_elements if necessary
+ if (max_return_elements > 0 &&
+ lldb_matches.GetSize() > static_cast<size_t>(max_return_elements)) {
+ lldb_matches.SetSize(max_return_elements);
+ lldb_descriptions.SetSize(max_return_elements);
+ }
+ int number_of_matches = lldb_matches.GetSize();
+
// Make the result array indexed from 1 again by adding the 'common prefix'
// of all completions as element 0. This is done to emulate the old API.
if (request.GetParsedLine().GetArgumentCount() == 0) {
@@ -303,7 +316,7 @@ int SBCommandInterpreter::HandleCompletionWithDescriptions(
matches.AppendList(temp_matches_list);
SBStringList temp_descriptions_list(&lldb_descriptions);
descriptions.AppendList(temp_descriptions_list);
- return result.GetNumberOfResults();
+ return number_of_matches;
}
int SBCommandInterpreter::HandleCompletionWithDescriptions(
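Note (illustration, not part of the diff): with the SBCommandInterpreter change above, a non-negative max_return_elements is now honored instead of ignored. A usage sketch follows, assuming the existing five-argument HandleCompletion overload that the Python test change below also calls (line, cursor position, match start point, max elements, matches); the function and string literal are invented for the sketch.

// Sketch only: cap completion results through the SB API.
#include "lldb/API/SBCommandInterpreter.h"
#include "lldb/API/SBDebugger.h"
#include "lldb/API/SBStringList.h"

static void CompleteWithCap(lldb::SBDebugger &debugger) {
  lldb::SBCommandInterpreter interp = debugger.GetCommandInterpreter();
  lldb::SBStringList matches;
  const char *line = "process att";
  // Ask for at most 5 matches; a negative value keeps the old unlimited
  // behavior, and 0 now returns immediately with no results.
  int num_matches = interp.HandleCompletion(line, /*cursor_pos=*/11,
                                            /*match_start_point=*/0,
                                            /*max_return_elements=*/5, matches);
  // matches is 1-indexed: index 0 holds the common prefix of all completions,
  // as noted in the code above; real matches occupy indices 1..num_matches.
  for (int i = 1; i <= num_matches; ++i)
    (void)matches.GetStringAtIndex(i);
}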
diff --git a/lldb/source/Commands/CommandCompletions.cpp b/lldb/source/Commands/CommandCompletions.cpp
index 216aaf9..38231a8 100644
--- a/lldb/source/Commands/CommandCompletions.cpp
+++ b/lldb/source/Commands/CommandCompletions.cpp
@@ -91,7 +91,7 @@ bool CommandCompletions::InvokeCommonCompletionCallbacks(
nullptr} // This one has to be last in the list.
};
- for (int i = 0;; i++) {
+ for (int i = 0; request.ShouldAddCompletions(); i++) {
if (common_completions[i].type == lldb::eTerminatorCompletion)
break;
else if ((common_completions[i].type & completion_mask) ==
@@ -167,7 +167,9 @@ public:
m_matching_files.AppendIfUnique(context.comp_unit->GetPrimaryFile());
}
}
- return Searcher::eCallbackReturnContinue;
+ return m_matching_files.GetSize() >= m_request.GetMaxNumberOfCompletionsToAdd()
+ ? Searcher::eCallbackReturnStop
+ : Searcher::eCallbackReturnContinue;
}
void DoCompletion(SearchFilter *filter) override {
@@ -230,6 +232,9 @@ public:
// Now add the functions & symbols to the list - only add if unique:
for (const SymbolContext &sc : sc_list) {
+ if (m_match_set.size() >= m_request.GetMaxNumberOfCompletionsToAdd())
+ break;
+
ConstString func_name = sc.GetFunctionName(Mangled::ePreferDemangled);
// Ensure that the function name matches the regex. This is more than
// a sanity check. It is possible that the demangled function name
@@ -239,7 +244,9 @@ public:
m_match_set.insert(func_name);
}
}
- return Searcher::eCallbackReturnContinue;
+ return m_match_set.size() >= m_request.GetMaxNumberOfCompletionsToAdd()
+ ? Searcher::eCallbackReturnStop
+ : Searcher::eCallbackReturnContinue;
}
void DoCompletion(SearchFilter *filter) override {
@@ -305,7 +312,8 @@ public:
m_request.AddCompletion(cur_file_name);
}
}
- return Searcher::eCallbackReturnContinue;
+ return m_request.ShouldAddCompletions() ? Searcher::eCallbackReturnContinue
+ : Searcher::eCallbackReturnStop;
}
void DoCompletion(SearchFilter *filter) override { filter->Search(*this); }
@@ -429,7 +437,8 @@ static void DiskFilesOrDirectories(const llvm::Twine &partial_name,
std::error_code EC;
llvm::vfs::directory_iterator Iter = fs.DirBegin(SearchDir, EC);
llvm::vfs::directory_iterator End;
- for (; Iter != End && !EC; Iter.increment(EC)) {
+ for (; Iter != End && !EC && request.ShouldAddCompletions();
+ Iter.increment(EC)) {
auto &Entry = *Iter;
llvm::ErrorOr<llvm::vfs::Status> Status = fs.GetStatus(Entry.path());
diff --git a/lldb/source/Commands/CommandObjectProcess.cpp b/lldb/source/Commands/CommandObjectProcess.cpp
index 654dfa8..ed80c85 100644
--- a/lldb/source/Commands/CommandObjectProcess.cpp
+++ b/lldb/source/Commands/CommandObjectProcess.cpp
@@ -468,7 +468,13 @@ protected:
case 'b':
m_run_to_bkpt_args.AppendArgument(option_arg);
m_any_bkpts_specified = true;
- break;
+ break;
+ case 'F':
+ m_base_direction = lldb::RunDirection::eRunForward;
+ break;
+ case 'R':
+ m_base_direction = lldb::RunDirection::eRunReverse;
+ break;
default:
llvm_unreachable("Unimplemented option");
}
@@ -479,6 +485,7 @@ protected:
m_ignore = 0;
m_run_to_bkpt_args.Clear();
m_any_bkpts_specified = false;
+ m_base_direction = std::nullopt;
}
llvm::ArrayRef<OptionDefinition> GetDefinitions() override {
@@ -488,6 +495,7 @@ protected:
uint32_t m_ignore = 0;
Args m_run_to_bkpt_args;
bool m_any_bkpts_specified = false;
+ std::optional<lldb::RunDirection> m_base_direction;
};
void DoExecute(Args &command, CommandReturnObject &result) override {
@@ -654,6 +662,9 @@ protected:
}
}
+ if (m_options.m_base_direction.has_value())
+ process->SetBaseDirection(*m_options.m_base_direction);
+
const uint32_t iohandler_id = process->GetIOHandlerID();
StreamString stream;
diff --git a/lldb/source/Commands/Options.td b/lldb/source/Commands/Options.td
index cc579d7..53864ff 100644
--- a/lldb/source/Commands/Options.td
+++ b/lldb/source/Commands/Options.td
@@ -737,13 +737,17 @@ let Command = "process attach" in {
}
let Command = "process continue" in {
- def process_continue_ignore_count : Option<"ignore-count", "i">, Group<1>,
+ def process_continue_ignore_count : Option<"ignore-count", "i">, Groups<[1,2]>,
Arg<"UnsignedInteger">, Desc<"Ignore <N> crossings of the breakpoint (if it"
" exists) for the currently selected thread.">;
- def process_continue_run_to_bkpt : Option<"continue-to-bkpt", "b">, Group<2>,
+ def process_continue_run_to_bkpt : Option<"continue-to-bkpt", "b">, Groups<[3,4]>,
Arg<"BreakpointIDRange">, Desc<"Specify a breakpoint to continue to, temporarily "
"ignoring other breakpoints. Can be specified more than once. "
"The continue action will be done synchronously if this option is specified.">;
+ def thread_continue_forward : Option<"forward", "F">, Groups<[1,3]>,
+ Desc<"Set the direction to forward before continuing.">;
+ def thread_continue_reverse : Option<"reverse", "R">, Groups<[2,4]>,
+ Desc<"Set the direction to reverse before continuing.">;
}
let Command = "process detach" in {
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp
index 5b13534..3eb5c3b 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp
@@ -330,7 +330,7 @@ bool ClangModulesDeclVendorImpl::AddModule(const SourceModule &module,
}
}
if (!HS.lookupModule(module.path.front().GetStringRef())) {
- error_stream.Printf("error: Header search couldn't locate module %s\n",
+ error_stream.Printf("error: Header search couldn't locate module '%s'\n",
module.path.front().AsCString());
return false;
}
diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp
index 2338367..f458357 100644
--- a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp
+++ b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp
@@ -1112,7 +1112,7 @@ public:
: CommandObjectMultiword(
interpreter, "tagged-pointer",
"Commands for operating on Objective-C tagged pointers.",
- "class-table <subcommand> [<subcommand-options>]") {
+ "tagged-pointer <subcommand> [<subcommand-options>]") {
LoadSubCommand(
"info",
CommandObjectSP(
diff --git a/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp b/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp
index 5f85f99..6635b15 100644
--- a/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp
+++ b/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp
@@ -289,7 +289,7 @@ void ProcessElfCore::UpdateBuildIdForNTFileEntries() {
UUID ProcessElfCore::FindModuleUUID(const llvm::StringRef path) {
// Returns the gnu uuid from matched NT_FILE entry
for (NT_FILE_Entry &entry : m_nt_file_entries)
- if (path == entry.path)
+ if (path == entry.path && entry.uuid.IsValid())
return entry.uuid;
return UUID();
}
diff --git a/lldb/test/API/commands/expression/completion/TestExprCompletion.py b/lldb/test/API/commands/expression/completion/TestExprCompletion.py
index 022b943..09f2ffe 100644
--- a/lldb/test/API/commands/expression/completion/TestExprCompletion.py
+++ b/lldb/test/API/commands/expression/completion/TestExprCompletion.py
@@ -297,6 +297,37 @@ class CommandLineExprCompletionTestCase(TestBase):
enforce_order=True,
)
+ def test_expr_completion_max_results(self):
+ self.build()
+ self.main_source = "main.cpp"
+ self.main_source_spec = lldb.SBFileSpec(self.main_source)
+ self.createTestTarget()
+
+ (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(
+ self, "// Break here", self.main_source_spec
+ )
+
+ expected_completions = [
+ "some_expr.~Expr()",
+ "some_expr.operator=(", # Copy operator
+ "some_expr.operator=(", # Move operator
+ "some_expr.MemberVariableBar",
+ "some_expr.StaticMemberMethodBar()",
+ "some_expr.Self()",
+ "some_expr.FooNoArgsBar()",
+ "some_expr.FooWithArgsBar(",
+ "some_expr.FooNumbersBar1()",
+ "some_expr.FooUnderscoreBar_()",
+ "some_expr.FooWithMultipleArgsBar(",
+ ]
+
+ for i in range(1, len(expected_completions)):
+ self.completions_match(
+ "expr some_expr.",
+ expected_completions[:i],
+ max_completions=i,
+ )
+
def assume_no_completions(self, str_input, cursor_pos=None):
interp = self.dbg.GetCommandInterpreter()
match_strings = lldb.SBStringList()
diff --git a/lldb/test/API/commands/process/reverse-continue/Makefile b/lldb/test/API/commands/process/reverse-continue/Makefile
new file mode 100644
index 0000000..1049594
--- /dev/null
+++ b/lldb/test/API/commands/process/reverse-continue/Makefile
@@ -0,0 +1,3 @@
+C_SOURCES := main.c
+
+include Makefile.rules
diff --git a/lldb/test/API/commands/process/reverse-continue/TestReverseContinue.py b/lldb/test/API/commands/process/reverse-continue/TestReverseContinue.py
new file mode 100644
index 0000000..c04d2b9
--- /dev/null
+++ b/lldb/test/API/commands/process/reverse-continue/TestReverseContinue.py
@@ -0,0 +1,66 @@
+"""
+Test the "process continue --reverse" and "--forward" options.
+"""
+
+
+import lldb
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test.decorators import *
+from lldbsuite.test.gdbclientutils import *
+from lldbsuite.test.lldbreverse import ReverseTestBase
+from lldbsuite.test import lldbutil
+
+
+class TestReverseContinue(ReverseTestBase):
+ @skipIfRemote
+ def test_reverse_continue(self):
+ target, _, _ = self.setup_recording()
+
+ # Set breakpoint and reverse-continue
+ trigger_bkpt = target.BreakpointCreateByName("trigger_breakpoint", None)
+ self.assertTrue(trigger_bkpt.GetNumLocations() > 0)
+ self.expect(
+ "process continue --reverse",
+ substrs=["stop reason = breakpoint {0}.1".format(trigger_bkpt.GetID())],
+ )
+ # `process continue` should preserve current base direction.
+ self.expect(
+ "process continue",
+ STOPPED_DUE_TO_HISTORY_BOUNDARY,
+ substrs=["stopped", "stop reason = history boundary"],
+ )
+ self.expect(
+ "process continue --forward",
+ substrs=["stop reason = breakpoint {0}.1".format(trigger_bkpt.GetID())],
+ )
+
+ def setup_recording(self):
+ """
+ Record execution of code between "start_recording" and "stop_recording" breakpoints.
+
+ Returns with the target stopped at "stop_recording", with recording disabled,
+ ready to reverse-execute.
+ """
+ self.build()
+ target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
+ process = self.connect(target)
+
+ # Record execution from the start of the function "start_recording"
+ # to the start of the function "stop_recording". We want to keep the
+ # interval that we record as small as possible to minimize the run-time
+ # of our single-stepping recorder.
+ start_recording_bkpt = target.BreakpointCreateByName("start_recording", None)
+ self.assertTrue(start_recording_bkpt.GetNumLocations() > 0)
+ initial_threads = lldbutil.continue_to_breakpoint(process, start_recording_bkpt)
+ self.assertEqual(len(initial_threads), 1)
+ target.BreakpointDelete(start_recording_bkpt.GetID())
+ self.start_recording()
+ stop_recording_bkpt = target.BreakpointCreateByName("stop_recording", None)
+ self.assertTrue(stop_recording_bkpt.GetNumLocations() > 0)
+ lldbutil.continue_to_breakpoint(process, stop_recording_bkpt)
+ target.BreakpointDelete(stop_recording_bkpt.GetID())
+ self.stop_recording()
+
+ self.dbg.SetAsync(False)
+
+ return target, process, initial_threads
diff --git a/lldb/test/API/commands/process/reverse-continue/TestReverseContinueNotSupported.py b/lldb/test/API/commands/process/reverse-continue/TestReverseContinueNotSupported.py
new file mode 100644
index 0000000..3d31812
--- /dev/null
+++ b/lldb/test/API/commands/process/reverse-continue/TestReverseContinueNotSupported.py
@@ -0,0 +1,51 @@
+"""
+Test the "process continue --reverse" and "--forward" options
+when reverse-continue is not supported.
+"""
+
+
+import lldb
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test.decorators import *
+from lldbsuite.test import lldbutil
+
+
+class TestReverseContinueNotSupported(TestBase):
+ def test_reverse_continue_not_supported(self):
+ target = self.connect()
+
+ # Set breakpoint and reverse-continue
+ trigger_bkpt = target.BreakpointCreateByName("trigger_breakpoint", None)
+ self.assertTrue(trigger_bkpt, VALID_BREAKPOINT)
+ # `process continue --forward` should work.
+ self.expect(
+ "process continue --forward",
+ substrs=["stop reason = breakpoint {0}.1".format(trigger_bkpt.GetID())],
+ )
+ self.expect(
+ "process continue --reverse",
+ error=True,
+ substrs=["target does not support reverse-continue"],
+ )
+
+ def test_reverse_continue_forward_and_reverse(self):
+ self.connect()
+
+ self.expect(
+ "process continue --forward --reverse",
+ error=True,
+ substrs=["invalid combination of options for the given command"],
+ )
+
+ def connect(self):
+ self.build()
+ exe = self.getBuildArtifact("a.out")
+ target = self.dbg.CreateTarget(exe)
+ self.assertTrue(target, VALID_TARGET)
+
+ main_bkpt = target.BreakpointCreateByName("main", None)
+ self.assertTrue(main_bkpt, VALID_BREAKPOINT)
+
+ process = target.LaunchSimple(None, None, self.get_process_working_directory())
+ self.assertTrue(process, PROCESS_IS_VALID)
+ return target
diff --git a/lldb/test/API/commands/process/reverse-continue/main.c b/lldb/test/API/commands/process/reverse-continue/main.c
new file mode 100644
index 0000000..ccec2bb
--- /dev/null
+++ b/lldb/test/API/commands/process/reverse-continue/main.c
@@ -0,0 +1,12 @@
+static void start_recording() {}
+
+static void trigger_breakpoint() {}
+
+static void stop_recording() {}
+
+int main() {
+ start_recording();
+ trigger_breakpoint();
+ stop_recording();
+ return 0;
+}
diff --git a/lldb/tools/lldb-dap/Handler/NextRequestHandler.cpp b/lldb/tools/lldb-dap/Handler/NextRequestHandler.cpp
index 1603563..3fa1676 100644
--- a/lldb/tools/lldb-dap/Handler/NextRequestHandler.cpp
+++ b/lldb/tools/lldb-dap/Handler/NextRequestHandler.cpp
@@ -13,6 +13,7 @@
#include "llvm/Support/Error.h"
using namespace llvm;
+using namespace lldb;
using namespace lldb_dap::protocol;
namespace lldb_dap {
@@ -35,7 +36,7 @@ Error NextRequestHandler::Run(const NextArguments &args) const {
if (args.granularity == eSteppingGranularityInstruction) {
thread.StepInstruction(/*step_over=*/true);
} else {
- thread.StepOver();
+ thread.StepOver(args.singleThread ? eOnlyThisThread : eOnlyDuringStepping);
}
return Error::success();
diff --git a/lldb/tools/lldb-dap/Handler/RequestHandler.h b/lldb/tools/lldb-dap/Handler/RequestHandler.h
index edb9de7..e13f7a3 100644
--- a/lldb/tools/lldb-dap/Handler/RequestHandler.h
+++ b/lldb/tools/lldb-dap/Handler/RequestHandler.h
@@ -298,11 +298,12 @@ public:
llvm::Error Run(const protocol::NextArguments &args) const override;
};
-class StepInRequestHandler : public LegacyRequestHandler {
+class StepInRequestHandler : public RequestHandler<protocol::StepInArguments,
+ protocol::StepInResponse> {
public:
- using LegacyRequestHandler::LegacyRequestHandler;
+ using RequestHandler::RequestHandler;
static llvm::StringLiteral GetCommand() { return "stepIn"; }
- void operator()(const llvm::json::Object &request) const override;
+ llvm::Error Run(const protocol::StepInArguments &args) const override;
};
class StepInTargetsRequestHandler : public LegacyRequestHandler {
diff --git a/lldb/tools/lldb-dap/Handler/StepInRequestHandler.cpp b/lldb/tools/lldb-dap/Handler/StepInRequestHandler.cpp
index 9d8d75b..15f242a 100644
--- a/lldb/tools/lldb-dap/Handler/StepInRequestHandler.cpp
+++ b/lldb/tools/lldb-dap/Handler/StepInRequestHandler.cpp
@@ -8,91 +8,50 @@
#include "DAP.h"
#include "EventHelper.h"
-#include "JSONUtils.h"
+#include "Protocol/ProtocolRequests.h"
+#include "Protocol/ProtocolTypes.h"
#include "RequestHandler.h"
+using namespace llvm;
+using namespace lldb;
+using namespace lldb_dap::protocol;
+
namespace lldb_dap {
-// "StepInRequest": {
-// "allOf": [ { "$ref": "#/definitions/Request" }, {
-// "type": "object",
-// "description": "StepIn request; value of command field is 'stepIn'. The
-// request starts the debuggee to step into a function/method if possible.
-// If it cannot step into a target, 'stepIn' behaves like 'next'. The debug
-// adapter first sends the StepInResponse and then a StoppedEvent (event
-// type 'step') after the step has completed. If there are multiple
-// function/method calls (or other targets) on the source line, the optional
-// argument 'targetId' can be used to control into which target the 'stepIn'
-// should occur. The list of possible targets for a given source line can be
-// retrieved via the 'stepInTargets' request.", "properties": {
-// "command": {
-// "type": "string",
-// "enum": [ "stepIn" ]
-// },
-// "arguments": {
-// "$ref": "#/definitions/StepInArguments"
-// }
-// },
-// "required": [ "command", "arguments" ]
-// }]
-// },
-// "StepInArguments": {
-// "type": "object",
-// "description": "Arguments for 'stepIn' request.",
-// "properties": {
-// "threadId": {
-// "type": "integer",
-// "description": "Execute 'stepIn' for this thread."
-// },
-// "targetId": {
-// "type": "integer",
-// "description": "Optional id of the target to step into."
-// },
-// "granularity": {
-// "$ref": "#/definitions/SteppingGranularity",
-// "description": "Stepping granularity. If no granularity is specified, a
-// granularity of `statement` is assumed."
-// }
-// },
-// "required": [ "threadId" ]
-// },
-// "StepInResponse": {
-// "allOf": [ { "$ref": "#/definitions/Response" }, {
-// "type": "object",
-// "description": "Response to 'stepIn' request. This is just an
-// acknowledgement, so no body field is required."
-// }]
-// }
-void StepInRequestHandler::operator()(const llvm::json::Object &request) const {
- llvm::json::Object response;
- FillResponse(request, response);
- const auto *arguments = request.getObject("arguments");
+// The request resumes the given thread to step into a function/method and
+// allows all other threads to run freely by resuming them. If the debug adapter
+// supports single thread execution (see capability
+// `supportsSingleThreadExecutionRequests`), setting the `singleThread` argument
+// to true prevents other suspended threads from resuming. If the request cannot
+// step into a target, `stepIn` behaves like the `next` request. The debug
+// adapter first sends the response and then a `stopped` event (with reason
+// `step`) after the step has completed. If there are multiple function/method
+// calls (or other targets) on the source line, the argument `targetId` can be
+// used to control into which target the `stepIn` should occur. The list of
+// possible targets for a given source line can be retrieved via the
+// `stepInTargets` request.
+Error StepInRequestHandler::Run(const StepInArguments &args) const {
+ SBThread thread = dap.GetLLDBThread(args.threadId);
+ if (!thread.IsValid())
+ return make_error<DAPError>("invalid thread");
+
+ // Remember the thread ID that caused the resume so we can set the
+ // "threadCausedFocus" boolean value in the "stopped" events.
+ dap.focus_tid = thread.GetThreadID();
+
+ if (args.granularity == eSteppingGranularityInstruction) {
+ thread.StepInstruction(/*step_over=*/false);
+ return Error::success();
+ }
std::string step_in_target;
- const auto target_id =
- GetInteger<uint64_t>(arguments, "targetId").value_or(0);
- auto it = dap.step_in_targets.find(target_id);
+ auto it = dap.step_in_targets.find(args.targetId.value_or(0));
if (it != dap.step_in_targets.end())
step_in_target = it->second;
- const bool single_thread =
- GetBoolean(arguments, "singleThread").value_or(false);
- lldb::RunMode run_mode =
- single_thread ? lldb::eOnlyThisThread : lldb::eOnlyDuringStepping;
- lldb::SBThread thread = dap.GetLLDBThread(*arguments);
- if (thread.IsValid()) {
- // Remember the thread ID that caused the resume so we can set the
- // "threadCausedFocus" boolean value in the "stopped" events.
- dap.focus_tid = thread.GetThreadID();
- if (HasInstructionGranularity(*arguments)) {
- thread.StepInstruction(/*step_over=*/false);
- } else {
- thread.StepInto(step_in_target.c_str(), run_mode);
- }
- } else {
- response["success"] = llvm::json::Value(false);
- }
- dap.SendJSON(llvm::json::Value(std::move(response)));
+ RunMode run_mode = args.singleThread ? eOnlyThisThread : eOnlyDuringStepping;
+ thread.StepInto(step_in_target.c_str(), run_mode);
+ return Error::success();
}
} // namespace lldb_dap
diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp
index b113299..ee7c653 100644
--- a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp
+++ b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp
@@ -121,4 +121,13 @@ bool fromJSON(const llvm::json::Value &Params, NextArguments &NA,
OM.mapOptional("granularity", NA.granularity);
}
+bool fromJSON(const llvm::json::Value &Params, StepInArguments &SIA,
+ llvm::json::Path P) {
+ json::ObjectMapper OM(Params, P);
+ return OM && OM.map("threadId", SIA.threadId) &&
+ OM.map("targetId", SIA.targetId) &&
+ OM.mapOptional("singleThread", SIA.singleThread) &&
+ OM.mapOptional("granularity", SIA.granularity);
+}
+
} // namespace lldb_dap::protocol
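For reference, a minimal sketch of how a `stepIn` payload decodes through the `fromJSON` overload above. It is illustrative only and not part of the lldb-dap sources: the payload values and the standalone `main` wrapper are made up, and the mapping of the string "instruction" onto `eSteppingGranularityInstruction` assumes the existing `SteppingGranularity` parser in ProtocolTypes.h.

#include "Protocol/ProtocolRequests.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/raw_ostream.h"

using namespace lldb_dap::protocol;

int main() {
  // A hypothetical stepIn request body; threadId is required, the rest fall
  // back to the defaults in StepInArguments.
  llvm::Expected<llvm::json::Value> Payload = llvm::json::parse(
      R"({"threadId": 12345, "singleThread": true, "granularity": "instruction"})");
  if (!Payload) {
    llvm::errs() << llvm::toString(Payload.takeError()) << "\n";
    return 1;
  }

  StepInArguments Args;
  llvm::json::Path::Root Root;
  if (!fromJSON(*Payload, Args, Root)) {
    Root.printErrorContext(*Payload, llvm::errs());
    return 1;
  }

  // At this point Args.threadId == 12345 and Args.singleThread == true;
  // Args.granularity should be eSteppingGranularityInstruction, assuming the
  // SteppingGranularity parser accepts the DAP string "instruction".
  return 0;
}
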
diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h
index 6e3e2c6a..50c16c1 100644
--- a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h
+++ b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h
@@ -256,6 +256,28 @@ bool fromJSON(const llvm::json::Value &, NextArguments &, llvm::json::Path);
/// body field is required.
using NextResponse = VoidResponse;
+/// Arguments for `stepIn` request.
+struct StepInArguments {
+ /// Specifies the thread for which to resume execution for one step-into (of
+ /// the given granularity).
+ uint64_t threadId = LLDB_INVALID_THREAD_ID;
+
+ /// If this flag is true, all other suspended threads are not resumed.
+ bool singleThread = false;
+
+ /// Id of the target to step into.
+ std::optional<uint64_t> targetId;
+
+ /// Stepping granularity. If no granularity is specified, a granularity of
+ /// `statement` is assumed.
+ SteppingGranularity granularity = eSteppingGranularityStatement;
+};
+bool fromJSON(const llvm::json::Value &, StepInArguments &, llvm::json::Path);
+
+/// Response to `stepIn` request. This is just an acknowledgement, so no
+/// body field is required.
+using StepInResponse = VoidResponse;
+
} // namespace lldb_dap::protocol
#endif
diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md
index a3f9122..6f6ecc89 100644
--- a/llvm/docs/ReleaseNotes.md
+++ b/llvm/docs/ReleaseNotes.md
@@ -235,6 +235,10 @@ Changes to LLDB
* The `min-gdbserver-port` and `max-gdbserver-port` options have been removed
from `lldb-server`'s platform mode. Since the changes to `lldb-server`'s port
handling in LLDB 20, these options have had no effect.
+* LLDB now supports `process continue --reverse` when used with debug servers
+  that support reverse execution, such as [rr](https://rr-project.org).
+  While executing in reverse, `process continue --forward` resumes normal
+  forward execution.
### Changes to lldb-dap
diff --git a/llvm/include/llvm/ProfileData/IndexedMemProfData.h b/llvm/include/llvm/ProfileData/IndexedMemProfData.h
new file mode 100644
index 0000000..3c6c329
--- /dev/null
+++ b/llvm/include/llvm/ProfileData/IndexedMemProfData.h
@@ -0,0 +1,23 @@
+//===- IndexedMemProfData.h - MemProf format support ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MemProf data is serialized by writeMemProf, which is declared in this header.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/ProfileData/MemProf.h"
+
+namespace llvm {
+
+// Write the MemProf data to OS.
+Error writeMemProf(ProfOStream &OS, memprof::IndexedMemProfData &MemProfData,
+ memprof::IndexedVersion MemProfVersionRequested,
+ bool MemProfFullSchema);
+
+} // namespace llvm
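As a usage sketch for the declaration above (illustrative only, not from the LLVM sources): `writeMemProf` can be driven through a `ProfOStream` backed by an in-memory string, assuming the `raw_string_ostream` constructor that `ProfOStream` provides in InstrProf.h. The empty `IndexedMemProfData` is only a placeholder to show the call shape.

#include "llvm/ProfileData/IndexedMemProfData.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
#include <utility>

using namespace llvm;

int main() {
  std::string Buffer;
  raw_string_ostream RSO(Buffer);
  ProfOStream OS(RSO); // ProfOStream comes from InstrProf.h, included above.

  memprof::IndexedMemProfData MemProfData; // Empty profile; placeholder only.
  if (Error E = writeMemProf(OS, MemProfData, memprof::Version3,
                             /*MemProfFullSchema=*/false)) {
    errs() << toString(std::move(E)) << "\n";
    return 1;
  }
  // Buffer now holds the serialized V3 section laid out as described in
  // IndexedMemProfData.cpp.
  return 0;
}
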
diff --git a/llvm/lib/ProfileData/CMakeLists.txt b/llvm/lib/ProfileData/CMakeLists.txt
index 4fa1b76..eb7c2a3 100644
--- a/llvm/lib/ProfileData/CMakeLists.txt
+++ b/llvm/lib/ProfileData/CMakeLists.txt
@@ -1,5 +1,6 @@
add_llvm_component_library(LLVMProfileData
GCOV.cpp
+ IndexedMemProfData.cpp
InstrProf.cpp
InstrProfCorrelator.cpp
InstrProfReader.cpp
diff --git a/llvm/lib/ProfileData/IndexedMemProfData.cpp b/llvm/lib/ProfileData/IndexedMemProfData.cpp
new file mode 100644
index 0000000..fb4a891
--- /dev/null
+++ b/llvm/lib/ProfileData/IndexedMemProfData.cpp
@@ -0,0 +1,300 @@
+//===- IndexedMemProfData.cpp - MemProf format support ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MemProf data is serialized by writeMemProf, which is defined in this file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/ProfileData/MemProf.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/OnDiskHashTable.h"
+
+namespace llvm {
+
+// Serialize Schema.
+static void writeMemProfSchema(ProfOStream &OS,
+ const memprof::MemProfSchema &Schema) {
+ OS.write(static_cast<uint64_t>(Schema.size()));
+ for (const auto Id : Schema)
+ OS.write(static_cast<uint64_t>(Id));
+}
+
+// Serialize MemProfRecordData. Return RecordTableOffset.
+static uint64_t writeMemProfRecords(
+ ProfOStream &OS,
+ llvm::MapVector<GlobalValue::GUID, memprof::IndexedMemProfRecord>
+ &MemProfRecordData,
+ memprof::MemProfSchema *Schema, memprof::IndexedVersion Version,
+ llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
+ *MemProfCallStackIndexes = nullptr) {
+ memprof::RecordWriterTrait RecordWriter(Schema, Version,
+ MemProfCallStackIndexes);
+ OnDiskChainedHashTableGenerator<memprof::RecordWriterTrait>
+ RecordTableGenerator;
+ for (auto &[GUID, Record] : MemProfRecordData) {
+ // Insert the key (func hash) and value (memprof record).
+ RecordTableGenerator.insert(GUID, Record, RecordWriter);
+ }
+ // Release the memory of this MapVector as it is no longer needed.
+ MemProfRecordData.clear();
+
+ // The call to Emit invokes RecordWriterTrait::EmitData which destructs
+ // the memprof record copies owned by the RecordTableGenerator. This works
+ // because the RecordTableGenerator is not used after this point.
+ return RecordTableGenerator.Emit(OS.OS, RecordWriter);
+}
+
+// Serialize MemProfFrameData. Return FrameTableOffset.
+static uint64_t writeMemProfFrames(
+ ProfOStream &OS,
+ llvm::MapVector<memprof::FrameId, memprof::Frame> &MemProfFrameData) {
+ OnDiskChainedHashTableGenerator<memprof::FrameWriterTrait>
+ FrameTableGenerator;
+ for (auto &[FrameId, Frame] : MemProfFrameData) {
+ // Insert the key (frame id) and value (frame contents).
+ FrameTableGenerator.insert(FrameId, Frame);
+ }
+ // Release the memory of this MapVector as it is no longer needed.
+ MemProfFrameData.clear();
+
+ return FrameTableGenerator.Emit(OS.OS);
+}
+
+// Serialize MemProfFrameData. Return the mapping from FrameIds to their
+// indexes within the frame array.
+static llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId>
+writeMemProfFrameArray(
+ ProfOStream &OS,
+ llvm::MapVector<memprof::FrameId, memprof::Frame> &MemProfFrameData,
+ llvm::DenseMap<memprof::FrameId, memprof::FrameStat> &FrameHistogram) {
+ // Mappings from FrameIds to array indexes.
+ llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId> MemProfFrameIndexes;
+
+ // Compute the order in which we serialize Frames. The order does not matter
+ // in terms of correctness, but we still compute it for deserialization
+ // performance. Specifically, if we serialize frequently used Frames one
+ // after another, we have better cache utilization. For two Frames that
+ // appear equally frequently, we break a tie by serializing the one that tends
+ // to appear earlier in call stacks. We implement the tie-breaking mechanism
+ // by computing the sum of indexes within call stacks for each Frame. If we
+// still have a tie, then we resort to comparing the two FrameIds, which is
+// done purely for output stability.
+ std::vector<std::pair<memprof::FrameId, const memprof::Frame *>> FrameIdOrder;
+ FrameIdOrder.reserve(MemProfFrameData.size());
+ for (const auto &[Id, Frame] : MemProfFrameData)
+ FrameIdOrder.emplace_back(Id, &Frame);
+ assert(MemProfFrameData.size() == FrameIdOrder.size());
+ llvm::sort(FrameIdOrder,
+ [&](const std::pair<memprof::FrameId, const memprof::Frame *> &L,
+ const std::pair<memprof::FrameId, const memprof::Frame *> &R) {
+ const auto &SL = FrameHistogram[L.first];
+ const auto &SR = FrameHistogram[R.first];
+ // Popular FrameIds should come first.
+ if (SL.Count != SR.Count)
+ return SL.Count > SR.Count;
+ // If they are equally popular, then the one that tends to appear
+ // earlier in call stacks should come first.
+ if (SL.PositionSum != SR.PositionSum)
+ return SL.PositionSum < SR.PositionSum;
+ // Compare their FrameIds for sort stability.
+ return L.first < R.first;
+ });
+
+ // Serialize all frames while creating mappings from linear IDs to FrameIds.
+ uint64_t Index = 0;
+ MemProfFrameIndexes.reserve(FrameIdOrder.size());
+ for (const auto &[Id, F] : FrameIdOrder) {
+ F->serialize(OS.OS);
+ MemProfFrameIndexes.insert({Id, Index});
+ ++Index;
+ }
+ assert(MemProfFrameData.size() == Index);
+ assert(MemProfFrameData.size() == MemProfFrameIndexes.size());
+
+ // Release the memory of this MapVector as it is no longer needed.
+ MemProfFrameData.clear();
+
+ return MemProfFrameIndexes;
+}
+
+static uint64_t writeMemProfCallStacks(
+ ProfOStream &OS,
+ llvm::MapVector<memprof::CallStackId, llvm::SmallVector<memprof::FrameId>>
+ &MemProfCallStackData) {
+ OnDiskChainedHashTableGenerator<memprof::CallStackWriterTrait>
+ CallStackTableGenerator;
+ for (auto &[CSId, CallStack] : MemProfCallStackData)
+ CallStackTableGenerator.insert(CSId, CallStack);
+ // Release the memory of this vector as it is no longer needed.
+ MemProfCallStackData.clear();
+
+ return CallStackTableGenerator.Emit(OS.OS);
+}
+
+static llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
+writeMemProfCallStackArray(
+ ProfOStream &OS,
+ llvm::MapVector<memprof::CallStackId, llvm::SmallVector<memprof::FrameId>>
+ &MemProfCallStackData,
+ llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId>
+ &MemProfFrameIndexes,
+ llvm::DenseMap<memprof::FrameId, memprof::FrameStat> &FrameHistogram,
+ unsigned &NumElements) {
+ llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
+ MemProfCallStackIndexes;
+
+ memprof::CallStackRadixTreeBuilder<memprof::FrameId> Builder;
+ Builder.build(std::move(MemProfCallStackData), &MemProfFrameIndexes,
+ FrameHistogram);
+ for (auto I : Builder.getRadixArray())
+ OS.write32(I);
+ NumElements = Builder.getRadixArray().size();
+ MemProfCallStackIndexes = Builder.takeCallStackPos();
+
+ // Release the memory of this vector as it is no longer needed.
+ MemProfCallStackData.clear();
+
+ return MemProfCallStackIndexes;
+}
+
+// Write out MemProf Version2 as follows:
+// uint64_t Version
+// uint64_t RecordTableOffset = RecordTableGenerator.Emit
+// uint64_t FramePayloadOffset = Offset for the frame payload
+// uint64_t FrameTableOffset = FrameTableGenerator.Emit
+// uint64_t CallStackPayloadOffset = Offset for the call stack payload (NEW in V2)
+// uint64_t CallStackTableOffset = CallStackTableGenerator.Emit (NEW in V2)
+// uint64_t Num schema entries
+// uint64_t Schema entry 0
+// uint64_t Schema entry 1
+// ....
+// uint64_t Schema entry N - 1
+// OnDiskChainedHashTable MemProfRecordData
+// OnDiskChainedHashTable MemProfFrameData
+// OnDiskChainedHashTable MemProfCallStackData (NEW in V2)
+static Error writeMemProfV2(ProfOStream &OS,
+ memprof::IndexedMemProfData &MemProfData,
+ bool MemProfFullSchema) {
+ OS.write(memprof::Version2);
+ uint64_t HeaderUpdatePos = OS.tell();
+ OS.write(0ULL); // Reserve space for the memprof record table offset.
+ OS.write(0ULL); // Reserve space for the memprof frame payload offset.
+ OS.write(0ULL); // Reserve space for the memprof frame table offset.
+ OS.write(0ULL); // Reserve space for the memprof call stack payload offset.
+ OS.write(0ULL); // Reserve space for the memprof call stack table offset.
+
+ auto Schema = memprof::getHotColdSchema();
+ if (MemProfFullSchema)
+ Schema = memprof::getFullSchema();
+ writeMemProfSchema(OS, Schema);
+
+ uint64_t RecordTableOffset =
+ writeMemProfRecords(OS, MemProfData.Records, &Schema, memprof::Version2);
+
+ uint64_t FramePayloadOffset = OS.tell();
+ uint64_t FrameTableOffset = writeMemProfFrames(OS, MemProfData.Frames);
+
+ uint64_t CallStackPayloadOffset = OS.tell();
+ uint64_t CallStackTableOffset =
+ writeMemProfCallStacks(OS, MemProfData.CallStacks);
+
+ uint64_t Header[] = {
+ RecordTableOffset, FramePayloadOffset, FrameTableOffset,
+ CallStackPayloadOffset, CallStackTableOffset,
+ };
+ OS.patch({{HeaderUpdatePos, Header}});
+
+ return Error::success();
+}
+
+// Write out MemProf Version3 as follows:
+// uint64_t Version
+// uint64_t CallStackPayloadOffset = Offset for the call stack payload
+// uint64_t RecordPayloadOffset = Offset for the record payload
+// uint64_t RecordTableOffset = RecordTableGenerator.Emit
+// uint64_t Num schema entries
+// uint64_t Schema entry 0
+// uint64_t Schema entry 1
+// ....
+// uint64_t Schema entry N - 1
+// Frames serialized one after another
+// Call stacks encoded as a radix tree
+// OnDiskChainedHashTable MemProfRecordData
+static Error writeMemProfV3(ProfOStream &OS,
+ memprof::IndexedMemProfData &MemProfData,
+ bool MemProfFullSchema) {
+ OS.write(memprof::Version3);
+ uint64_t HeaderUpdatePos = OS.tell();
+ OS.write(0ULL); // Reserve space for the memprof call stack payload offset.
+ OS.write(0ULL); // Reserve space for the memprof record payload offset.
+ OS.write(0ULL); // Reserve space for the memprof record table offset.
+
+ auto Schema = memprof::getHotColdSchema();
+ if (MemProfFullSchema)
+ Schema = memprof::getFullSchema();
+ writeMemProfSchema(OS, Schema);
+
+ llvm::DenseMap<memprof::FrameId, memprof::FrameStat> FrameHistogram =
+ memprof::computeFrameHistogram(MemProfData.CallStacks);
+ assert(MemProfData.Frames.size() == FrameHistogram.size());
+
+ llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId> MemProfFrameIndexes =
+ writeMemProfFrameArray(OS, MemProfData.Frames, FrameHistogram);
+
+ uint64_t CallStackPayloadOffset = OS.tell();
+ // The number of elements in the call stack array.
+ unsigned NumElements = 0;
+ llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
+ MemProfCallStackIndexes =
+ writeMemProfCallStackArray(OS, MemProfData.CallStacks,
+ MemProfFrameIndexes, FrameHistogram,
+ NumElements);
+
+ uint64_t RecordPayloadOffset = OS.tell();
+ uint64_t RecordTableOffset =
+ writeMemProfRecords(OS, MemProfData.Records, &Schema, memprof::Version3,
+ &MemProfCallStackIndexes);
+
+ // IndexedMemProfReader::deserializeV3 computes the number of elements in the
+ // call stack array from the difference between CallStackPayloadOffset and
+ // RecordPayloadOffset. Verify that the computation works.
+ assert(CallStackPayloadOffset +
+ NumElements * sizeof(memprof::LinearFrameId) ==
+ RecordPayloadOffset);
+
+ uint64_t Header[] = {
+ CallStackPayloadOffset,
+ RecordPayloadOffset,
+ RecordTableOffset,
+ };
+ OS.patch({{HeaderUpdatePos, Header}});
+
+ return Error::success();
+}
+
+// Write out the MemProf data in a requested version.
+Error writeMemProf(ProfOStream &OS, memprof::IndexedMemProfData &MemProfData,
+ memprof::IndexedVersion MemProfVersionRequested,
+ bool MemProfFullSchema) {
+ switch (MemProfVersionRequested) {
+ case memprof::Version2:
+ return writeMemProfV2(OS, MemProfData, MemProfFullSchema);
+ case memprof::Version3:
+ return writeMemProfV3(OS, MemProfData, MemProfFullSchema);
+ }
+
+ return make_error<InstrProfError>(
+ instrprof_error::unsupported_version,
+ formatv("MemProf version {} not supported; "
+ "requires version between {} and {}, inclusive",
+ MemProfVersionRequested, memprof::MinimumSupportedVersion,
+ memprof::MaximumSupportedVersion));
+}
+
+} // namespace llvm
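To make the V3 layout documented above concrete, a small reader-side sketch follows. It is not taken from `IndexedMemProfReader`; it only decodes the fixed-size prefix of the section, assuming the little-endian encoding `ProfOStream` emits, and it skips any bounds checking.

#include "llvm/Support/Endian.h"
#include <cstdint>

// Fixed-size prefix of the MemProf V3 section, as documented above
// writeMemProfV3: a version word followed by three uint64_t stream positions
// recorded with OS.tell() at write time.
struct MemProfV3HeaderPrefix {
  uint64_t Version;
  uint64_t CallStackPayloadOffset;
  uint64_t RecordPayloadOffset;
  uint64_t RecordTableOffset;
};

static MemProfV3HeaderPrefix
readMemProfV3HeaderPrefix(const unsigned char *Buf) {
  using namespace llvm::support;
  MemProfV3HeaderPrefix H;
  H.Version = endian::read64le(Buf);
  H.CallStackPayloadOffset = endian::read64le(Buf + 8);
  H.RecordPayloadOffset = endian::read64le(Buf + 16);
  H.RecordTableOffset = endian::read64le(Buf + 24);
  // As the assertion in writeMemProfV3 notes, the number of 32-bit radix-tree
  // elements can be recovered as
  //   (RecordPayloadOffset - CallStackPayloadOffset) / sizeof(uint32_t).
  return H;
}
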
diff --git a/llvm/lib/ProfileData/InstrProfWriter.cpp b/llvm/lib/ProfileData/InstrProfWriter.cpp
index f1882dc..2759346 100644
--- a/llvm/lib/ProfileData/InstrProfWriter.cpp
+++ b/llvm/lib/ProfileData/InstrProfWriter.cpp
@@ -16,6 +16,7 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/ProfileSummary.h"
+#include "llvm/ProfileData/IndexedMemProfData.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/ProfileData/ProfileCommon.h"
@@ -23,7 +24,6 @@
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/Error.h"
-#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/raw_ostream.h"
@@ -449,287 +449,6 @@ static void setSummary(IndexedInstrProf::Summary *TheSummary,
TheSummary->setEntry(I, Res[I]);
}
-// Serialize Schema.
-static void writeMemProfSchema(ProfOStream &OS,
- const memprof::MemProfSchema &Schema) {
- OS.write(static_cast<uint64_t>(Schema.size()));
- for (const auto Id : Schema)
- OS.write(static_cast<uint64_t>(Id));
-}
-
-// Serialize MemProfRecordData. Return RecordTableOffset.
-static uint64_t writeMemProfRecords(
- ProfOStream &OS,
- llvm::MapVector<GlobalValue::GUID, memprof::IndexedMemProfRecord>
- &MemProfRecordData,
- memprof::MemProfSchema *Schema, memprof::IndexedVersion Version,
- llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
- *MemProfCallStackIndexes = nullptr) {
- memprof::RecordWriterTrait RecordWriter(Schema, Version,
- MemProfCallStackIndexes);
- OnDiskChainedHashTableGenerator<memprof::RecordWriterTrait>
- RecordTableGenerator;
- for (auto &[GUID, Record] : MemProfRecordData) {
- // Insert the key (func hash) and value (memprof record).
- RecordTableGenerator.insert(GUID, Record, RecordWriter);
- }
- // Release the memory of this MapVector as it is no longer needed.
- MemProfRecordData.clear();
-
- // The call to Emit invokes RecordWriterTrait::EmitData which destructs
- // the memprof record copies owned by the RecordTableGenerator. This works
- // because the RecordTableGenerator is not used after this point.
- return RecordTableGenerator.Emit(OS.OS, RecordWriter);
-}
-
-// Serialize MemProfFrameData. Return FrameTableOffset.
-static uint64_t writeMemProfFrames(
- ProfOStream &OS,
- llvm::MapVector<memprof::FrameId, memprof::Frame> &MemProfFrameData) {
- OnDiskChainedHashTableGenerator<memprof::FrameWriterTrait>
- FrameTableGenerator;
- for (auto &[FrameId, Frame] : MemProfFrameData) {
- // Insert the key (frame id) and value (frame contents).
- FrameTableGenerator.insert(FrameId, Frame);
- }
- // Release the memory of this MapVector as it is no longer needed.
- MemProfFrameData.clear();
-
- return FrameTableGenerator.Emit(OS.OS);
-}
-
-// Serialize MemProfFrameData. Return the mapping from FrameIds to their
-// indexes within the frame array.
-static llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId>
-writeMemProfFrameArray(
- ProfOStream &OS,
- llvm::MapVector<memprof::FrameId, memprof::Frame> &MemProfFrameData,
- llvm::DenseMap<memprof::FrameId, memprof::FrameStat> &FrameHistogram) {
- // Mappings from FrameIds to array indexes.
- llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId> MemProfFrameIndexes;
-
- // Compute the order in which we serialize Frames. The order does not matter
- // in terms of correctness, but we still compute it for deserialization
- // performance. Specifically, if we serialize frequently used Frames one
- // after another, we have better cache utilization. For two Frames that
- // appear equally frequently, we break a tie by serializing the one that tends
- // to appear earlier in call stacks. We implement the tie-breaking mechanism
- // by computing the sum of indexes within call stacks for each Frame. If we
- // still have a tie, then we just resort to compare two FrameIds, which is
- // just for stability of output.
- std::vector<std::pair<memprof::FrameId, const memprof::Frame *>> FrameIdOrder;
- FrameIdOrder.reserve(MemProfFrameData.size());
- for (const auto &[Id, Frame] : MemProfFrameData)
- FrameIdOrder.emplace_back(Id, &Frame);
- assert(MemProfFrameData.size() == FrameIdOrder.size());
- llvm::sort(FrameIdOrder,
- [&](const std::pair<memprof::FrameId, const memprof::Frame *> &L,
- const std::pair<memprof::FrameId, const memprof::Frame *> &R) {
- const auto &SL = FrameHistogram[L.first];
- const auto &SR = FrameHistogram[R.first];
- // Popular FrameIds should come first.
- if (SL.Count != SR.Count)
- return SL.Count > SR.Count;
- // If they are equally popular, then the one that tends to appear
- // earlier in call stacks should come first.
- if (SL.PositionSum != SR.PositionSum)
- return SL.PositionSum < SR.PositionSum;
- // Compare their FrameIds for sort stability.
- return L.first < R.first;
- });
-
- // Serialize all frames while creating mappings from linear IDs to FrameIds.
- uint64_t Index = 0;
- MemProfFrameIndexes.reserve(FrameIdOrder.size());
- for (const auto &[Id, F] : FrameIdOrder) {
- F->serialize(OS.OS);
- MemProfFrameIndexes.insert({Id, Index});
- ++Index;
- }
- assert(MemProfFrameData.size() == Index);
- assert(MemProfFrameData.size() == MemProfFrameIndexes.size());
-
- // Release the memory of this MapVector as it is no longer needed.
- MemProfFrameData.clear();
-
- return MemProfFrameIndexes;
-}
-
-static uint64_t writeMemProfCallStacks(
- ProfOStream &OS,
- llvm::MapVector<memprof::CallStackId, llvm::SmallVector<memprof::FrameId>>
- &MemProfCallStackData) {
- OnDiskChainedHashTableGenerator<memprof::CallStackWriterTrait>
- CallStackTableGenerator;
- for (auto &[CSId, CallStack] : MemProfCallStackData)
- CallStackTableGenerator.insert(CSId, CallStack);
- // Release the memory of this vector as it is no longer needed.
- MemProfCallStackData.clear();
-
- return CallStackTableGenerator.Emit(OS.OS);
-}
-
-static llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
-writeMemProfCallStackArray(
- ProfOStream &OS,
- llvm::MapVector<memprof::CallStackId, llvm::SmallVector<memprof::FrameId>>
- &MemProfCallStackData,
- llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId>
- &MemProfFrameIndexes,
- llvm::DenseMap<memprof::FrameId, memprof::FrameStat> &FrameHistogram,
- unsigned &NumElements) {
- llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
- MemProfCallStackIndexes;
-
- memprof::CallStackRadixTreeBuilder<memprof::FrameId> Builder;
- Builder.build(std::move(MemProfCallStackData), &MemProfFrameIndexes,
- FrameHistogram);
- for (auto I : Builder.getRadixArray())
- OS.write32(I);
- NumElements = Builder.getRadixArray().size();
- MemProfCallStackIndexes = Builder.takeCallStackPos();
-
- // Release the memory of this vector as it is no longer needed.
- MemProfCallStackData.clear();
-
- return MemProfCallStackIndexes;
-}
-
-// Write out MemProf Version2 as follows:
-// uint64_t Version
-// uint64_t RecordTableOffset = RecordTableGenerator.Emit
-// uint64_t FramePayloadOffset = Offset for the frame payload
-// uint64_t FrameTableOffset = FrameTableGenerator.Emit
-// uint64_t CallStackPayloadOffset = Offset for the call stack payload (NEW V2)
-// uint64_t CallStackTableOffset = CallStackTableGenerator.Emit (NEW in V2)
-// uint64_t Num schema entries
-// uint64_t Schema entry 0
-// uint64_t Schema entry 1
-// ....
-// uint64_t Schema entry N - 1
-// OnDiskChainedHashTable MemProfRecordData
-// OnDiskChainedHashTable MemProfFrameData
-// OnDiskChainedHashTable MemProfCallStackData (NEW in V2)
-static Error writeMemProfV2(ProfOStream &OS,
- memprof::IndexedMemProfData &MemProfData,
- bool MemProfFullSchema) {
- OS.write(memprof::Version2);
- uint64_t HeaderUpdatePos = OS.tell();
- OS.write(0ULL); // Reserve space for the memprof record table offset.
- OS.write(0ULL); // Reserve space for the memprof frame payload offset.
- OS.write(0ULL); // Reserve space for the memprof frame table offset.
- OS.write(0ULL); // Reserve space for the memprof call stack payload offset.
- OS.write(0ULL); // Reserve space for the memprof call stack table offset.
-
- auto Schema = memprof::getHotColdSchema();
- if (MemProfFullSchema)
- Schema = memprof::getFullSchema();
- writeMemProfSchema(OS, Schema);
-
- uint64_t RecordTableOffset =
- writeMemProfRecords(OS, MemProfData.Records, &Schema, memprof::Version2);
-
- uint64_t FramePayloadOffset = OS.tell();
- uint64_t FrameTableOffset = writeMemProfFrames(OS, MemProfData.Frames);
-
- uint64_t CallStackPayloadOffset = OS.tell();
- uint64_t CallStackTableOffset =
- writeMemProfCallStacks(OS, MemProfData.CallStacks);
-
- uint64_t Header[] = {
- RecordTableOffset, FramePayloadOffset, FrameTableOffset,
- CallStackPayloadOffset, CallStackTableOffset,
- };
- OS.patch({{HeaderUpdatePos, Header}});
-
- return Error::success();
-}
-
-// Write out MemProf Version3 as follows:
-// uint64_t Version
-// uint64_t CallStackPayloadOffset = Offset for the call stack payload
-// uint64_t RecordPayloadOffset = Offset for the record payload
-// uint64_t RecordTableOffset = RecordTableGenerator.Emit
-// uint64_t Num schema entries
-// uint64_t Schema entry 0
-// uint64_t Schema entry 1
-// ....
-// uint64_t Schema entry N - 1
-// Frames serialized one after another
-// Call stacks encoded as a radix tree
-// OnDiskChainedHashTable MemProfRecordData
-static Error writeMemProfV3(ProfOStream &OS,
- memprof::IndexedMemProfData &MemProfData,
- bool MemProfFullSchema) {
- OS.write(memprof::Version3);
- uint64_t HeaderUpdatePos = OS.tell();
- OS.write(0ULL); // Reserve space for the memprof call stack payload offset.
- OS.write(0ULL); // Reserve space for the memprof record payload offset.
- OS.write(0ULL); // Reserve space for the memprof record table offset.
-
- auto Schema = memprof::getHotColdSchema();
- if (MemProfFullSchema)
- Schema = memprof::getFullSchema();
- writeMemProfSchema(OS, Schema);
-
- llvm::DenseMap<memprof::FrameId, memprof::FrameStat> FrameHistogram =
- memprof::computeFrameHistogram(MemProfData.CallStacks);
- assert(MemProfData.Frames.size() == FrameHistogram.size());
-
- llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId> MemProfFrameIndexes =
- writeMemProfFrameArray(OS, MemProfData.Frames, FrameHistogram);
-
- uint64_t CallStackPayloadOffset = OS.tell();
- // The number of elements in the call stack array.
- unsigned NumElements = 0;
- llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
- MemProfCallStackIndexes =
- writeMemProfCallStackArray(OS, MemProfData.CallStacks,
- MemProfFrameIndexes, FrameHistogram,
- NumElements);
-
- uint64_t RecordPayloadOffset = OS.tell();
- uint64_t RecordTableOffset =
- writeMemProfRecords(OS, MemProfData.Records, &Schema, memprof::Version3,
- &MemProfCallStackIndexes);
-
- // IndexedMemProfReader::deserializeV3 computes the number of elements in the
- // call stack array from the difference between CallStackPayloadOffset and
- // RecordPayloadOffset. Verify that the computation works.
- assert(CallStackPayloadOffset +
- NumElements * sizeof(memprof::LinearFrameId) ==
- RecordPayloadOffset);
-
- uint64_t Header[] = {
- CallStackPayloadOffset,
- RecordPayloadOffset,
- RecordTableOffset,
- };
- OS.patch({{HeaderUpdatePos, Header}});
-
- return Error::success();
-}
-
-// Write out the MemProf data in a requested version.
-static Error writeMemProf(ProfOStream &OS,
- memprof::IndexedMemProfData &MemProfData,
- memprof::IndexedVersion MemProfVersionRequested,
- bool MemProfFullSchema) {
- switch (MemProfVersionRequested) {
- case memprof::Version2:
- return writeMemProfV2(OS, MemProfData, MemProfFullSchema);
- case memprof::Version3:
- return writeMemProfV3(OS, MemProfData, MemProfFullSchema);
- }
-
- return make_error<InstrProfError>(
- instrprof_error::unsupported_version,
- formatv("MemProf version {} not supported; "
- "requires version between {} and {}, inclusive",
- MemProfVersionRequested, memprof::MinimumSupportedVersion,
- memprof::MaximumSupportedVersion));
-}
-
uint64_t InstrProfWriter::writeHeader(const IndexedInstrProf::Header &Header,
const bool WritePrevVersion,
ProfOStream &OS) {
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 7c47492..0e26005 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -892,11 +892,10 @@ AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
OffsetImm = IOffsetInBytes;
int NewOpcode = getMatchingWideOpcode(Opc);
- bool FinalIsScaled = !TII->hasUnscaledLdStOffset(NewOpcode);
-
- // Adjust final offset if the result opcode is a scaled store.
- if (FinalIsScaled) {
- int NewOffsetStride = FinalIsScaled ? TII->getMemScale(NewOpcode) : 1;
+ // Adjust final offset on scaled stores because the new instruction
+ // has a different scale.
+ if (!TII->hasUnscaledLdStOffset(NewOpcode)) {
+ int NewOffsetStride = TII->getMemScale(NewOpcode);
assert(((OffsetImm % NewOffsetStride) == 0) &&
"Offset should be a multiple of the store memory scale");
OffsetImm = OffsetImm / NewOffsetStride;
@@ -906,7 +905,7 @@ AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
DebugLoc DL = I->getDebugLoc();
MachineBasicBlock *MBB = I->getParent();
MachineInstrBuilder MIB;
- MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
+ MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(NewOpcode))
.addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
.add(BaseRegOp)
.addImm(OffsetImm)
diff --git a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
index 1423888..6b9797c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
@@ -26,9 +26,9 @@ using namespace llvm;
namespace llvm {
StringRef getNVPTXRegClassName(TargetRegisterClass const *RC) {
if (RC == &NVPTX::Float32RegsRegClass)
- return ".f32";
+ return ".b32";
if (RC == &NVPTX::Float64RegsRegClass)
- return ".f64";
+ return ".b64";
if (RC == &NVPTX::Int128RegsRegClass)
return ".b128";
if (RC == &NVPTX::Int64RegsRegClass)
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 1fd1048..f09e3cb 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -1795,15 +1795,10 @@ bool RISCVFrameLowering::assignCalleeSavedSpillSlots(
MFI.CreateFixedSpillStackObject(
QCIInterruptPushAmount, -static_cast<int64_t>(QCIInterruptPushAmount));
} else if (RVFI->isPushable(MF)) {
- // Allocate a fixed object that covers all the registers that are pushed.
- if (unsigned PushedRegs = RVFI->getRVPushRegs()) {
- int64_t PushedRegsBytes =
- static_cast<int64_t>(PushedRegs) * (STI.getXLen() / 8);
- MFI.CreateFixedSpillStackObject(PushedRegsBytes, -PushedRegsBytes);
- }
+ // Allocate a fixed object that covers the full push.
+ if (int64_t PushSize = RVFI->getRVPushStackSize())
+ MFI.CreateFixedSpillStackObject(PushSize, -PushSize);
} else if (int LibCallRegs = getLibCallID(MF, CSI) + 1) {
- // Allocate a fixed object that covers all of the stack allocated by the
- // libcall.
int64_t LibCallFrameSize =
alignTo((STI.getXLen() / 8) * LibCallRegs, getStackAlign());
MFI.CreateFixedSpillStackObject(LibCallFrameSize, -LibCallFrameSize);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
index 0575e17..6600b33 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
@@ -118,6 +118,29 @@ defm AMOMAXU_D : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">,
// Pseudo-instructions and codegen patterns
//===----------------------------------------------------------------------===//
+def riscv_atomic_asextload : PatFrag<(ops node:$ptr), (atomic_load node:$ptr), [{
+ ISD::LoadExtType ETy = cast<AtomicSDNode>(N)->getExtensionType();
+ return ETy == ISD::EXTLOAD || ETy == ISD::SEXTLOAD;
+}]>;
+
+def riscv_atomic_asextload_8 : PatFrag<(ops node:$ptr),
+ (riscv_atomic_asextload node:$ptr)> {
+ let IsAtomic = true;
+ let MemoryVT = i8;
+}
+
+def riscv_atomic_asextload_16 : PatFrag<(ops node:$ptr),
+ (riscv_atomic_asextload node:$ptr)> {
+ let IsAtomic = true;
+ let MemoryVT = i16;
+}
+
+def riscv_atomic_asextload_32 : PatFrag<(ops node:$ptr),
+ (riscv_atomic_asextload node:$ptr)> {
+ let IsAtomic = true;
+ let MemoryVT = i32;
+}
+
let IsAtomic = 1 in {
// An atomic load operation that does not need either acquire or release
// semantics.
@@ -165,16 +188,20 @@ class seq_cst_store<PatFrag base>
// any ordering. This is necessary because AtomicExpandPass has added fences to
// atomic load/stores and changed them to unordered ones.
let Predicates = [HasAtomicLdSt] in {
- def : LdPat<relaxed_load<atomic_load_8>, LB>;
- def : LdPat<relaxed_load<atomic_load_16>, LH>;
- def : LdPat<relaxed_load<atomic_load_32>, LW>;
+ def : LdPat<relaxed_load<riscv_atomic_asextload_8>, LB>;
+ def : LdPat<relaxed_load<riscv_atomic_asextload_16>, LH>;
def : StPat<relaxed_store<atomic_store_8>, SB, GPR, XLenVT>;
def : StPat<relaxed_store<atomic_store_16>, SH, GPR, XLenVT>;
def : StPat<relaxed_store<atomic_store_32>, SW, GPR, XLenVT>;
}
+let Predicates = [HasAtomicLdSt, IsRV32] in {
+ def : LdPat<relaxed_load<atomic_load_32>, LW>;
+}
+
let Predicates = [HasAtomicLdSt, IsRV64] in {
+ def : LdPat<relaxed_load<riscv_atomic_asextload_32>, LW>;
def : LdPat<relaxed_load<atomic_load_64>, LD, i64>;
def : StPat<relaxed_store<atomic_store_64>, SD, GPR, i64>;
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td
index 085353a..f42352d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td
@@ -70,25 +70,22 @@ class PatLAQ<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT>
// while atomic_store has data, addr
class PatSRL<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT>
: Pat<(OpNode (vt GPR:$rs2), (vt GPRMemZeroOffset:$rs1)),
- (Inst GPRMemZeroOffset:$rs1, GPR:$rs2)>;
-
+ (Inst GPRMemZeroOffset:$rs1, GPR:$rs2)>;
+
let Predicates = [HasStdExtZalasr] in {
// the sequentially consistent loads use
// .aq instead of .aqrl to match the psABI/A.7
- def : PatLAQ<acquiring_load<atomic_load_8>, LB_AQ>;
- def : PatLAQ<seq_cst_load<atomic_load_8>, LB_AQ>;
+ def : PatLAQ<acquiring_load<riscv_atomic_asextload_8>, LB_AQ>;
+ def : PatLAQ<seq_cst_load<riscv_atomic_asextload_8>, LB_AQ>;
- def : PatLAQ<acquiring_load<atomic_load_16>, LH_AQ>;
- def : PatLAQ<seq_cst_load<atomic_load_16>, LH_AQ>;
-
- def : PatLAQ<acquiring_load<atomic_load_32>, LW_AQ>;
- def : PatLAQ<seq_cst_load<atomic_load_32>, LW_AQ>;
+ def : PatLAQ<acquiring_load<riscv_atomic_asextload_16>, LH_AQ>;
+ def : PatLAQ<seq_cst_load<riscv_atomic_asextload_16>, LH_AQ>;
// the sequentially consistent stores use
// .rl instead of .aqrl to match the psABI/A.7
def : PatSRL<releasing_store<atomic_store_8>, SB_RL>;
- def : PatSRL<seq_cst_store<atomic_store_8>, SB_RL>;
+ def : PatSRL<seq_cst_store<atomic_store_8>, SB_RL>;
def : PatSRL<releasing_store<atomic_store_16>, SH_RL>;
def : PatSRL<seq_cst_store<atomic_store_16>, SH_RL>;
@@ -97,7 +94,16 @@ let Predicates = [HasStdExtZalasr] in {
def : PatSRL<seq_cst_store<atomic_store_32>, SW_RL>;
} // Predicates = [HasStdExtZalasr]
+let Predicates = [HasStdExtZalasr, IsRV32] in {
+ def : PatLAQ<acquiring_load<atomic_load_32>, LW_AQ>;
+ def : PatLAQ<seq_cst_load<atomic_load_32>, LW_AQ>;
+
+} // Predicates = [HasStdExtZalasr, IsRV32]
+
let Predicates = [HasStdExtZalasr, IsRV64] in {
+ def : PatLAQ<acquiring_load<riscv_atomic_asextload_32>, LW_AQ>;
+ def : PatLAQ<seq_cst_load<riscv_atomic_asextload_32>, LW_AQ>;
+
def : PatLAQ<acquiring_load<atomic_load_64>, LD_AQ>;
def : PatLAQ<seq_cst_load<atomic_load_64>, LD_AQ>;
diff --git a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index bbfed2a..74e8a84 100644
--- a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -661,8 +661,13 @@ ArgumentAccessInfo getArgumentAccessInfo(const Instruction *I,
auto TypeSize = DL.getTypeStoreSize(Ty);
if (!TypeSize.isScalable() && Offset) {
int64_t Size = TypeSize.getFixedValue();
- return ConstantRange(APInt(64, *Offset, true),
- APInt(64, *Offset + Size, true));
+ APInt Low(64, *Offset, true);
+ bool Overflow;
+ APInt High = Low.sadd_ov(APInt(64, Size, true), Overflow);
+ // Bail if the range overflows signed 64-bit int.
+ if (Overflow)
+ return std::nullopt;
+ return ConstantRange(Low, High);
}
return std::nullopt;
};
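A standalone illustration of the overflow guard above (not part of the patch): `APInt::sadd_ov` reports signed wraparound through its out-parameter instead of wrapping silently, which is what lets `getArgumentAccessInfo` return `std::nullopt`. The constants below are arbitrary.

#include "llvm/ADT/APInt.h"
#include <cassert>
#include <cstdint>

int main() {
  // Mirrors the check above with Low = Offset and Size = 8.
  llvm::APInt Low(64, INT64_MAX - 1, /*isSigned=*/true);
  bool Overflow = false;
  llvm::APInt High =
      Low.sadd_ov(llvm::APInt(64, 8, /*isSigned=*/true), Overflow);
  (void)High;
  // INT64_MAX - 1 + 8 does not fit in a signed 64-bit range, so the access
  // range would be dropped (std::nullopt) rather than constructed.
  assert(Overflow && "expected signed overflow to be reported");
  return 0;
}
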
diff --git a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
index e46657e..8f0964c 100644
--- a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
+++ b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
@@ -45,7 +45,7 @@ define half @fh(ptr %p) {
; ENABLED-LABEL: fh(
; ENABLED: {
; ENABLED-NEXT: .reg .b16 %rs<10>;
-; ENABLED-NEXT: .reg .f32 %f<13>;
+; ENABLED-NEXT: .reg .b32 %f<13>;
; ENABLED-NEXT: .reg .b64 %rd<2>;
; ENABLED-EMPTY:
; ENABLED-NEXT: // %bb.0:
@@ -74,7 +74,7 @@ define half @fh(ptr %p) {
; DISABLED-LABEL: fh(
; DISABLED: {
; DISABLED-NEXT: .reg .b16 %rs<10>;
-; DISABLED-NEXT: .reg .f32 %f<13>;
+; DISABLED-NEXT: .reg .b32 %f<13>;
; DISABLED-NEXT: .reg .b64 %rd<2>;
; DISABLED-EMPTY:
; DISABLED-NEXT: // %bb.0:
@@ -121,7 +121,7 @@ define half @fh(ptr %p) {
define float @ff(ptr %p) {
; ENABLED-LABEL: ff(
; ENABLED: {
-; ENABLED-NEXT: .reg .f32 %f<10>;
+; ENABLED-NEXT: .reg .b32 %f<10>;
; ENABLED-NEXT: .reg .b64 %rd<2>;
; ENABLED-EMPTY:
; ENABLED-NEXT: // %bb.0:
@@ -137,7 +137,7 @@ define float @ff(ptr %p) {
;
; DISABLED-LABEL: ff(
; DISABLED: {
-; DISABLED-NEXT: .reg .f32 %f<10>;
+; DISABLED-NEXT: .reg .b32 %f<10>;
; DISABLED-NEXT: .reg .b64 %rd<2>;
; DISABLED-EMPTY:
; DISABLED-NEXT: // %bb.0:
diff --git a/llvm/test/CodeGen/NVPTX/and-or-setcc.ll b/llvm/test/CodeGen/NVPTX/and-or-setcc.ll
index 6c3514c..5949de3 100644
--- a/llvm/test/CodeGen/NVPTX/and-or-setcc.ll
+++ b/llvm/test/CodeGen/NVPTX/and-or-setcc.ll
@@ -9,7 +9,7 @@ define i1 @and_ord(float %a, float %b) {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<2>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [and_ord_param_0];
@@ -29,7 +29,7 @@ define i1 @or_uno(float %a, float %b) {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<2>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [or_uno_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/atomics.ll b/llvm/test/CodeGen/NVPTX/atomics.ll
index bb04aa8..16de80d 100644
--- a/llvm/test/CodeGen/NVPTX/atomics.ll
+++ b/llvm/test/CodeGen/NVPTX/atomics.ll
@@ -351,7 +351,7 @@ declare float @llvm.nvvm.atomic.load.add.f32.p0(ptr %addr, float %val)
define float @atomic_add_f32_generic(ptr %addr, float %val) {
; CHECK-LABEL: atomic_add_f32_generic(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -370,7 +370,7 @@ declare float @llvm.nvvm.atomic.load.add.f32.p1(ptr addrspace(1) %addr, float %v
define float @atomic_add_f32_addrspace1(ptr addrspace(1) %addr, float %val) {
; CHECK-LABEL: atomic_add_f32_addrspace1(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -389,7 +389,7 @@ declare float @llvm.nvvm.atomic.load.add.f32.p3(ptr addrspace(3) %addr, float %v
define float @atomic_add_f32_addrspace3(ptr addrspace(3) %addr, float %val) {
; CHECK-LABEL: atomic_add_f32_addrspace3(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -406,7 +406,7 @@ define float @atomic_add_f32_addrspace3(ptr addrspace(3) %addr, float %val) {
define float @atomicrmw_add_f32_generic(ptr %addr, float %val) {
; CHECK-LABEL: atomicrmw_add_f32_generic(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -426,7 +426,7 @@ define half @atomicrmw_add_f16_generic(ptr %addr, half %val) {
; CHECK-NEXT: .reg .pred %p<2>;
; CHECK-NEXT: .reg .b16 %rs<5>;
; CHECK-NEXT: .reg .b32 %r<17>;
-; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-NEXT: .reg .b32 %f<4>;
; CHECK-NEXT: .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -470,7 +470,7 @@ define half @atomicrmw_add_f16_generic(ptr %addr, half %val) {
define float @atomicrmw_add_f32_addrspace1(ptr addrspace(1) %addr, float %val) {
; CHECK-LABEL: atomicrmw_add_f32_addrspace1(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -487,7 +487,7 @@ define float @atomicrmw_add_f32_addrspace1(ptr addrspace(1) %addr, float %val) {
define float @atomicrmw_add_f32_addrspace3(ptr addrspace(3) %addr, float %val) {
; CHECK-LABEL: atomicrmw_add_f32_addrspace3(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
diff --git a/llvm/test/CodeGen/NVPTX/bf16-instructions.ll b/llvm/test/CodeGen/NVPTX/bf16-instructions.ll
index b97cb6f..6be13c3 100644
--- a/llvm/test/CodeGen/NVPTX/bf16-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/bf16-instructions.ll
@@ -19,7 +19,7 @@ define bfloat @test_fadd(bfloat %0, bfloat %1) {
; SM70-NEXT: .reg .pred %p<2>;
; SM70-NEXT: .reg .b16 %rs<2>;
; SM70-NEXT: .reg .b32 %r<11>;
-; SM70-NEXT: .reg .f32 %f<4>;
+; SM70-NEXT: .reg .b32 %f<4>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u16 %r1, [test_fadd_param_1];
@@ -55,7 +55,7 @@ define bfloat @test_fadd(bfloat %0, bfloat %1) {
; SM80-FTZ-LABEL: test_fadd(
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<4>;
-; SM80-FTZ-NEXT: .reg .f32 %f<4>;
+; SM80-FTZ-NEXT: .reg .b32 %f<4>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.b16 %rs1, [test_fadd_param_0];
@@ -87,7 +87,7 @@ define bfloat @test_fsub(bfloat %0, bfloat %1) {
; SM70-NEXT: .reg .pred %p<2>;
; SM70-NEXT: .reg .b16 %rs<2>;
; SM70-NEXT: .reg .b32 %r<11>;
-; SM70-NEXT: .reg .f32 %f<4>;
+; SM70-NEXT: .reg .b32 %f<4>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u16 %r1, [test_fsub_param_1];
@@ -123,7 +123,7 @@ define bfloat @test_fsub(bfloat %0, bfloat %1) {
; SM80-FTZ-LABEL: test_fsub(
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<4>;
-; SM80-FTZ-NEXT: .reg .f32 %f<4>;
+; SM80-FTZ-NEXT: .reg .b32 %f<4>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.b16 %rs1, [test_fsub_param_0];
@@ -155,7 +155,7 @@ define <2 x bfloat> @test_faddx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
; SM70-NEXT: .reg .pred %p<3>;
; SM70-NEXT: .reg .b16 %rs<5>;
; SM70-NEXT: .reg .b32 %r<24>;
-; SM70-NEXT: .reg .f32 %f<7>;
+; SM70-NEXT: .reg .b32 %f<7>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.b32 %r1, [test_faddx2_param_0];
@@ -210,7 +210,7 @@ define <2 x bfloat> @test_faddx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<5>;
; SM80-FTZ-NEXT: .reg .b32 %r<4>;
-; SM80-FTZ-NEXT: .reg .f32 %f<7>;
+; SM80-FTZ-NEXT: .reg .b32 %f<7>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.b32 %r1, [test_faddx2_param_0];
@@ -247,7 +247,7 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
; SM70-NEXT: .reg .pred %p<3>;
; SM70-NEXT: .reg .b16 %rs<5>;
; SM70-NEXT: .reg .b32 %r<24>;
-; SM70-NEXT: .reg .f32 %f<7>;
+; SM70-NEXT: .reg .b32 %f<7>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.b32 %r1, [test_fsubx2_param_0];
@@ -302,7 +302,7 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<5>;
; SM80-FTZ-NEXT: .reg .b32 %r<4>;
-; SM80-FTZ-NEXT: .reg .f32 %f<7>;
+; SM80-FTZ-NEXT: .reg .b32 %f<7>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.b32 %r1, [test_fsubx2_param_0];
@@ -339,7 +339,7 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
; SM70-NEXT: .reg .pred %p<3>;
; SM70-NEXT: .reg .b16 %rs<5>;
; SM70-NEXT: .reg .b32 %r<24>;
-; SM70-NEXT: .reg .f32 %f<7>;
+; SM70-NEXT: .reg .b32 %f<7>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.b32 %r1, [test_fmulx2_param_0];
@@ -394,7 +394,7 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<5>;
; SM80-FTZ-NEXT: .reg .b32 %r<4>;
-; SM80-FTZ-NEXT: .reg .f32 %f<7>;
+; SM80-FTZ-NEXT: .reg .b32 %f<7>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.b32 %r1, [test_fmulx2_param_0];
@@ -431,7 +431,7 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
; SM70-NEXT: .reg .pred %p<3>;
; SM70-NEXT: .reg .b16 %rs<5>;
; SM70-NEXT: .reg .b32 %r<24>;
-; SM70-NEXT: .reg .f32 %f<7>;
+; SM70-NEXT: .reg .b32 %f<7>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.b32 %r1, [test_fdiv_param_0];
@@ -474,7 +474,7 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
; SM80: {
; SM80-NEXT: .reg .b16 %rs<5>;
; SM80-NEXT: .reg .b32 %r<4>;
-; SM80-NEXT: .reg .f32 %f<7>;
+; SM80-NEXT: .reg .b32 %f<7>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.b32 %r1, [test_fdiv_param_0];
@@ -495,7 +495,7 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<5>;
; SM80-FTZ-NEXT: .reg .b32 %r<4>;
-; SM80-FTZ-NEXT: .reg .f32 %f<7>;
+; SM80-FTZ-NEXT: .reg .b32 %f<7>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.b32 %r1, [test_fdiv_param_0];
@@ -516,7 +516,7 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
; SM90: {
; SM90-NEXT: .reg .b16 %rs<5>;
; SM90-NEXT: .reg .b32 %r<4>;
-; SM90-NEXT: .reg .f32 %f<7>;
+; SM90-NEXT: .reg .b32 %f<7>;
; SM90-EMPTY:
; SM90-NEXT: // %bb.0:
; SM90-NEXT: ld.param.b32 %r1, [test_fdiv_param_0];
@@ -566,7 +566,7 @@ define float @test_fpext_float(bfloat %a) #0 {
; SM70-LABEL: test_fpext_float(
; SM70: {
; SM70-NEXT: .reg .b32 %r<3>;
-; SM70-NEXT: .reg .f32 %f<2>;
+; SM70-NEXT: .reg .b32 %f<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u16 %r1, [test_fpext_float_param_0];
@@ -578,7 +578,7 @@ define float @test_fpext_float(bfloat %a) #0 {
; SM80-LABEL: test_fpext_float(
; SM80: {
; SM80-NEXT: .reg .b16 %rs<2>;
-; SM80-NEXT: .reg .f32 %f<2>;
+; SM80-NEXT: .reg .b32 %f<2>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.b16 %rs1, [test_fpext_float_param_0];
@@ -589,7 +589,7 @@ define float @test_fpext_float(bfloat %a) #0 {
; SM80-FTZ-LABEL: test_fpext_float(
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<2>;
-; SM80-FTZ-NEXT: .reg .f32 %f<2>;
+; SM80-FTZ-NEXT: .reg .b32 %f<2>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.b16 %rs1, [test_fpext_float_param_0];
@@ -600,7 +600,7 @@ define float @test_fpext_float(bfloat %a) #0 {
; SM90-LABEL: test_fpext_float(
; SM90: {
; SM90-NEXT: .reg .b16 %rs<2>;
-; SM90-NEXT: .reg .f32 %f<2>;
+; SM90-NEXT: .reg .b32 %f<2>;
; SM90-EMPTY:
; SM90-NEXT: // %bb.0:
; SM90-NEXT: ld.param.b16 %rs1, [test_fpext_float_param_0];
@@ -617,7 +617,7 @@ define bfloat @test_fptrunc_float(float %a) #0 {
; SM70-NEXT: .reg .pred %p<2>;
; SM70-NEXT: .reg .b16 %rs<2>;
; SM70-NEXT: .reg .b32 %r<7>;
-; SM70-NEXT: .reg .f32 %f<2>;
+; SM70-NEXT: .reg .b32 %f<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.f32 %f1, [test_fptrunc_float_param_0];
@@ -635,7 +635,7 @@ define bfloat @test_fptrunc_float(float %a) #0 {
; SM80-LABEL: test_fptrunc_float(
; SM80: {
; SM80-NEXT: .reg .b16 %rs<2>;
-; SM80-NEXT: .reg .f32 %f<2>;
+; SM80-NEXT: .reg .b32 %f<2>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.f32 %f1, [test_fptrunc_float_param_0];
@@ -646,7 +646,7 @@ define bfloat @test_fptrunc_float(float %a) #0 {
; SM80-FTZ-LABEL: test_fptrunc_float(
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<2>;
-; SM80-FTZ-NEXT: .reg .f32 %f<2>;
+; SM80-FTZ-NEXT: .reg .b32 %f<2>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.f32 %f1, [test_fptrunc_float_param_0];
@@ -657,7 +657,7 @@ define bfloat @test_fptrunc_float(float %a) #0 {
; SM90-LABEL: test_fptrunc_float(
; SM90: {
; SM90-NEXT: .reg .b16 %rs<2>;
-; SM90-NEXT: .reg .f32 %f<2>;
+; SM90-NEXT: .reg .b32 %f<2>;
; SM90-EMPTY:
; SM90-NEXT: // %bb.0:
; SM90-NEXT: ld.param.f32 %f1, [test_fptrunc_float_param_0];
@@ -674,7 +674,7 @@ define bfloat @test_fadd_imm_1(bfloat %a) #0 {
; SM70-NEXT: .reg .pred %p<2>;
; SM70-NEXT: .reg .b16 %rs<2>;
; SM70-NEXT: .reg .b32 %r<9>;
-; SM70-NEXT: .reg .f32 %f<3>;
+; SM70-NEXT: .reg .b32 %f<3>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u16 %r1, [test_fadd_imm_1_param_0];
@@ -706,7 +706,7 @@ define bfloat @test_fadd_imm_1(bfloat %a) #0 {
; SM80-FTZ-LABEL: test_fadd_imm_1(
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<3>;
-; SM80-FTZ-NEXT: .reg .f32 %f<3>;
+; SM80-FTZ-NEXT: .reg .b32 %f<3>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.b16 %rs1, [test_fadd_imm_1_param_0];
@@ -735,7 +735,7 @@ define bfloat @test_select_cc_bf16_f64(double %a, double %b, bfloat %c, bfloat %
; CHECK: {
; CHECK-NEXT: .reg .pred %p<2>;
; CHECK-NEXT: .reg .b16 %rs<4>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [test_select_cc_bf16_f64_param_0];
@@ -756,7 +756,7 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 {
; SM70: {
; SM70-NEXT: .reg .b16 %rs<9>;
; SM70-NEXT: .reg .b32 %r<21>;
-; SM70-NEXT: .reg .f32 %f<9>;
+; SM70-NEXT: .reg .b32 %f<9>;
; SM70-NEXT: .reg .b64 %rd<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
@@ -798,7 +798,7 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 {
; SM80: {
; SM80-NEXT: .reg .b16 %rs<9>;
; SM80-NEXT: .reg .b32 %r<5>;
-; SM80-NEXT: .reg .f32 %f<9>;
+; SM80-NEXT: .reg .b32 %f<9>;
; SM80-NEXT: .reg .b64 %rd<2>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
@@ -824,7 +824,7 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 {
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<9>;
; SM80-FTZ-NEXT: .reg .b32 %r<5>;
-; SM80-FTZ-NEXT: .reg .f32 %f<9>;
+; SM80-FTZ-NEXT: .reg .b32 %f<9>;
; SM80-FTZ-NEXT: .reg .b64 %rd<2>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
@@ -850,7 +850,7 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 {
; SM90: {
; SM90-NEXT: .reg .b16 %rs<9>;
; SM90-NEXT: .reg .b32 %r<5>;
-; SM90-NEXT: .reg .f32 %f<9>;
+; SM90-NEXT: .reg .b32 %f<9>;
; SM90-NEXT: .reg .b64 %rd<2>;
; SM90-EMPTY:
; SM90-NEXT: // %bb.0:
@@ -881,7 +881,7 @@ define i16 @test_fptosi_i16(bfloat %a) {
; SM70: {
; SM70-NEXT: .reg .b16 %rs<2>;
; SM70-NEXT: .reg .b32 %r<4>;
-; SM70-NEXT: .reg .f32 %f<2>;
+; SM70-NEXT: .reg .b32 %f<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u16 %r1, [test_fptosi_i16_param_0];
@@ -896,7 +896,7 @@ define i16 @test_fptosi_i16(bfloat %a) {
; SM80: {
; SM80-NEXT: .reg .b16 %rs<3>;
; SM80-NEXT: .reg .b32 %r<2>;
-; SM80-NEXT: .reg .f32 %f<2>;
+; SM80-NEXT: .reg .b32 %f<2>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.b16 %rs1, [test_fptosi_i16_param_0];
@@ -910,7 +910,7 @@ define i16 @test_fptosi_i16(bfloat %a) {
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<3>;
; SM80-FTZ-NEXT: .reg .b32 %r<2>;
-; SM80-FTZ-NEXT: .reg .f32 %f<2>;
+; SM80-FTZ-NEXT: .reg .b32 %f<2>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.b16 %rs1, [test_fptosi_i16_param_0];
@@ -940,7 +940,7 @@ define i16 @test_fptoui_i16(bfloat %a) {
; SM70: {
; SM70-NEXT: .reg .b16 %rs<2>;
; SM70-NEXT: .reg .b32 %r<4>;
-; SM70-NEXT: .reg .f32 %f<2>;
+; SM70-NEXT: .reg .b32 %f<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u16 %r1, [test_fptoui_i16_param_0];
@@ -955,7 +955,7 @@ define i16 @test_fptoui_i16(bfloat %a) {
; SM80: {
; SM80-NEXT: .reg .b16 %rs<3>;
; SM80-NEXT: .reg .b32 %r<2>;
-; SM80-NEXT: .reg .f32 %f<2>;
+; SM80-NEXT: .reg .b32 %f<2>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.b16 %rs1, [test_fptoui_i16_param_0];
@@ -969,7 +969,7 @@ define i16 @test_fptoui_i16(bfloat %a) {
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<3>;
; SM80-FTZ-NEXT: .reg .b32 %r<2>;
-; SM80-FTZ-NEXT: .reg .f32 %f<2>;
+; SM80-FTZ-NEXT: .reg .b32 %f<2>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.b16 %rs1, [test_fptoui_i16_param_0];
@@ -1000,7 +1000,7 @@ define bfloat @test_sitofp_i16(i16 %a) {
; SM70-NEXT: .reg .pred %p<2>;
; SM70-NEXT: .reg .b16 %rs<3>;
; SM70-NEXT: .reg .b32 %r<7>;
-; SM70-NEXT: .reg .f32 %f<2>;
+; SM70-NEXT: .reg .b32 %f<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u16 %rs1, [test_sitofp_i16_param_0];
@@ -1019,7 +1019,7 @@ define bfloat @test_sitofp_i16(i16 %a) {
; SM80-LABEL: test_sitofp_i16(
; SM80: {
; SM80-NEXT: .reg .b16 %rs<3>;
-; SM80-NEXT: .reg .f32 %f<2>;
+; SM80-NEXT: .reg .b32 %f<2>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.u16 %rs1, [test_sitofp_i16_param_0];
@@ -1031,7 +1031,7 @@ define bfloat @test_sitofp_i16(i16 %a) {
; SM80-FTZ-LABEL: test_sitofp_i16(
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<3>;
-; SM80-FTZ-NEXT: .reg .f32 %f<2>;
+; SM80-FTZ-NEXT: .reg .b32 %f<2>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.u16 %rs1, [test_sitofp_i16_param_0];
@@ -1059,7 +1059,7 @@ define bfloat @test_uitofp_i8(i8 %a) {
; SM70-NEXT: .reg .pred %p<2>;
; SM70-NEXT: .reg .b16 %rs<3>;
; SM70-NEXT: .reg .b32 %r<7>;
-; SM70-NEXT: .reg .f32 %f<2>;
+; SM70-NEXT: .reg .b32 %f<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u8 %rs1, [test_uitofp_i8_param_0];
@@ -1078,7 +1078,7 @@ define bfloat @test_uitofp_i8(i8 %a) {
; SM80-LABEL: test_uitofp_i8(
; SM80: {
; SM80-NEXT: .reg .b16 %rs<3>;
-; SM80-NEXT: .reg .f32 %f<2>;
+; SM80-NEXT: .reg .b32 %f<2>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.u8 %rs1, [test_uitofp_i8_param_0];
@@ -1090,7 +1090,7 @@ define bfloat @test_uitofp_i8(i8 %a) {
; SM80-FTZ-LABEL: test_uitofp_i8(
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<3>;
-; SM80-FTZ-NEXT: .reg .f32 %f<2>;
+; SM80-FTZ-NEXT: .reg .b32 %f<2>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.u8 %rs1, [test_uitofp_i8_param_0];
@@ -1118,7 +1118,7 @@ define bfloat @test_uitofp_i1(i1 %a) {
; SM70-NEXT: .reg .pred %p<3>;
; SM70-NEXT: .reg .b16 %rs<4>;
; SM70-NEXT: .reg .b32 %r<8>;
-; SM70-NEXT: .reg .f32 %f<2>;
+; SM70-NEXT: .reg .b32 %f<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u8 %rs1, [test_uitofp_i1_param_0];
@@ -1142,7 +1142,7 @@ define bfloat @test_uitofp_i1(i1 %a) {
; SM80-NEXT: .reg .pred %p<2>;
; SM80-NEXT: .reg .b16 %rs<4>;
; SM80-NEXT: .reg .b32 %r<2>;
-; SM80-NEXT: .reg .f32 %f<2>;
+; SM80-NEXT: .reg .b32 %f<2>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.u8 %rs1, [test_uitofp_i1_param_0];
@@ -1159,7 +1159,7 @@ define bfloat @test_uitofp_i1(i1 %a) {
; SM80-FTZ-NEXT: .reg .pred %p<2>;
; SM80-FTZ-NEXT: .reg .b16 %rs<4>;
; SM80-FTZ-NEXT: .reg .b32 %r<2>;
-; SM80-FTZ-NEXT: .reg .f32 %f<2>;
+; SM80-FTZ-NEXT: .reg .b32 %f<2>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.u8 %rs1, [test_uitofp_i1_param_0];
@@ -1195,7 +1195,7 @@ define bfloat @test_uitofp_i16(i16 %a) {
; SM70-NEXT: .reg .pred %p<2>;
; SM70-NEXT: .reg .b16 %rs<3>;
; SM70-NEXT: .reg .b32 %r<7>;
-; SM70-NEXT: .reg .f32 %f<2>;
+; SM70-NEXT: .reg .b32 %f<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u16 %rs1, [test_uitofp_i16_param_0];
@@ -1214,7 +1214,7 @@ define bfloat @test_uitofp_i16(i16 %a) {
; SM80-LABEL: test_uitofp_i16(
; SM80: {
; SM80-NEXT: .reg .b16 %rs<3>;
-; SM80-NEXT: .reg .f32 %f<2>;
+; SM80-NEXT: .reg .b32 %f<2>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.u16 %rs1, [test_uitofp_i16_param_0];
@@ -1226,7 +1226,7 @@ define bfloat @test_uitofp_i16(i16 %a) {
; SM80-FTZ-LABEL: test_uitofp_i16(
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<3>;
-; SM80-FTZ-NEXT: .reg .f32 %f<2>;
+; SM80-FTZ-NEXT: .reg .b32 %f<2>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.u16 %rs1, [test_uitofp_i16_param_0];
@@ -1254,7 +1254,7 @@ define bfloat @test_uitofp_i32(i32 %a) {
; SM70-NEXT: .reg .pred %p<2>;
; SM70-NEXT: .reg .b16 %rs<2>;
; SM70-NEXT: .reg .b32 %r<8>;
-; SM70-NEXT: .reg .f32 %f<2>;
+; SM70-NEXT: .reg .b32 %f<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u32 %r1, [test_uitofp_i32_param_0];
@@ -1274,7 +1274,7 @@ define bfloat @test_uitofp_i32(i32 %a) {
; SM80: {
; SM80-NEXT: .reg .b16 %rs<2>;
; SM80-NEXT: .reg .b32 %r<2>;
-; SM80-NEXT: .reg .f32 %f<2>;
+; SM80-NEXT: .reg .b32 %f<2>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.u32 %r1, [test_uitofp_i32_param_0];
@@ -1287,7 +1287,7 @@ define bfloat @test_uitofp_i32(i32 %a) {
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<2>;
; SM80-FTZ-NEXT: .reg .b32 %r<2>;
-; SM80-FTZ-NEXT: .reg .f32 %f<2>;
+; SM80-FTZ-NEXT: .reg .b32 %f<2>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.u32 %r1, [test_uitofp_i32_param_0];
@@ -1316,7 +1316,7 @@ define bfloat @test_uitofp_i64(i64 %a) {
; SM70-NEXT: .reg .pred %p<2>;
; SM70-NEXT: .reg .b16 %rs<2>;
; SM70-NEXT: .reg .b32 %r<7>;
-; SM70-NEXT: .reg .f32 %f<2>;
+; SM70-NEXT: .reg .b32 %f<2>;
; SM70-NEXT: .reg .b64 %rd<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
@@ -1336,7 +1336,7 @@ define bfloat @test_uitofp_i64(i64 %a) {
; SM80-LABEL: test_uitofp_i64(
; SM80: {
; SM80-NEXT: .reg .b16 %rs<2>;
-; SM80-NEXT: .reg .f32 %f<2>;
+; SM80-NEXT: .reg .b32 %f<2>;
; SM80-NEXT: .reg .b64 %rd<2>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
@@ -1349,7 +1349,7 @@ define bfloat @test_uitofp_i64(i64 %a) {
; SM80-FTZ-LABEL: test_uitofp_i64(
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<2>;
-; SM80-FTZ-NEXT: .reg .f32 %f<2>;
+; SM80-FTZ-NEXT: .reg .b32 %f<2>;
; SM80-FTZ-NEXT: .reg .b64 %rd<2>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
@@ -1379,7 +1379,7 @@ define bfloat @test_roundeven(bfloat %a) {
; SM70-NEXT: .reg .pred %p<2>;
; SM70-NEXT: .reg .b16 %rs<2>;
; SM70-NEXT: .reg .b32 %r<9>;
-; SM70-NEXT: .reg .f32 %f<3>;
+; SM70-NEXT: .reg .b32 %f<3>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u16 %r1, [test_roundeven_param_0];
@@ -1400,7 +1400,7 @@ define bfloat @test_roundeven(bfloat %a) {
; SM80-LABEL: test_roundeven(
; SM80: {
; SM80-NEXT: .reg .b16 %rs<3>;
-; SM80-NEXT: .reg .f32 %f<3>;
+; SM80-NEXT: .reg .b32 %f<3>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.b16 %rs1, [test_roundeven_param_0];
@@ -1413,7 +1413,7 @@ define bfloat @test_roundeven(bfloat %a) {
; SM80-FTZ-LABEL: test_roundeven(
; SM80-FTZ: {
; SM80-FTZ-NEXT: .reg .b16 %rs<3>;
-; SM80-FTZ-NEXT: .reg .f32 %f<3>;
+; SM80-FTZ-NEXT: .reg .b32 %f<3>;
; SM80-FTZ-EMPTY:
; SM80-FTZ-NEXT: // %bb.0:
; SM80-FTZ-NEXT: ld.param.b16 %rs1, [test_roundeven_param_0];
@@ -1442,7 +1442,7 @@ define bfloat @test_maximum(bfloat %a, bfloat %b) {
; SM70-NEXT: .reg .pred %p<6>;
; SM70-NEXT: .reg .b16 %rs<8>;
; SM70-NEXT: .reg .b32 %r<7>;
-; SM70-NEXT: .reg .f32 %f<4>;
+; SM70-NEXT: .reg .b32 %f<4>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.b16 %rs1, [test_maximum_param_0];
@@ -1511,7 +1511,7 @@ define bfloat @test_maxnum(bfloat %a, bfloat %b) {
; SM70-NEXT: .reg .pred %p<2>;
; SM70-NEXT: .reg .b16 %rs<2>;
; SM70-NEXT: .reg .b32 %r<11>;
-; SM70-NEXT: .reg .f32 %f<4>;
+; SM70-NEXT: .reg .b32 %f<4>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u16 %r1, [test_maxnum_param_1];
@@ -1574,7 +1574,7 @@ define <2 x bfloat> @test_maximum_v2(<2 x bfloat> %a, <2 x bfloat> %b) {
; SM70-NEXT: .reg .pred %p<11>;
; SM70-NEXT: .reg .b16 %rs<15>;
; SM70-NEXT: .reg .b32 %r<16>;
-; SM70-NEXT: .reg .f32 %f<7>;
+; SM70-NEXT: .reg .b32 %f<7>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.b32 %r1, [test_maximum_v2_param_0];
@@ -1665,7 +1665,7 @@ define <2 x bfloat> @test_maxnum_v2(<2 x bfloat> %a, <2 x bfloat> %b) {
; SM70-NEXT: .reg .pred %p<3>;
; SM70-NEXT: .reg .b16 %rs<5>;
; SM70-NEXT: .reg .b32 %r<24>;
-; SM70-NEXT: .reg .f32 %f<7>;
+; SM70-NEXT: .reg .b32 %f<7>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.b32 %r1, [test_maxnum_v2_param_0];
@@ -1741,4 +1741,4 @@ define <2 x bfloat> @test_maxnum_v2(<2 x bfloat> %a, <2 x bfloat> %b) {
}
declare bfloat @llvm.maximum.bf16(bfloat, bfloat)
-declare <2 x bfloat> @llvm.maximum.v2bf16(<2 x bfloat>, <2 x bfloat>)
\ No newline at end of file
+declare <2 x bfloat> @llvm.maximum.v2bf16(<2 x bfloat>, <2 x bfloat>)
diff --git a/llvm/test/CodeGen/NVPTX/bf16x2-instructions-approx.ll b/llvm/test/CodeGen/NVPTX/bf16x2-instructions-approx.ll
index fdf481e..5ab684a 100644
--- a/llvm/test/CodeGen/NVPTX/bf16x2-instructions-approx.ll
+++ b/llvm/test/CodeGen/NVPTX/bf16x2-instructions-approx.ll
@@ -12,7 +12,7 @@ define <2 x bfloat> @test_sin(<2 x bfloat> %a) #0 #1 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<3>;
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_sin_param_0];
@@ -33,7 +33,7 @@ define <2 x bfloat> @test_cos(<2 x bfloat> %a) #0 #1 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<3>;
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_cos_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
index 706a88f..677f0d7 100644
--- a/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
@@ -131,7 +131,7 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<5>;
; CHECK-NEXT: .reg .b32 %r<4>;
-; CHECK-NEXT: .reg .f32 %f<7>;
+; CHECK-NEXT: .reg .b32 %f<7>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_fdiv_param_0];
@@ -259,7 +259,7 @@ define <2 x bfloat> @test_select_cc(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloa
; SM80-NEXT: .reg .pred %p<3>;
; SM80-NEXT: .reg .b16 %rs<11>;
; SM80-NEXT: .reg .b32 %r<6>;
-; SM80-NEXT: .reg .f32 %f<5>;
+; SM80-NEXT: .reg .b32 %f<5>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.b32 %r1, [test_select_cc_param_0];
@@ -312,7 +312,7 @@ define <2 x float> @test_select_cc_f32_bf16(<2 x float> %a, <2 x float> %b,
; SM80-NEXT: .reg .pred %p<3>;
; SM80-NEXT: .reg .b16 %rs<5>;
; SM80-NEXT: .reg .b32 %r<3>;
-; SM80-NEXT: .reg .f32 %f<11>;
+; SM80-NEXT: .reg .b32 %f<11>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_select_cc_f32_bf16_param_0];
@@ -336,7 +336,7 @@ define <2 x float> @test_select_cc_f32_bf16(<2 x float> %a, <2 x float> %b,
; SM90: {
; SM90-NEXT: .reg .pred %p<3>;
; SM90-NEXT: .reg .b32 %r<3>;
-; SM90-NEXT: .reg .f32 %f<7>;
+; SM90-NEXT: .reg .b32 %f<7>;
; SM90-EMPTY:
; SM90-NEXT: // %bb.0:
; SM90-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_select_cc_f32_bf16_param_0];
@@ -360,7 +360,7 @@ define <2 x bfloat> @test_select_cc_bf16_f32(<2 x bfloat> %a, <2 x bfloat> %b,
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<7>;
; CHECK-NEXT: .reg .b32 %r<4>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_select_cc_bf16_f32_param_0];
@@ -386,7 +386,7 @@ define <2 x bfloat> @test_fptrunc_2xfloat(<2 x float> %a) #0 {
; CHECK-LABEL: test_fptrunc_2xfloat(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_fptrunc_2xfloat_param_0];
@@ -402,7 +402,7 @@ define <2 x float> @test_fpext_2xfloat(<2 x bfloat> %a) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<3>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_fpext_2xfloat_param_0];
@@ -469,7 +469,7 @@ define <2 x bfloat> @test_sqrt(<2 x bfloat> %a) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<3>;
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_sqrt_param_0];
@@ -583,7 +583,7 @@ define <2 x bfloat> @test_floor(<2 x bfloat> %a) #0 {
; SM80: {
; SM80-NEXT: .reg .b16 %rs<3>;
; SM80-NEXT: .reg .b32 %r<3>;
-; SM80-NEXT: .reg .f32 %f<5>;
+; SM80-NEXT: .reg .b32 %f<5>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.b32 %r1, [test_floor_param_0];
@@ -618,7 +618,7 @@ define <2 x bfloat> @test_ceil(<2 x bfloat> %a) #0 {
; SM80: {
; SM80-NEXT: .reg .b16 %rs<3>;
; SM80-NEXT: .reg .b32 %r<3>;
-; SM80-NEXT: .reg .f32 %f<5>;
+; SM80-NEXT: .reg .b32 %f<5>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.b32 %r1, [test_ceil_param_0];
@@ -653,7 +653,7 @@ define <2 x bfloat> @test_trunc(<2 x bfloat> %a) #0 {
; SM80: {
; SM80-NEXT: .reg .b16 %rs<3>;
; SM80-NEXT: .reg .b32 %r<3>;
-; SM80-NEXT: .reg .f32 %f<5>;
+; SM80-NEXT: .reg .b32 %f<5>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.b32 %r1, [test_trunc_param_0];
@@ -688,7 +688,7 @@ define <2 x bfloat> @test_rint(<2 x bfloat> %a) #0 {
; SM80: {
; SM80-NEXT: .reg .b16 %rs<3>;
; SM80-NEXT: .reg .b32 %r<3>;
-; SM80-NEXT: .reg .f32 %f<5>;
+; SM80-NEXT: .reg .b32 %f<5>;
; SM80-EMPTY:
; SM80-NEXT: // %bb.0:
; SM80-NEXT: ld.param.b32 %r1, [test_rint_param_0];
@@ -724,7 +724,7 @@ define <2 x bfloat> @test_round(<2 x bfloat> %a) #0 {
; CHECK-NEXT: .reg .pred %p<5>;
; CHECK-NEXT: .reg .b16 %rs<3>;
; CHECK-NEXT: .reg .b32 %r<9>;
-; CHECK-NEXT: .reg .f32 %f<17>;
+; CHECK-NEXT: .reg .b32 %f<17>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_round_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/convert-fp-i8.ll b/llvm/test/CodeGen/NVPTX/convert-fp-i8.ll
index 93da3913..670e112 100644
--- a/llvm/test/CodeGen/NVPTX/convert-fp-i8.ll
+++ b/llvm/test/CodeGen/NVPTX/convert-fp-i8.ll
@@ -8,7 +8,7 @@ define i8 @cvt_u8_f32(float %x) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_u8_f32_param_0];
@@ -25,7 +25,7 @@ define i8 @cvt_u8_f64(double %x) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-NEXT: .reg .b64 %fd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [cvt_u8_f64_param_0];
@@ -41,7 +41,7 @@ define float @cvt_f32_i8(i8 %x) {
; CHECK-LABEL: cvt_f32_i8(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u8 %rs1, [cvt_f32_i8_param_0];
@@ -56,7 +56,7 @@ define double @cvt_f64_i8(i8 %x) {
; CHECK-LABEL: cvt_f64_i8(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
-; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-NEXT: .reg .b64 %fd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u8 %rs1, [cvt_f64_i8_param_0];
@@ -71,7 +71,7 @@ define float @cvt_f32_s8(i8 %x) {
; CHECK-LABEL: cvt_f32_s8(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.s8 %rs1, [cvt_f32_s8_param_0];
@@ -86,7 +86,7 @@ define double @cvt_f64_s8(i8 %x) {
; CHECK-LABEL: cvt_f64_s8(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
-; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-NEXT: .reg .b64 %fd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.s8 %rs1, [cvt_f64_s8_param_0];
@@ -102,7 +102,7 @@ define i8 @cvt_s8_f32(float %x) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_s8_f32_param_0];
@@ -120,7 +120,7 @@ define i8 @cvt_s8_f64(double %x) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-NEXT: .reg .b64 %fd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [cvt_s8_f64_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/convert-sm100.ll b/llvm/test/CodeGen/NVPTX/convert-sm100.ll
index f92822f7..7230872 100644
--- a/llvm/test/CodeGen/NVPTX/convert-sm100.ll
+++ b/llvm/test/CodeGen/NVPTX/convert-sm100.ll
@@ -11,7 +11,7 @@ define i32 @cvt_rn_satf_tf32_f32(float %f1) {
; CHECK-LABEL: cvt_rn_satf_tf32_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rn_satf_tf32_f32_param_0];
@@ -26,7 +26,7 @@ define i32 @cvt_rn_relu_satf_tf32_f32(float %f1) {
; CHECK-LABEL: cvt_rn_relu_satf_tf32_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rn_relu_satf_tf32_f32_param_0];
@@ -41,7 +41,7 @@ define i32 @cvt_rz_satf_tf32_f32(float %f1) {
; CHECK-LABEL: cvt_rz_satf_tf32_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rz_satf_tf32_f32_param_0];
@@ -56,7 +56,7 @@ define i32 @cvt_rz_relu_satf_tf32_f32(float %f1) {
; CHECK-LABEL: cvt_rz_relu_satf_tf32_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rz_relu_satf_tf32_f32_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/convert-sm100a.ll b/llvm/test/CodeGen/NVPTX/convert-sm100a.ll
index f0dd5f0..04d7a65 100644
--- a/llvm/test/CodeGen/NVPTX/convert-sm100a.ll
+++ b/llvm/test/CodeGen/NVPTX/convert-sm100a.ll
@@ -11,7 +11,7 @@ define i16 @cvt_rn_sf_e2m3x2_f32(float %f1, float %f2) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rn_sf_e2m3x2_f32_param_0];
@@ -29,7 +29,7 @@ define i16 @cvt_rn_relu_sf_e2m3x2_f32(float %f1, float %f2) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rn_relu_sf_e2m3x2_f32_param_0];
@@ -47,7 +47,7 @@ define i16 @cvt_rn_sf_e3m2x2_f32(float %f1, float %f2) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rn_sf_e3m2x2_f32_param_0];
@@ -65,7 +65,7 @@ define i16 @cvt_rn_relu_sf_e3m2x2_f32(float %f1, float %f2) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rn_relu_sf_e3m2x2_f32_param_0];
@@ -143,7 +143,7 @@ define i16 @cvt_rz_ue8m0x2_f32(float %f1, float %f2) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rz_ue8m0x2_f32_param_0];
@@ -161,7 +161,7 @@ define i16 @cvt_rz_sf_ue8m0x2_f32(float %f1, float %f2) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rz_sf_ue8m0x2_f32_param_0];
@@ -179,7 +179,7 @@ define i16 @cvt_rp_ue8m0x2_f32(float %f1, float %f2) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rp_ue8m0x2_f32_param_0];
@@ -197,7 +197,7 @@ define i16 @cvt_rp_sf_ue8m0x2_f32(float %f1, float %f2) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rp_sf_ue8m0x2_f32_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/convert-sm80.ll b/llvm/test/CodeGen/NVPTX/convert-sm80.ll
index aebc28b..eb7a6bd 100644
--- a/llvm/test/CodeGen/NVPTX/convert-sm80.ll
+++ b/llvm/test/CodeGen/NVPTX/convert-sm80.ll
@@ -7,7 +7,7 @@ define <2 x bfloat> @cvt_rn_bf16x2_f32(float %f1, float %f2) {
; CHECK-LABEL: cvt_rn_bf16x2_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rn_bf16x2_f32_param_0];
@@ -23,7 +23,7 @@ define <2 x bfloat> @cvt_rn_relu_bf16x2_f32(float %f1, float %f2) {
; CHECK-LABEL: cvt_rn_relu_bf16x2_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rn_relu_bf16x2_f32_param_0];
@@ -39,7 +39,7 @@ define <2 x bfloat> @cvt_rz_bf16x2_f32(float %f1, float %f2) {
; CHECK-LABEL: cvt_rz_bf16x2_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rz_bf16x2_f32_param_0];
@@ -55,7 +55,7 @@ define <2 x bfloat> @cvt_rz_relu_bf16x2_f32(float %f1, float %f2) {
; CHECK-LABEL: cvt_rz_relu_bf16x2_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rz_relu_bf16x2_f32_param_0];
@@ -76,7 +76,7 @@ define <2 x half> @cvt_rn_f16x2_f32(float %f1, float %f2) {
; CHECK-LABEL: cvt_rn_f16x2_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rn_f16x2_f32_param_0];
@@ -92,7 +92,7 @@ define <2 x half> @cvt_rn_relu_f16x2_f32(float %f1, float %f2) {
; CHECK-LABEL: cvt_rn_relu_f16x2_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rn_relu_f16x2_f32_param_0];
@@ -108,7 +108,7 @@ define <2 x half> @cvt_rz_f16x2_f32(float %f1, float %f2) {
; CHECK-LABEL: cvt_rz_f16x2_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rz_f16x2_f32_param_0];
@@ -124,7 +124,7 @@ define <2 x half> @cvt_rz_relu_f16x2_f32(float %f1, float %f2) {
; CHECK-LABEL: cvt_rz_relu_f16x2_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rz_relu_f16x2_f32_param_0];
@@ -145,7 +145,7 @@ define bfloat @cvt_rn_bf16_f32(float %f1) {
; CHECK-LABEL: cvt_rn_bf16_f32(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rn_bf16_f32_param_0];
@@ -160,7 +160,7 @@ define bfloat @cvt_rn_relu_bf16_f32(float %f1) {
; CHECK-LABEL: cvt_rn_relu_bf16_f32(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rn_relu_bf16_f32_param_0];
@@ -175,7 +175,7 @@ define bfloat @cvt_rz_bf16_f32(float %f1) {
; CHECK-LABEL: cvt_rz_bf16_f32(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rz_bf16_f32_param_0];
@@ -190,7 +190,7 @@ define bfloat @cvt_rz_relu_bf16_f32(float %f1) {
; CHECK-LABEL: cvt_rz_relu_bf16_f32(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rz_relu_bf16_f32_param_0];
@@ -210,7 +210,7 @@ define i32 @cvt_rna_tf32_f32(float %f1) {
; CHECK-LABEL: cvt_rna_tf32_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rna_tf32_f32_param_0];
@@ -228,7 +228,7 @@ define <2 x bfloat> @fold_ff2bf16x2(float %lo, float %hi) {
; CHECK-LABEL: fold_ff2bf16x2(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [fold_ff2bf16x2_param_0];
@@ -247,7 +247,7 @@ define <2 x half> @fold_ff2f16x2(float %lo, float %hi) {
; CHECK-LABEL: fold_ff2f16x2(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [fold_ff2f16x2_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/convert-sm90.ll b/llvm/test/CodeGen/NVPTX/convert-sm90.ll
index 5f610e0..340117f 100644
--- a/llvm/test/CodeGen/NVPTX/convert-sm90.ll
+++ b/llvm/test/CodeGen/NVPTX/convert-sm90.ll
@@ -11,7 +11,7 @@ define i32 @cvt_rn_tf32_f32(float %f1) {
; CHECK-LABEL: cvt_rn_tf32_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rn_tf32_f32_param_0];
@@ -26,7 +26,7 @@ define i32 @cvt_rn_relu_tf32_f32(float %f1) {
; CHECK-LABEL: cvt_rn_relu_tf32_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rn_relu_tf32_f32_param_0];
@@ -41,7 +41,7 @@ define i32 @cvt_rz_tf32_f32(float %f1) {
; CHECK-LABEL: cvt_rz_tf32_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rz_tf32_f32_param_0];
@@ -56,7 +56,7 @@ define i32 @cvt_rz_relu_tf32_f32(float %f1) {
; CHECK-LABEL: cvt_rz_relu_tf32_f32(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [cvt_rz_relu_tf32_f32_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/copysign.ll b/llvm/test/CodeGen/NVPTX/copysign.ll
index 843ef4d..2e305e68 100644
--- a/llvm/test/CodeGen/NVPTX/copysign.ll
+++ b/llvm/test/CodeGen/NVPTX/copysign.ll
@@ -8,7 +8,7 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define float @fcopysign_f_f(float %a, float %b) {
; CHECK-LABEL: fcopysign_f_f(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-NEXT: .reg .b32 %f<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [fcopysign_f_f_param_0];
@@ -23,7 +23,7 @@ define float @fcopysign_f_f(float %a, float %b) {
define double @fcopysign_d_d(double %a, double %b) {
; CHECK-LABEL: fcopysign_d_d(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<4>;
+; CHECK-NEXT: .reg .b64 %fd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [fcopysign_d_d_param_0];
@@ -39,7 +39,7 @@ define float @fcopysign_f_d(float %a, double %b) {
; CHECK-LABEL: fcopysign_f_d(
; CHECK: {
; CHECK-NEXT: .reg .pred %p<2>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -63,7 +63,7 @@ define float @fcopysign_f_h(float %a, half %b) {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<2>;
; CHECK-NEXT: .reg .b16 %rs<4>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [fcopysign_f_h_param_0];
@@ -86,7 +86,7 @@ define double @fcopysign_d_f(double %a, float %b) {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<2>;
; CHECK-NEXT: .reg .b32 %r<4>;
-; CHECK-NEXT: .reg .f64 %fd<5>;
+; CHECK-NEXT: .reg .b64 %fd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [fcopysign_d_f_param_0];
@@ -109,7 +109,7 @@ define double @fcopysign_d_h(double %a, half %b) {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<2>;
; CHECK-NEXT: .reg .b16 %rs<4>;
-; CHECK-NEXT: .reg .f64 %fd<5>;
+; CHECK-NEXT: .reg .b64 %fd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [fcopysign_d_h_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
index 92b7379..a233616 100644
--- a/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
+++ b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
@@ -59,9 +59,9 @@ define void @test_distributed_shared_cluster_float_atomic(ptr addrspace(7) %dsme
; CHECK-LABEL: test_distributed_shared_cluster_float_atomic(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<5>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-NEXT: .reg .b64 %fd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.u64 %rd1, [test_distributed_shared_cluster_float_atomic_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/div.ll b/llvm/test/CodeGen/NVPTX/div.ll
index 4f9d587..f8711e3 100644
--- a/llvm/test/CodeGen/NVPTX/div.ll
+++ b/llvm/test/CodeGen/NVPTX/div.ll
@@ -5,7 +5,7 @@
define float @div_full(float %a, float %b) {
; CHECK-LABEL: div_full(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<9>;
+; CHECK-NEXT: .reg .b32 %f<9>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [div_full_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/f16-abs.ll b/llvm/test/CodeGen/NVPTX/f16-abs.ll
index d12653e..d3aaedf 100644
--- a/llvm/test/CodeGen/NVPTX/f16-abs.ll
+++ b/llvm/test/CodeGen/NVPTX/f16-abs.ll
@@ -49,7 +49,7 @@ define half @test_fabs(half %a) {
; CHECK-NOF16-LABEL: test_fabs(
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<3>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<3>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b16 %rs1, [test_fabs_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
index e9edabd..e854e5a 100644
--- a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
@@ -109,7 +109,7 @@ define <2 x half> @test_fadd(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<4>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<7>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<7>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fadd_param_1];
@@ -148,7 +148,7 @@ define <2 x half> @test_fadd_imm_0(<2 x half> %a) #0 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<5>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r1, [test_fadd_imm_0_param_0];
@@ -182,7 +182,7 @@ define <2 x half> @test_fadd_imm_1(<2 x half> %a) #0 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<5>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r1, [test_fadd_imm_1_param_0];
@@ -216,7 +216,7 @@ define <2 x half> @test_fsub(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<4>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<7>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<7>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fsub_param_1];
@@ -254,7 +254,7 @@ define <2 x half> @test_fneg(<2 x half> %a) #0 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<5>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<6>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<6>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r1, [test_fneg_param_0];
@@ -289,7 +289,7 @@ define <2 x half> @test_fmul(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<4>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<7>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<7>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fmul_param_1];
@@ -316,7 +316,7 @@ define <2 x half> @test_fdiv(<2 x half> %a, <2 x half> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<7>;
; CHECK-NEXT: .reg .b32 %r<4>;
-; CHECK-NEXT: .reg .f32 %f<7>;
+; CHECK-NEXT: .reg .b32 %f<7>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r2, [test_fdiv_param_1];
@@ -351,7 +351,7 @@ define <2 x half> @test_frem(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<7>;
; CHECK-NEXT: .reg .b32 %r<4>;
-; CHECK-NEXT: .reg .f32 %f<15>;
+; CHECK-NEXT: .reg .b32 %f<15>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r2, [test_frem_param_1];
@@ -591,7 +591,7 @@ define <2 x half> @test_select_cc(<2 x half> %a, <2 x half> %b, <2 x half> %c, <
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<11>;
; CHECK-NOF16-NEXT: .reg .b32 %r<6>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r4, [test_select_cc_param_3];
@@ -623,7 +623,7 @@ define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
; CHECK-F16: {
; CHECK-F16-NEXT: .reg .pred %p<3>;
; CHECK-F16-NEXT: .reg .b32 %r<3>;
-; CHECK-F16-NEXT: .reg .f32 %f<7>;
+; CHECK-F16-NEXT: .reg .b32 %f<7>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
; CHECK-F16-NEXT: ld.param.v2.f32 {%f3, %f4}, [test_select_cc_f32_f16_param_1];
@@ -641,7 +641,7 @@ define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<5>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<11>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<11>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.v2.f32 {%f3, %f4}, [test_select_cc_f32_f16_param_1];
@@ -672,7 +672,7 @@ define <2 x half> @test_select_cc_f16_f32(<2 x half> %a, <2 x half> %b,
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<7>;
; CHECK-NEXT: .reg .b32 %r<4>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.f32 {%f3, %f4}, [test_select_cc_f16_f32_param_3];
@@ -716,7 +716,7 @@ define <2 x i1> @test_fcmp_une(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fcmp_une_param_1];
@@ -760,7 +760,7 @@ define <2 x i1> @test_fcmp_ueq(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fcmp_ueq_param_1];
@@ -804,7 +804,7 @@ define <2 x i1> @test_fcmp_ugt(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fcmp_ugt_param_1];
@@ -848,7 +848,7 @@ define <2 x i1> @test_fcmp_uge(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fcmp_uge_param_1];
@@ -892,7 +892,7 @@ define <2 x i1> @test_fcmp_ult(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fcmp_ult_param_1];
@@ -936,7 +936,7 @@ define <2 x i1> @test_fcmp_ule(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fcmp_ule_param_1];
@@ -981,7 +981,7 @@ define <2 x i1> @test_fcmp_uno(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fcmp_uno_param_1];
@@ -1025,7 +1025,7 @@ define <2 x i1> @test_fcmp_one(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fcmp_one_param_1];
@@ -1069,7 +1069,7 @@ define <2 x i1> @test_fcmp_oeq(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fcmp_oeq_param_1];
@@ -1113,7 +1113,7 @@ define <2 x i1> @test_fcmp_ogt(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fcmp_ogt_param_1];
@@ -1157,7 +1157,7 @@ define <2 x i1> @test_fcmp_oge(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fcmp_oge_param_1];
@@ -1201,7 +1201,7 @@ define <2 x i1> @test_fcmp_olt(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fcmp_olt_param_1];
@@ -1245,7 +1245,7 @@ define <2 x i1> @test_fcmp_ole(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fcmp_ole_param_1];
@@ -1289,7 +1289,7 @@ define <2 x i1> @test_fcmp_ord(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_fcmp_ord_param_1];
@@ -1472,7 +1472,7 @@ define <2 x half> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x half> %b) #0 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<5>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<7>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<7>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_uitofp_2xi32_fadd_param_0];
@@ -1516,7 +1516,7 @@ define <2 x half> @test_sitofp_2xi32_fadd(<2 x i32> %a, <2 x half> %b) #0 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<5>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<7>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<7>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.v2.u32 {%r1, %r2}, [test_sitofp_2xi32_fadd_param_0];
@@ -1545,7 +1545,7 @@ define <2 x half> @test_fptrunc_2xfloat(<2 x float> %a) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<3>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_fptrunc_2xfloat_param_0];
@@ -1563,7 +1563,7 @@ define <2 x half> @test_fptrunc_2xdouble(<2 x double> %a) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<3>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_fptrunc_2xdouble_param_0];
@@ -1581,7 +1581,7 @@ define <2 x float> @test_fpext_2xfloat(<2 x half> %a) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<3>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_fpext_2xfloat_param_0];
@@ -1599,7 +1599,7 @@ define <2 x double> @test_fpext_2xdouble(<2 x half> %a) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<3>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_fpext_2xdouble_param_0];
@@ -1643,7 +1643,7 @@ define <2 x half> @test_bitcast_float_to_2xhalf(float %a) #0 {
; CHECK-LABEL: test_bitcast_float_to_2xhalf(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [test_bitcast_float_to_2xhalf_param_0];
@@ -1658,7 +1658,7 @@ define float @test_bitcast_2xhalf_to_float(<2 x half> %a) #0 {
; CHECK-LABEL: test_bitcast_2xhalf_to_float(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u32 %r1, [test_bitcast_2xhalf_to_float_param_0];
@@ -1698,7 +1698,7 @@ define <2 x half> @test_sqrt(<2 x half> %a) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<5>;
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_sqrt_param_0];
@@ -1728,7 +1728,7 @@ define <2 x half> @test_sin(<2 x half> %a) #0 #1 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<5>;
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_sin_param_0];
@@ -1751,7 +1751,7 @@ define <2 x half> @test_cos(<2 x half> %a) #0 #1 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<5>;
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_cos_param_0];
@@ -1829,7 +1829,7 @@ define <2 x half> @test_fma(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<9>;
; CHECK-NOF16-NEXT: .reg .b32 %r<5>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<9>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<9>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r3, [test_fma_param_2];
@@ -1870,7 +1870,7 @@ define <2 x half> @test_fabs(<2 x half> %a) #0 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<5>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r1, [test_fabs_param_0];
@@ -1893,7 +1893,7 @@ define <2 x half> @test_minnum(<2 x half> %a, <2 x half> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<7>;
; CHECK-NEXT: .reg .b32 %r<4>;
-; CHECK-NEXT: .reg .f32 %f<7>;
+; CHECK-NEXT: .reg .b32 %f<7>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r2, [test_minnum_param_1];
@@ -1920,7 +1920,7 @@ define <2 x half> @test_maxnum(<2 x half> %a, <2 x half> %b) #0 {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<7>;
; CHECK-NEXT: .reg .b32 %r<4>;
-; CHECK-NEXT: .reg .f32 %f<7>;
+; CHECK-NEXT: .reg .b32 %f<7>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r2, [test_maxnum_param_1];
@@ -1984,7 +1984,7 @@ define <2 x half> @test_copysign_f32(<2 x half> %a, <2 x float> %b) #0 {
; CHECK-F16: {
; CHECK-F16-NEXT: .reg .b16 %rs<3>;
; CHECK-F16-NEXT: .reg .b32 %r<6>;
-; CHECK-F16-NEXT: .reg .f32 %f<3>;
+; CHECK-F16-NEXT: .reg .b32 %f<3>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
; CHECK-F16-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_copysign_f32_param_1];
@@ -2002,7 +2002,7 @@ define <2 x half> @test_copysign_f32(<2 x half> %a, <2 x float> %b) #0 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<9>;
; CHECK-NOF16-NEXT: .reg .b32 %r<7>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<3>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<3>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_copysign_f32_param_1];
@@ -2031,7 +2031,7 @@ define <2 x half> @test_copysign_f64(<2 x half> %a, <2 x double> %b) #0 {
; CHECK-F16: {
; CHECK-F16-NEXT: .reg .b16 %rs<3>;
; CHECK-F16-NEXT: .reg .b32 %r<6>;
-; CHECK-F16-NEXT: .reg .f64 %fd<3>;
+; CHECK-F16-NEXT: .reg .b64 %fd<3>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
; CHECK-F16-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_copysign_f64_param_1];
@@ -2050,7 +2050,7 @@ define <2 x half> @test_copysign_f64(<2 x half> %a, <2 x double> %b) #0 {
; CHECK-NOF16-NEXT: .reg .b16 %rs<9>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
; CHECK-NOF16-NEXT: .reg .b64 %rd<7>;
-; CHECK-NOF16-NEXT: .reg .f64 %fd<3>;
+; CHECK-NOF16-NEXT: .reg .b64 %fd<3>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.v2.f64 {%fd1, %fd2}, [test_copysign_f64_param_1];
@@ -2081,7 +2081,7 @@ define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-F16: {
; CHECK-F16-NEXT: .reg .b16 %rs<3>;
; CHECK-F16-NEXT: .reg .b32 %r<6>;
-; CHECK-F16-NEXT: .reg .f32 %f<3>;
+; CHECK-F16-NEXT: .reg .b32 %f<3>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
; CHECK-F16-NEXT: ld.param.b32 %r2, [test_copysign_extended_param_1];
@@ -2099,7 +2099,7 @@ define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<11>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<3>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<3>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r2, [test_copysign_extended_param_1];
@@ -2236,7 +2236,7 @@ define <2 x half> @test_round(<2 x half> %a) #0 {
; CHECK-NEXT: .reg .pred %p<5>;
; CHECK-NEXT: .reg .b16 %rs<5>;
; CHECK-NEXT: .reg .b32 %r<9>;
-; CHECK-NEXT: .reg .f32 %f<17>;
+; CHECK-NEXT: .reg .b32 %f<17>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b32 %r1, [test_round_param_0];
@@ -2293,7 +2293,7 @@ define <2 x half> @test_fmuladd(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<9>;
; CHECK-NOF16-NEXT: .reg .b32 %r<5>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<9>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<9>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r3, [test_fmuladd_param_2];
diff --git a/llvm/test/CodeGen/NVPTX/f32-ex2.ll b/llvm/test/CodeGen/NVPTX/f32-ex2.ll
index c9eff2a..2c5c814 100644
--- a/llvm/test/CodeGen/NVPTX/f32-ex2.ll
+++ b/llvm/test/CodeGen/NVPTX/f32-ex2.ll
@@ -9,7 +9,7 @@ declare float @llvm.nvvm.ex2.approx.f(float)
define float @ex2_float(float %0) {
; CHECK-LABEL: ex2_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [ex2_float_param_0];
@@ -24,7 +24,7 @@ define float @ex2_float(float %0) {
define float @ex2_float_ftz(float %0) {
; CHECK-LABEL: ex2_float_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [ex2_float_ftz_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/f32-lg2.ll b/llvm/test/CodeGen/NVPTX/f32-lg2.ll
index 43c5219..9dac308 100644
--- a/llvm/test/CodeGen/NVPTX/f32-lg2.ll
+++ b/llvm/test/CodeGen/NVPTX/f32-lg2.ll
@@ -10,7 +10,7 @@ declare float @llvm.nvvm.lg2.approx.ftz.f(float)
define float @lg2_float(float %0) {
; CHECK-LABEL: lg2_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [lg2_float_param_0];
@@ -25,7 +25,7 @@ define float @lg2_float(float %0) {
define float @lg2_float_ftz(float %0) {
; CHECK-LABEL: lg2_float_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [lg2_float_ftz_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/fabs-intrinsics.ll b/llvm/test/CodeGen/NVPTX/fabs-intrinsics.ll
index dd9ef22..d9c5a52 100644
--- a/llvm/test/CodeGen/NVPTX/fabs-intrinsics.ll
+++ b/llvm/test/CodeGen/NVPTX/fabs-intrinsics.ll
@@ -18,7 +18,7 @@ declare <2 x bfloat> @llvm.nvvm.fabs.v2bf16(<2 x bfloat>)
define float @fabs_float(float %a) {
; CHECK-LABEL: fabs_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [fabs_float_param_0];
@@ -32,7 +32,7 @@ define float @fabs_float(float %a) {
define float @fabs_float_ftz(float %a) {
; CHECK-LABEL: fabs_float_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [fabs_float_ftz_param_0];
@@ -46,7 +46,7 @@ define float @fabs_float_ftz(float %a) {
define double @fabs_double(double %a) {
; CHECK-LABEL: fabs_double(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [fabs_double_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/fexp2.ll b/llvm/test/CodeGen/NVPTX/fexp2.ll
index 7e485dc..4664d70 100644
--- a/llvm/test/CodeGen/NVPTX/fexp2.ll
+++ b/llvm/test/CodeGen/NVPTX/fexp2.ll
@@ -13,7 +13,7 @@ target triple = "nvptx64-nvidia-cuda"
define float @exp2_test(float %in) {
; CHECK-LABEL: exp2_test(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.f32 %f1, [exp2_test_param_0];
@@ -23,7 +23,7 @@ define float @exp2_test(float %in) {
;
; CHECK-FP16-LABEL: exp2_test(
; CHECK-FP16: {
-; CHECK-FP16-NEXT: .reg .f32 %f<3>;
+; CHECK-FP16-NEXT: .reg .b32 %f<3>;
; CHECK-FP16-EMPTY:
; CHECK-FP16-NEXT: // %bb.0: // %entry
; CHECK-FP16-NEXT: ld.param.f32 %f1, [exp2_test_param_0];
@@ -33,7 +33,7 @@ define float @exp2_test(float %in) {
;
; CHECK-BF16-LABEL: exp2_test(
; CHECK-BF16: {
-; CHECK-BF16-NEXT: .reg .f32 %f<3>;
+; CHECK-BF16-NEXT: .reg .b32 %f<3>;
; CHECK-BF16-EMPTY:
; CHECK-BF16-NEXT: // %bb.0: // %entry
; CHECK-BF16-NEXT: ld.param.f32 %f1, [exp2_test_param_0];
@@ -49,7 +49,7 @@ entry:
define float @exp2_ftz_test(float %in) #0 {
; CHECK-LABEL: exp2_ftz_test(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.f32 %f1, [exp2_ftz_test_param_0];
@@ -59,7 +59,7 @@ define float @exp2_ftz_test(float %in) #0 {
;
; CHECK-FP16-LABEL: exp2_ftz_test(
; CHECK-FP16: {
-; CHECK-FP16-NEXT: .reg .f32 %f<3>;
+; CHECK-FP16-NEXT: .reg .b32 %f<3>;
; CHECK-FP16-EMPTY:
; CHECK-FP16-NEXT: // %bb.0: // %entry
; CHECK-FP16-NEXT: ld.param.f32 %f1, [exp2_ftz_test_param_0];
@@ -69,7 +69,7 @@ define float @exp2_ftz_test(float %in) #0 {
;
; CHECK-BF16-LABEL: exp2_ftz_test(
; CHECK-BF16: {
-; CHECK-BF16-NEXT: .reg .f32 %f<3>;
+; CHECK-BF16-NEXT: .reg .b32 %f<3>;
; CHECK-BF16-EMPTY:
; CHECK-BF16-NEXT: // %bb.0: // %entry
; CHECK-BF16-NEXT: ld.param.f32 %f1, [exp2_ftz_test_param_0];
@@ -85,7 +85,7 @@ entry:
define <2 x float> @exp2_test_v(<2 x float> %in) {
; CHECK-LABEL: exp2_test_v(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.v2.f32 {%f1, %f2}, [exp2_test_v_param_0];
@@ -96,7 +96,7 @@ define <2 x float> @exp2_test_v(<2 x float> %in) {
;
; CHECK-FP16-LABEL: exp2_test_v(
; CHECK-FP16: {
-; CHECK-FP16-NEXT: .reg .f32 %f<5>;
+; CHECK-FP16-NEXT: .reg .b32 %f<5>;
; CHECK-FP16-EMPTY:
; CHECK-FP16-NEXT: // %bb.0: // %entry
; CHECK-FP16-NEXT: ld.param.v2.f32 {%f1, %f2}, [exp2_test_v_param_0];
@@ -107,7 +107,7 @@ define <2 x float> @exp2_test_v(<2 x float> %in) {
;
; CHECK-BF16-LABEL: exp2_test_v(
; CHECK-BF16: {
-; CHECK-BF16-NEXT: .reg .f32 %f<5>;
+; CHECK-BF16-NEXT: .reg .b32 %f<5>;
; CHECK-BF16-EMPTY:
; CHECK-BF16-NEXT: // %bb.0: // %entry
; CHECK-BF16-NEXT: ld.param.v2.f32 {%f1, %f2}, [exp2_test_v_param_0];
@@ -127,7 +127,7 @@ define half @exp2_f16_test(half %in) {
; CHECK-LABEL: exp2_f16_test(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.b16 %rs1, [exp2_f16_test_param_0];
@@ -167,7 +167,7 @@ define half @exp2_f16_ftz_test(half %in) #0 {
; CHECK-LABEL: exp2_f16_ftz_test(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.b16 %rs1, [exp2_f16_ftz_test_param_0];
@@ -207,7 +207,7 @@ define <2 x half> @exp2_f16_test_v(<2 x half> %in) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<5>;
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.b32 %r1, [exp2_f16_test_v_param_0];
@@ -256,7 +256,7 @@ define bfloat @exp2_bf16_test(bfloat %in) {
; CHECK-NEXT: .reg .pred %p<2>;
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<9>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.u16 %r1, [exp2_bf16_test_param_0];
@@ -279,7 +279,7 @@ define bfloat @exp2_bf16_test(bfloat %in) {
; CHECK-FP16-NEXT: .reg .pred %p<2>;
; CHECK-FP16-NEXT: .reg .b16 %rs<2>;
; CHECK-FP16-NEXT: .reg .b32 %r<9>;
-; CHECK-FP16-NEXT: .reg .f32 %f<3>;
+; CHECK-FP16-NEXT: .reg .b32 %f<3>;
; CHECK-FP16-EMPTY:
; CHECK-FP16-NEXT: // %bb.0: // %entry
; CHECK-FP16-NEXT: ld.param.u16 %r1, [exp2_bf16_test_param_0];
@@ -318,7 +318,7 @@ define <2 x bfloat> @exp2_bf16_test_v(<2 x bfloat> %in) {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
; CHECK-NEXT: .reg .b32 %r<19>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.b32 %r1, [exp2_bf16_test_v_param_0];
@@ -354,7 +354,7 @@ define <2 x bfloat> @exp2_bf16_test_v(<2 x bfloat> %in) {
; CHECK-FP16-NEXT: .reg .pred %p<3>;
; CHECK-FP16-NEXT: .reg .b16 %rs<3>;
; CHECK-FP16-NEXT: .reg .b32 %r<19>;
-; CHECK-FP16-NEXT: .reg .f32 %f<5>;
+; CHECK-FP16-NEXT: .reg .b32 %f<5>;
; CHECK-FP16-EMPTY:
; CHECK-FP16-NEXT: // %bb.0: // %entry
; CHECK-FP16-NEXT: ld.param.b32 %r1, [exp2_bf16_test_v_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/flog2.ll b/llvm/test/CodeGen/NVPTX/flog2.ll
index ff762dc..4dfed3d 100644
--- a/llvm/test/CodeGen/NVPTX/flog2.ll
+++ b/llvm/test/CodeGen/NVPTX/flog2.ll
@@ -7,7 +7,7 @@ target triple = "nvptx64-nvidia-cuda"
define float @log2_test(float %in) {
; CHECK-LABEL: log2_test(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.f32 %f1, [log2_test_param_0];
@@ -23,7 +23,7 @@ entry:
define float @log2_ftz_test(float %in) #0 {
; CHECK-LABEL: log2_ftz_test(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.f32 %f1, [log2_ftz_test_param_0];
@@ -39,7 +39,7 @@ entry:
define <2 x float> @log2_test_v(<2 x float> %in) {
; CHECK-LABEL: log2_test_v(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.v2.f32 {%f1, %f2}, [log2_test_v_param_0];
@@ -59,7 +59,7 @@ define half @log2_f16_test(half %in) {
; CHECK-LABEL: log2_f16_test(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.b16 %rs1, [log2_f16_test_param_0];
@@ -78,7 +78,7 @@ define half @log2_f16_ftz_test(half %in) #0 {
; CHECK-LABEL: log2_f16_ftz_test(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<3>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.b16 %rs1, [log2_f16_ftz_test_param_0];
@@ -98,7 +98,7 @@ define <2 x half> @log2_f16_test_v(<2 x half> %in) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<5>;
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.b32 %r1, [log2_f16_test_v_param_0];
@@ -126,7 +126,7 @@ define bfloat @log2_bf16_test(bfloat %in) {
; CHECK-NEXT: .reg .pred %p<2>;
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<9>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.u16 %r1, [log2_bf16_test_param_0];
@@ -155,7 +155,7 @@ define bfloat @log2_bf16_ftz_test(bfloat %in) #0 {
; CHECK-NEXT: .reg .pred %p<2>;
; CHECK-NEXT: .reg .b16 %rs<2>;
; CHECK-NEXT: .reg .b32 %r<9>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.u16 %r1, [log2_bf16_ftz_test_param_0];
@@ -184,7 +184,7 @@ define <2 x bfloat> @log2_bf16_test_v(<2 x bfloat> %in) {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b16 %rs<3>;
; CHECK-NEXT: .reg .b32 %r<19>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: ld.param.b32 %r1, [log2_bf16_test_v_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll b/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll
index 7dce894..9051a0b 100644
--- a/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll
+++ b/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll
@@ -198,7 +198,7 @@ define half @fma_f16_expanded_maxnum_no_nans(half %a, half %b, half %c) #0 {
; CHECK-SM70-LABEL: fma_f16_expanded_maxnum_no_nans(
; CHECK-SM70: {
; CHECK-SM70-NEXT: .reg .b16 %rs<6>;
-; CHECK-SM70-NEXT: .reg .f32 %f<3>;
+; CHECK-SM70-NEXT: .reg .b32 %f<3>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b16 %rs1, [fma_f16_expanded_maxnum_no_nans_param_0];
@@ -250,7 +250,7 @@ define bfloat @fma_bf16_expanded_unsafe_with_nans(bfloat %a, bfloat %b, bfloat %
; CHECK-SM70-NEXT: .reg .pred %p<3>;
; CHECK-SM70-NEXT: .reg .b16 %rs<3>;
; CHECK-SM70-NEXT: .reg .b32 %r<14>;
-; CHECK-SM70-NEXT: .reg .f32 %f<6>;
+; CHECK-SM70-NEXT: .reg .b32 %f<6>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.u16 %r1, [fma_bf16_expanded_unsafe_with_nans_param_2];
@@ -314,7 +314,7 @@ define bfloat @fma_bf16_expanded_no_nans(bfloat %a, bfloat %b, bfloat %c) #0 {
; CHECK-SM70-NEXT: .reg .pred %p<3>;
; CHECK-SM70-NEXT: .reg .b16 %rs<3>;
; CHECK-SM70-NEXT: .reg .b32 %r<14>;
-; CHECK-SM70-NEXT: .reg .f32 %f<6>;
+; CHECK-SM70-NEXT: .reg .b32 %f<6>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.u16 %r1, [fma_bf16_expanded_no_nans_param_2];
@@ -372,7 +372,7 @@ define bfloat @fma_bf16_expanded_no_nans_multiple_uses_of_fma(bfloat %a, bfloat
; CHECK-FTZ: {
; CHECK-FTZ-NEXT: .reg .b16 %rs<9>;
; CHECK-FTZ-NEXT: .reg .b32 %r<7>;
-; CHECK-FTZ-NEXT: .reg .f32 %f<6>;
+; CHECK-FTZ-NEXT: .reg .b32 %f<6>;
; CHECK-FTZ-EMPTY:
; CHECK-FTZ-NEXT: // %bb.0:
; CHECK-FTZ-NEXT: ld.param.b16 %rs1, [fma_bf16_expanded_no_nans_multiple_uses_of_fma_param_0];
@@ -402,7 +402,7 @@ define bfloat @fma_bf16_expanded_no_nans_multiple_uses_of_fma(bfloat %a, bfloat
; CHECK-SM70-NEXT: .reg .pred %p<5>;
; CHECK-SM70-NEXT: .reg .b16 %rs<4>;
; CHECK-SM70-NEXT: .reg .b32 %r<29>;
-; CHECK-SM70-NEXT: .reg .f32 %f<10>;
+; CHECK-SM70-NEXT: .reg .b32 %f<10>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.u16 %r1, [fma_bf16_expanded_no_nans_multiple_uses_of_fma_param_2];
@@ -490,7 +490,7 @@ define bfloat @fma_bf16_expanded_maxnum_no_nans(bfloat %a, bfloat %b, bfloat %c)
; CHECK-SM70-NEXT: .reg .pred %p<3>;
; CHECK-SM70-NEXT: .reg .b16 %rs<2>;
; CHECK-SM70-NEXT: .reg .b32 %r<20>;
-; CHECK-SM70-NEXT: .reg .f32 %f<7>;
+; CHECK-SM70-NEXT: .reg .b32 %f<7>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.u16 %r1, [fma_bf16_expanded_maxnum_no_nans_param_2];
@@ -731,7 +731,7 @@ define <2 x half> @fma_f16x2_expanded_maxnum_no_nans(<2 x half> %a, <2 x half> %
; CHECK-SM70: {
; CHECK-SM70-NEXT: .reg .b16 %rs<5>;
; CHECK-SM70-NEXT: .reg .b32 %r<6>;
-; CHECK-SM70-NEXT: .reg .f32 %f<5>;
+; CHECK-SM70-NEXT: .reg .b32 %f<5>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_f16x2_expanded_maxnum_no_nans_param_2];
@@ -788,7 +788,7 @@ define <2 x bfloat> @fma_bf16x2_expanded_unsafe_with_nans(<2 x bfloat> %a, <2 x
; CHECK-SM70-NEXT: .reg .pred %p<5>;
; CHECK-SM70-NEXT: .reg .b16 %rs<11>;
; CHECK-SM70-NEXT: .reg .b32 %r<31>;
-; CHECK-SM70-NEXT: .reg .f32 %f<11>;
+; CHECK-SM70-NEXT: .reg .b32 %f<11>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_bf16x2_expanded_unsafe_with_nans_param_0];
@@ -881,7 +881,7 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans(<2 x bfloat> %a, <2 x bfloat> %
; CHECK-SM70-NEXT: .reg .pred %p<5>;
; CHECK-SM70-NEXT: .reg .b16 %rs<11>;
; CHECK-SM70-NEXT: .reg .b32 %r<31>;
-; CHECK-SM70-NEXT: .reg .f32 %f<11>;
+; CHECK-SM70-NEXT: .reg .b32 %f<11>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_param_0];
@@ -968,7 +968,7 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloa
; CHECK-FTZ: {
; CHECK-FTZ-NEXT: .reg .b16 %rs<7>;
; CHECK-FTZ-NEXT: .reg .b32 %r<20>;
-; CHECK-FTZ-NEXT: .reg .f32 %f<11>;
+; CHECK-FTZ-NEXT: .reg .b32 %f<11>;
; CHECK-FTZ-EMPTY:
; CHECK-FTZ-NEXT: // %bb.0:
; CHECK-FTZ-NEXT: ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
@@ -1012,7 +1012,7 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloa
; CHECK-SM70-NEXT: .reg .pred %p<9>;
; CHECK-SM70-NEXT: .reg .b16 %rs<11>;
; CHECK-SM70-NEXT: .reg .b32 %r<61>;
-; CHECK-SM70-NEXT: .reg .f32 %f<19>;
+; CHECK-SM70-NEXT: .reg .b32 %f<19>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
@@ -1149,7 +1149,7 @@ define <2 x bfloat> @fma_bf16x2_expanded_maxnum_no_nans(<2 x bfloat> %a, <2 x bf
; CHECK-SM70-NEXT: .reg .pred %p<5>;
; CHECK-SM70-NEXT: .reg .b16 %rs<7>;
; CHECK-SM70-NEXT: .reg .b32 %r<43>;
-; CHECK-SM70-NEXT: .reg .f32 %f<13>;
+; CHECK-SM70-NEXT: .reg .b32 %f<13>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_bf16x2_expanded_maxnum_no_nans_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll b/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll
index eb51d7db..73f808f 100644
--- a/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll
+++ b/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll
@@ -137,7 +137,7 @@ define half @fma_f16_maxnum_no_nans(half %a, half %b, half %c) #0 {
; CHECK-SM70-LABEL: fma_f16_maxnum_no_nans(
; CHECK-SM70: {
; CHECK-SM70-NEXT: .reg .b16 %rs<6>;
-; CHECK-SM70-NEXT: .reg .f32 %f<3>;
+; CHECK-SM70-NEXT: .reg .b32 %f<3>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b16 %rs1, [fma_f16_maxnum_no_nans_param_0];
@@ -184,7 +184,7 @@ define bfloat @fma_bf16_no_nans(bfloat %a, bfloat %b, bfloat %c) #0 {
; CHECK-SM70-NEXT: .reg .pred %p<3>;
; CHECK-SM70-NEXT: .reg .b16 %rs<3>;
; CHECK-SM70-NEXT: .reg .b32 %r<14>;
-; CHECK-SM70-NEXT: .reg .f32 %f<6>;
+; CHECK-SM70-NEXT: .reg .b32 %f<6>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.u16 %r1, [fma_bf16_no_nans_param_2];
@@ -239,7 +239,7 @@ define bfloat @fma_bf16_no_nans_multiple_uses_of_fma(bfloat %a, bfloat %b, bfloa
; CHECK-FTZ: {
; CHECK-FTZ-NEXT: .reg .b16 %rs<7>;
; CHECK-FTZ-NEXT: .reg .b32 %r<5>;
-; CHECK-FTZ-NEXT: .reg .f32 %f<5>;
+; CHECK-FTZ-NEXT: .reg .b32 %f<5>;
; CHECK-FTZ-EMPTY:
; CHECK-FTZ-NEXT: // %bb.0:
; CHECK-FTZ-NEXT: ld.param.b16 %rs1, [fma_bf16_no_nans_multiple_uses_of_fma_param_0];
@@ -264,7 +264,7 @@ define bfloat @fma_bf16_no_nans_multiple_uses_of_fma(bfloat %a, bfloat %b, bfloa
; CHECK-SM70-NEXT: .reg .pred %p<4>;
; CHECK-SM70-NEXT: .reg .b16 %rs<2>;
; CHECK-SM70-NEXT: .reg .b32 %r<27>;
-; CHECK-SM70-NEXT: .reg .f32 %f<9>;
+; CHECK-SM70-NEXT: .reg .b32 %f<9>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.u16 %r1, [fma_bf16_no_nans_multiple_uses_of_fma_param_2];
@@ -345,7 +345,7 @@ define bfloat @fma_bf16_maxnum_no_nans(bfloat %a, bfloat %b, bfloat %c) #0 {
; CHECK-SM70-NEXT: .reg .pred %p<3>;
; CHECK-SM70-NEXT: .reg .b16 %rs<2>;
; CHECK-SM70-NEXT: .reg .b32 %r<20>;
-; CHECK-SM70-NEXT: .reg .f32 %f<7>;
+; CHECK-SM70-NEXT: .reg .b32 %f<7>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.u16 %r1, [fma_bf16_maxnum_no_nans_param_2];
@@ -516,7 +516,7 @@ define <2 x half> @fma_f16x2_maxnum_no_nans(<2 x half> %a, <2 x half> %b, <2 x h
; CHECK-SM70: {
; CHECK-SM70-NEXT: .reg .b16 %rs<5>;
; CHECK-SM70-NEXT: .reg .b32 %r<6>;
-; CHECK-SM70-NEXT: .reg .f32 %f<5>;
+; CHECK-SM70-NEXT: .reg .b32 %f<5>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_f16x2_maxnum_no_nans_param_2];
@@ -568,7 +568,7 @@ define <2 x bfloat> @fma_bf16x2_no_nans(<2 x bfloat> %a, <2 x bfloat> %b, <2 x b
; CHECK-SM70-NEXT: .reg .pred %p<5>;
; CHECK-SM70-NEXT: .reg .b16 %rs<11>;
; CHECK-SM70-NEXT: .reg .b32 %r<31>;
-; CHECK-SM70-NEXT: .reg .f32 %f<11>;
+; CHECK-SM70-NEXT: .reg .b32 %f<11>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_bf16x2_no_nans_param_0];
@@ -652,7 +652,7 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
; CHECK-FTZ: {
; CHECK-FTZ-NEXT: .reg .b16 %rs<5>;
; CHECK-FTZ-NEXT: .reg .b32 %r<14>;
-; CHECK-FTZ-NEXT: .reg .f32 %f<9>;
+; CHECK-FTZ-NEXT: .reg .b32 %f<9>;
; CHECK-FTZ-EMPTY:
; CHECK-FTZ-NEXT: // %bb.0:
; CHECK-FTZ-NEXT: ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2];
@@ -687,7 +687,7 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
; CHECK-SM70-NEXT: .reg .pred %p<7>;
; CHECK-SM70-NEXT: .reg .b16 %rs<7>;
; CHECK-SM70-NEXT: .reg .b32 %r<57>;
-; CHECK-SM70-NEXT: .reg .f32 %f<17>;
+; CHECK-SM70-NEXT: .reg .b32 %f<17>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
@@ -811,7 +811,7 @@ define <2 x bfloat> @fma_bf16x2_maxnum_no_nans(<2 x bfloat> %a, <2 x bfloat> %b,
; CHECK-SM70-NEXT: .reg .pred %p<5>;
; CHECK-SM70-NEXT: .reg .b16 %rs<7>;
; CHECK-SM70-NEXT: .reg .b32 %r<43>;
-; CHECK-SM70-NEXT: .reg .f32 %f<13>;
+; CHECK-SM70-NEXT: .reg .b32 %f<13>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_bf16x2_maxnum_no_nans_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll b/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll
index a3545f5..b94fa5a 100644
--- a/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll
+++ b/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll
@@ -147,7 +147,7 @@ define half @fma_f16_expanded_maxnum_no_nans(half %a, half %b, half %c) {
; CHECK-SM70-LABEL: fma_f16_expanded_maxnum_no_nans(
; CHECK-SM70: {
; CHECK-SM70-NEXT: .reg .b16 %rs<6>;
-; CHECK-SM70-NEXT: .reg .f32 %f<3>;
+; CHECK-SM70-NEXT: .reg .b32 %f<3>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b16 %rs1, [fma_f16_expanded_maxnum_no_nans_param_0];
@@ -195,7 +195,7 @@ define bfloat @fma_bf16_expanded_no_nans(bfloat %a, bfloat %b, bfloat %c) {
; CHECK-SM70-NEXT: .reg .pred %p<3>;
; CHECK-SM70-NEXT: .reg .b16 %rs<3>;
; CHECK-SM70-NEXT: .reg .b32 %r<14>;
-; CHECK-SM70-NEXT: .reg .f32 %f<6>;
+; CHECK-SM70-NEXT: .reg .b32 %f<6>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.u16 %r1, [fma_bf16_expanded_no_nans_param_2];
@@ -253,7 +253,7 @@ define bfloat @fma_bf16_expanded_no_nans_multiple_uses_of_fma(bfloat %a, bfloat
; CHECK-FTZ: {
; CHECK-FTZ-NEXT: .reg .b16 %rs<9>;
; CHECK-FTZ-NEXT: .reg .b32 %r<7>;
-; CHECK-FTZ-NEXT: .reg .f32 %f<6>;
+; CHECK-FTZ-NEXT: .reg .b32 %f<6>;
; CHECK-FTZ-EMPTY:
; CHECK-FTZ-NEXT: // %bb.0:
; CHECK-FTZ-NEXT: ld.param.b16 %rs1, [fma_bf16_expanded_no_nans_multiple_uses_of_fma_param_0];
@@ -283,7 +283,7 @@ define bfloat @fma_bf16_expanded_no_nans_multiple_uses_of_fma(bfloat %a, bfloat
; CHECK-SM70-NEXT: .reg .pred %p<5>;
; CHECK-SM70-NEXT: .reg .b16 %rs<4>;
; CHECK-SM70-NEXT: .reg .b32 %r<29>;
-; CHECK-SM70-NEXT: .reg .f32 %f<10>;
+; CHECK-SM70-NEXT: .reg .b32 %f<10>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.u16 %r1, [fma_bf16_expanded_no_nans_multiple_uses_of_fma_param_2];
@@ -373,7 +373,7 @@ define bfloat @fma_bf16_expanded_maxnum_no_nans(bfloat %a, bfloat %b, bfloat %c)
; CHECK-SM70-NEXT: .reg .pred %p<3>;
; CHECK-SM70-NEXT: .reg .b16 %rs<2>;
; CHECK-SM70-NEXT: .reg .b32 %r<20>;
-; CHECK-SM70-NEXT: .reg .f32 %f<7>;
+; CHECK-SM70-NEXT: .reg .b32 %f<7>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.u16 %r1, [fma_bf16_expanded_maxnum_no_nans_param_2];
@@ -563,7 +563,7 @@ define <2 x half> @fma_f16x2_expanded_maxnum_no_nans(<2 x half> %a, <2 x half> %
; CHECK-SM70: {
; CHECK-SM70-NEXT: .reg .b16 %rs<5>;
; CHECK-SM70-NEXT: .reg .b32 %r<6>;
-; CHECK-SM70-NEXT: .reg .f32 %f<5>;
+; CHECK-SM70-NEXT: .reg .b32 %f<5>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_f16x2_expanded_maxnum_no_nans_param_2];
@@ -616,7 +616,7 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans(<2 x bfloat> %a, <2 x bfloat> %
; CHECK-SM70-NEXT: .reg .pred %p<5>;
; CHECK-SM70-NEXT: .reg .b16 %rs<11>;
; CHECK-SM70-NEXT: .reg .b32 %r<31>;
-; CHECK-SM70-NEXT: .reg .f32 %f<11>;
+; CHECK-SM70-NEXT: .reg .b32 %f<11>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_param_0];
@@ -703,7 +703,7 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloa
; CHECK-FTZ: {
; CHECK-FTZ-NEXT: .reg .b16 %rs<7>;
; CHECK-FTZ-NEXT: .reg .b32 %r<20>;
-; CHECK-FTZ-NEXT: .reg .f32 %f<11>;
+; CHECK-FTZ-NEXT: .reg .b32 %f<11>;
; CHECK-FTZ-EMPTY:
; CHECK-FTZ-NEXT: // %bb.0:
; CHECK-FTZ-NEXT: ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
@@ -747,7 +747,7 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloa
; CHECK-SM70-NEXT: .reg .pred %p<9>;
; CHECK-SM70-NEXT: .reg .b16 %rs<11>;
; CHECK-SM70-NEXT: .reg .b32 %r<61>;
-; CHECK-SM70-NEXT: .reg .f32 %f<19>;
+; CHECK-SM70-NEXT: .reg .b32 %f<19>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
@@ -884,7 +884,7 @@ define <2 x bfloat> @fma_bf16x2_expanded_maxnum_no_nans(<2 x bfloat> %a, <2 x bf
; CHECK-SM70-NEXT: .reg .pred %p<5>;
; CHECK-SM70-NEXT: .reg .b16 %rs<7>;
; CHECK-SM70-NEXT: .reg .b32 %r<43>;
-; CHECK-SM70-NEXT: .reg .f32 %f<13>;
+; CHECK-SM70-NEXT: .reg .b32 %f<13>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_bf16x2_expanded_maxnum_no_nans_param_0];
@@ -1084,7 +1084,7 @@ define half @fma_f16_maxnum_no_nans(half %a, half %b, half %c) {
; CHECK-SM70-LABEL: fma_f16_maxnum_no_nans(
; CHECK-SM70: {
; CHECK-SM70-NEXT: .reg .b16 %rs<6>;
-; CHECK-SM70-NEXT: .reg .f32 %f<3>;
+; CHECK-SM70-NEXT: .reg .b32 %f<3>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b16 %rs1, [fma_f16_maxnum_no_nans_param_0];
@@ -1131,7 +1131,7 @@ define bfloat @fma_bf16_no_nans(bfloat %a, bfloat %b, bfloat %c) {
; CHECK-SM70-NEXT: .reg .pred %p<3>;
; CHECK-SM70-NEXT: .reg .b16 %rs<3>;
; CHECK-SM70-NEXT: .reg .b32 %r<14>;
-; CHECK-SM70-NEXT: .reg .f32 %f<6>;
+; CHECK-SM70-NEXT: .reg .b32 %f<6>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.u16 %r1, [fma_bf16_no_nans_param_2];
@@ -1186,7 +1186,7 @@ define bfloat @fma_bf16_no_nans_multiple_uses_of_fma(bfloat %a, bfloat %b, bfloa
; CHECK-FTZ: {
; CHECK-FTZ-NEXT: .reg .b16 %rs<7>;
; CHECK-FTZ-NEXT: .reg .b32 %r<5>;
-; CHECK-FTZ-NEXT: .reg .f32 %f<5>;
+; CHECK-FTZ-NEXT: .reg .b32 %f<5>;
; CHECK-FTZ-EMPTY:
; CHECK-FTZ-NEXT: // %bb.0:
; CHECK-FTZ-NEXT: ld.param.b16 %rs1, [fma_bf16_no_nans_multiple_uses_of_fma_param_0];
@@ -1211,7 +1211,7 @@ define bfloat @fma_bf16_no_nans_multiple_uses_of_fma(bfloat %a, bfloat %b, bfloa
; CHECK-SM70-NEXT: .reg .pred %p<4>;
; CHECK-SM70-NEXT: .reg .b16 %rs<2>;
; CHECK-SM70-NEXT: .reg .b32 %r<27>;
-; CHECK-SM70-NEXT: .reg .f32 %f<9>;
+; CHECK-SM70-NEXT: .reg .b32 %f<9>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.u16 %r1, [fma_bf16_no_nans_multiple_uses_of_fma_param_2];
@@ -1292,7 +1292,7 @@ define bfloat @fma_bf16_maxnum_no_nans(bfloat %a, bfloat %b, bfloat %c) {
; CHECK-SM70-NEXT: .reg .pred %p<3>;
; CHECK-SM70-NEXT: .reg .b16 %rs<2>;
; CHECK-SM70-NEXT: .reg .b32 %r<20>;
-; CHECK-SM70-NEXT: .reg .f32 %f<7>;
+; CHECK-SM70-NEXT: .reg .b32 %f<7>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.u16 %r1, [fma_bf16_maxnum_no_nans_param_2];
@@ -1467,7 +1467,7 @@ define <2 x half> @fma_f16x2_maxnum_no_nans(<2 x half> %a, <2 x half> %b, <2 x h
; CHECK-SM70: {
; CHECK-SM70-NEXT: .reg .b16 %rs<5>;
; CHECK-SM70-NEXT: .reg .b32 %r<6>;
-; CHECK-SM70-NEXT: .reg .f32 %f<5>;
+; CHECK-SM70-NEXT: .reg .b32 %f<5>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_f16x2_maxnum_no_nans_param_2];
@@ -1519,7 +1519,7 @@ define <2 x bfloat> @fma_bf16x2_no_nans(<2 x bfloat> %a, <2 x bfloat> %b, <2 x b
; CHECK-SM70-NEXT: .reg .pred %p<5>;
; CHECK-SM70-NEXT: .reg .b16 %rs<11>;
; CHECK-SM70-NEXT: .reg .b32 %r<31>;
-; CHECK-SM70-NEXT: .reg .f32 %f<11>;
+; CHECK-SM70-NEXT: .reg .b32 %f<11>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_bf16x2_no_nans_param_0];
@@ -1603,7 +1603,7 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
; CHECK-FTZ: {
; CHECK-FTZ-NEXT: .reg .b16 %rs<5>;
; CHECK-FTZ-NEXT: .reg .b32 %r<14>;
-; CHECK-FTZ-NEXT: .reg .f32 %f<9>;
+; CHECK-FTZ-NEXT: .reg .b32 %f<9>;
; CHECK-FTZ-EMPTY:
; CHECK-FTZ-NEXT: // %bb.0:
; CHECK-FTZ-NEXT: ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2];
@@ -1638,7 +1638,7 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
; CHECK-SM70-NEXT: .reg .pred %p<7>;
; CHECK-SM70-NEXT: .reg .b16 %rs<7>;
; CHECK-SM70-NEXT: .reg .b32 %r<57>;
-; CHECK-SM70-NEXT: .reg .f32 %f<17>;
+; CHECK-SM70-NEXT: .reg .b32 %f<17>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
@@ -1762,7 +1762,7 @@ define <2 x bfloat> @fma_bf16x2_maxnum_no_nans(<2 x bfloat> %a, <2 x bfloat> %b,
; CHECK-SM70-NEXT: .reg .pred %p<5>;
; CHECK-SM70-NEXT: .reg .b16 %rs<7>;
; CHECK-SM70-NEXT: .reg .b32 %r<43>;
-; CHECK-SM70-NEXT: .reg .f32 %f<13>;
+; CHECK-SM70-NEXT: .reg .b32 %f<13>;
; CHECK-SM70-EMPTY:
; CHECK-SM70-NEXT: // %bb.0:
; CHECK-SM70-NEXT: ld.param.b32 %r1, [fma_bf16x2_maxnum_no_nans_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/fp-contract.ll b/llvm/test/CodeGen/NVPTX/fp-contract.ll
index ea5da6e..bd559ea 100644
--- a/llvm/test/CodeGen/NVPTX/fp-contract.ll
+++ b/llvm/test/CodeGen/NVPTX/fp-contract.ll
@@ -15,7 +15,7 @@ target triple = "nvptx64-unknown-cuda"
define float @t0(float %a, float %b, float %c) {
; FAST-LABEL: t0(
; FAST: {
-; FAST-NEXT: .reg .f32 %f<5>;
+; FAST-NEXT: .reg .b32 %f<5>;
; FAST-EMPTY:
; FAST-NEXT: // %bb.0:
; FAST-NEXT: ld.param.f32 %f1, [t0_param_0];
@@ -27,7 +27,7 @@ define float @t0(float %a, float %b, float %c) {
;
; DEFAULT-LABEL: t0(
; DEFAULT: {
-; DEFAULT-NEXT: .reg .f32 %f<6>;
+; DEFAULT-NEXT: .reg .b32 %f<6>;
; DEFAULT-EMPTY:
; DEFAULT-NEXT: // %bb.0:
; DEFAULT-NEXT: ld.param.f32 %f1, [t0_param_0];
@@ -47,7 +47,7 @@ define float @t0(float %a, float %b, float %c) {
define float @t1(float %a, float %b) {
; FAST-LABEL: t1(
; FAST: {
-; FAST-NEXT: .reg .f32 %f<6>;
+; FAST-NEXT: .reg .b32 %f<6>;
; FAST-EMPTY:
; FAST-NEXT: // %bb.0:
; FAST-NEXT: ld.param.f32 %f1, [t1_param_0];
@@ -60,7 +60,7 @@ define float @t1(float %a, float %b) {
;
; DEFAULT-LABEL: t1(
; DEFAULT: {
-; DEFAULT-NEXT: .reg .f32 %f<6>;
+; DEFAULT-NEXT: .reg .b32 %f<6>;
; DEFAULT-EMPTY:
; DEFAULT-NEXT: // %bb.0:
; DEFAULT-NEXT: ld.param.f32 %f1, [t1_param_0];
@@ -81,7 +81,7 @@ define float @t1(float %a, float %b) {
define float @t2(float %a, float %b) {
; CHECK-LABEL: t2(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<6>;
+; CHECK-NEXT: .reg .b32 %f<6>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [t2_param_0];
@@ -101,7 +101,7 @@ define float @t2(float %a, float %b) {
define float @t3(float %a, float %b, float %c) {
; CHECK-LABEL: t3(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [t3_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/frem.ll b/llvm/test/CodeGen/NVPTX/frem.ll
index 73debfb..4077f6d 100644
--- a/llvm/test/CodeGen/NVPTX/frem.ll
+++ b/llvm/test/CodeGen/NVPTX/frem.ll
@@ -9,7 +9,7 @@ define half @frem_f16(half %a, half %b) {
; FAST-LABEL: frem_f16(
; FAST: {
; FAST-NEXT: .reg .b16 %rs<4>;
-; FAST-NEXT: .reg .f32 %f<7>;
+; FAST-NEXT: .reg .b32 %f<7>;
; FAST-EMPTY:
; FAST-NEXT: // %bb.0:
; FAST-NEXT: ld.param.b16 %rs1, [frem_f16_param_0];
@@ -28,7 +28,7 @@ define half @frem_f16(half %a, half %b) {
; NORMAL: {
; NORMAL-NEXT: .reg .pred %p<2>;
; NORMAL-NEXT: .reg .b16 %rs<4>;
-; NORMAL-NEXT: .reg .f32 %f<8>;
+; NORMAL-NEXT: .reg .b32 %f<8>;
; NORMAL-EMPTY:
; NORMAL-NEXT: // %bb.0:
; NORMAL-NEXT: ld.param.b16 %rs1, [frem_f16_param_0];
@@ -51,7 +51,7 @@ define half @frem_f16(half %a, half %b) {
define float @frem_f32(float %a, float %b) {
; FAST-LABEL: frem_f32(
; FAST: {
-; FAST-NEXT: .reg .f32 %f<7>;
+; FAST-NEXT: .reg .b32 %f<7>;
; FAST-EMPTY:
; FAST-NEXT: // %bb.0:
; FAST-NEXT: ld.param.f32 %f1, [frem_f32_param_0];
@@ -66,7 +66,7 @@ define float @frem_f32(float %a, float %b) {
; NORMAL-LABEL: frem_f32(
; NORMAL: {
; NORMAL-NEXT: .reg .pred %p<2>;
-; NORMAL-NEXT: .reg .f32 %f<8>;
+; NORMAL-NEXT: .reg .b32 %f<8>;
; NORMAL-EMPTY:
; NORMAL-NEXT: // %bb.0:
; NORMAL-NEXT: ld.param.f32 %f1, [frem_f32_param_0];
@@ -86,7 +86,7 @@ define float @frem_f32(float %a, float %b) {
define double @frem_f64(double %a, double %b) {
; FAST-LABEL: frem_f64(
; FAST: {
-; FAST-NEXT: .reg .f64 %fd<7>;
+; FAST-NEXT: .reg .b64 %fd<7>;
; FAST-EMPTY:
; FAST-NEXT: // %bb.0:
; FAST-NEXT: ld.param.f64 %fd1, [frem_f64_param_0];
@@ -101,7 +101,7 @@ define double @frem_f64(double %a, double %b) {
; NORMAL-LABEL: frem_f64(
; NORMAL: {
; NORMAL-NEXT: .reg .pred %p<2>;
-; NORMAL-NEXT: .reg .f64 %fd<8>;
+; NORMAL-NEXT: .reg .b64 %fd<8>;
; NORMAL-EMPTY:
; NORMAL-NEXT: // %bb.0:
; NORMAL-NEXT: ld.param.f64 %fd1, [frem_f64_param_0];
@@ -122,7 +122,7 @@ define half @frem_f16_ninf(half %a, half %b) {
; FAST-LABEL: frem_f16_ninf(
; FAST: {
; FAST-NEXT: .reg .b16 %rs<4>;
-; FAST-NEXT: .reg .f32 %f<7>;
+; FAST-NEXT: .reg .b32 %f<7>;
; FAST-EMPTY:
; FAST-NEXT: // %bb.0:
; FAST-NEXT: ld.param.b16 %rs1, [frem_f16_ninf_param_0];
@@ -140,7 +140,7 @@ define half @frem_f16_ninf(half %a, half %b) {
; NORMAL-LABEL: frem_f16_ninf(
; NORMAL: {
; NORMAL-NEXT: .reg .b16 %rs<4>;
-; NORMAL-NEXT: .reg .f32 %f<7>;
+; NORMAL-NEXT: .reg .b32 %f<7>;
; NORMAL-EMPTY:
; NORMAL-NEXT: // %bb.0:
; NORMAL-NEXT: ld.param.b16 %rs1, [frem_f16_ninf_param_0];
@@ -161,7 +161,7 @@ define half @frem_f16_ninf(half %a, half %b) {
define float @frem_f32_ninf(float %a, float %b) {
; FAST-LABEL: frem_f32_ninf(
; FAST: {
-; FAST-NEXT: .reg .f32 %f<7>;
+; FAST-NEXT: .reg .b32 %f<7>;
; FAST-EMPTY:
; FAST-NEXT: // %bb.0:
; FAST-NEXT: ld.param.f32 %f1, [frem_f32_ninf_param_0];
@@ -175,7 +175,7 @@ define float @frem_f32_ninf(float %a, float %b) {
;
; NORMAL-LABEL: frem_f32_ninf(
; NORMAL: {
-; NORMAL-NEXT: .reg .f32 %f<7>;
+; NORMAL-NEXT: .reg .b32 %f<7>;
; NORMAL-EMPTY:
; NORMAL-NEXT: // %bb.0:
; NORMAL-NEXT: ld.param.f32 %f1, [frem_f32_ninf_param_0];
@@ -193,7 +193,7 @@ define float @frem_f32_ninf(float %a, float %b) {
define double @frem_f64_ninf(double %a, double %b) {
; FAST-LABEL: frem_f64_ninf(
; FAST: {
-; FAST-NEXT: .reg .f64 %fd<7>;
+; FAST-NEXT: .reg .b64 %fd<7>;
; FAST-EMPTY:
; FAST-NEXT: // %bb.0:
; FAST-NEXT: ld.param.f64 %fd1, [frem_f64_ninf_param_0];
@@ -207,7 +207,7 @@ define double @frem_f64_ninf(double %a, double %b) {
;
; NORMAL-LABEL: frem_f64_ninf(
; NORMAL: {
-; NORMAL-NEXT: .reg .f64 %fd<7>;
+; NORMAL-NEXT: .reg .b64 %fd<7>;
; NORMAL-EMPTY:
; NORMAL-NEXT: // %bb.0:
; NORMAL-NEXT: ld.param.f64 %fd1, [frem_f64_ninf_param_0];
@@ -225,7 +225,7 @@ define double @frem_f64_ninf(double %a, double %b) {
define float @frem_f32_imm1(float %a) {
; FAST-LABEL: frem_f32_imm1(
; FAST: {
-; FAST-NEXT: .reg .f32 %f<5>;
+; FAST-NEXT: .reg .b32 %f<5>;
; FAST-EMPTY:
; FAST-NEXT: // %bb.0:
; FAST-NEXT: ld.param.f32 %f1, [frem_f32_imm1_param_0];
@@ -237,7 +237,7 @@ define float @frem_f32_imm1(float %a) {
;
; NORMAL-LABEL: frem_f32_imm1(
; NORMAL: {
-; NORMAL-NEXT: .reg .f32 %f<5>;
+; NORMAL-NEXT: .reg .b32 %f<5>;
; NORMAL-EMPTY:
; NORMAL-NEXT: // %bb.0:
; NORMAL-NEXT: ld.param.f32 %f1, [frem_f32_imm1_param_0];
@@ -253,7 +253,7 @@ define float @frem_f32_imm1(float %a) {
define float @frem_f32_imm2(float %a) {
; FAST-LABEL: frem_f32_imm2(
; FAST: {
-; FAST-NEXT: .reg .f32 %f<7>;
+; FAST-NEXT: .reg .b32 %f<7>;
; FAST-EMPTY:
; FAST-NEXT: // %bb.0:
; FAST-NEXT: ld.param.f32 %f1, [frem_f32_imm2_param_0];
@@ -268,7 +268,7 @@ define float @frem_f32_imm2(float %a) {
; NORMAL-LABEL: frem_f32_imm2(
; NORMAL: {
; NORMAL-NEXT: .reg .pred %p<2>;
-; NORMAL-NEXT: .reg .f32 %f<8>;
+; NORMAL-NEXT: .reg .b32 %f<8>;
; NORMAL-EMPTY:
; NORMAL-NEXT: // %bb.0:
; NORMAL-NEXT: ld.param.f32 %f1, [frem_f32_imm2_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
index 1b779db..65edcf2 100644
--- a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
@@ -1141,7 +1141,7 @@ define <4 x i8> @test_bitcast_float_to_4xi8(float %a) #0 {
; CHECK-LABEL: test_bitcast_float_to_4xi8(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [test_bitcast_float_to_4xi8_param_0];
@@ -1169,7 +1169,7 @@ define float @test_bitcast_4xi8_to_float(<4 x i8> %a) #0 {
; CHECK-LABEL: test_bitcast_4xi8_to_float(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u32 %r1, [test_bitcast_4xi8_to_float_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/intrinsics.ll b/llvm/test/CodeGen/NVPTX/intrinsics.ll
index cc6af06..01c51bb 100644
--- a/llvm/test/CodeGen/NVPTX/intrinsics.ll
+++ b/llvm/test/CodeGen/NVPTX/intrinsics.ll
@@ -7,7 +7,7 @@
define float @test_fabsf(float %f) {
; CHECK-LABEL: test_fabsf(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [test_fabsf_param_0];
@@ -21,7 +21,7 @@ define float @test_fabsf(float %f) {
define double @test_fabs(double %d) {
; CHECK-LABEL: test_fabs(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [test_fabs_param_0];
@@ -35,7 +35,7 @@ define double @test_fabs(double %d) {
define float @test_nvvm_sqrt(float %a) {
; CHECK-LABEL: test_nvvm_sqrt(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [test_nvvm_sqrt_param_0];
@@ -49,7 +49,7 @@ define float @test_nvvm_sqrt(float %a) {
define float @test_llvm_sqrt(float %a) {
; CHECK-LABEL: test_llvm_sqrt(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [test_llvm_sqrt_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/ldg-invariant.ll b/llvm/test/CodeGen/NVPTX/ldg-invariant.ll
index 16a0189..2fe2d28 100644
--- a/llvm/test/CodeGen/NVPTX/ldg-invariant.ll
+++ b/llvm/test/CodeGen/NVPTX/ldg-invariant.ll
@@ -27,7 +27,7 @@ define half @ld_global_v2f16(ptr addrspace(1) %ptr) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<4>;
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-NEXT: .reg .b32 %f<4>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -54,7 +54,7 @@ define half @ld_global_v4f16(ptr addrspace(1) %ptr) {
; CHECK-LABEL: ld_global_v4f16(
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<8>;
-; CHECK-NEXT: .reg .f32 %f<10>;
+; CHECK-NEXT: .reg .b32 %f<10>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -92,7 +92,7 @@ define half @ld_global_v8f16(ptr addrspace(1) %ptr) {
; CHECK: {
; CHECK-NEXT: .reg .b16 %rs<8>;
; CHECK-NEXT: .reg .b32 %r<5>;
-; CHECK-NEXT: .reg .f32 %f<10>;
+; CHECK-NEXT: .reg .b32 %f<10>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
diff --git a/llvm/test/CodeGen/NVPTX/ldu-ldg.ll b/llvm/test/CodeGen/NVPTX/ldu-ldg.ll
index 4c5c44a..2c1550aa 100644
--- a/llvm/test/CodeGen/NVPTX/ldu-ldg.ll
+++ b/llvm/test/CodeGen/NVPTX/ldu-ldg.ll
@@ -104,7 +104,7 @@ define ptr @test_ldu_p(ptr addrspace(1) %ptr) {
define float @test_ldu_f32(ptr addrspace(1) %ptr) {
; CHECK-LABEL: test_ldu_f32(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -120,7 +120,7 @@ define double @test_ldu_f64(ptr addrspace(1) %ptr) {
; CHECK-LABEL: test_ldu_f64(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-NEXT: .reg .b64 %fd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [test_ldu_f64_param_0];
@@ -241,7 +241,7 @@ define ptr @test_ldg_p(ptr addrspace(1) %ptr) {
define float @test_ldg_f32(ptr addrspace(1) %ptr) {
; CHECK-LABEL: test_ldg_f32(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -257,7 +257,7 @@ define double @test_ldg_f64(ptr addrspace(1) %ptr) {
; CHECK-LABEL: test_ldg_f64(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-NEXT: .reg .b64 %fd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [test_ldg_f64_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/load-store-scalars.ll b/llvm/test/CodeGen/NVPTX/load-store-scalars.ll
index ed94cb4..cb2e247 100644
--- a/llvm/test/CodeGen/NVPTX/load-store-scalars.ll
+++ b/llvm/test/CodeGen/NVPTX/load-store-scalars.ll
@@ -91,7 +91,7 @@ define void @generic_i64(ptr %a) {
define void @generic_float(ptr %a) {
; CHECK-LABEL: generic_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -110,7 +110,7 @@ define void @generic_double(ptr %a) {
; CHECK-LABEL: generic_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [generic_double_param_0];
@@ -200,7 +200,7 @@ define void @generic_volatile_i64(ptr %a) {
define void @generic_volatile_float(ptr %a) {
; CHECK-LABEL: generic_volatile_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -219,7 +219,7 @@ define void @generic_volatile_double(ptr %a) {
; CHECK-LABEL: generic_volatile_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [generic_volatile_double_param_0];
@@ -356,7 +356,7 @@ define void @generic_unordered_sys_i64(ptr %a) {
define void @generic_unordered_sys_float(ptr %a) {
; SM60-LABEL: generic_unordered_sys_float(
; SM60: {
-; SM60-NEXT: .reg .f32 %f<3>;
+; SM60-NEXT: .reg .b32 %f<3>;
; SM60-NEXT: .reg .b64 %rd<2>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
@@ -368,7 +368,7 @@ define void @generic_unordered_sys_float(ptr %a) {
;
; SM70-LABEL: generic_unordered_sys_float(
; SM70: {
-; SM70-NEXT: .reg .f32 %f<3>;
+; SM70-NEXT: .reg .b32 %f<3>;
; SM70-NEXT: .reg .b64 %rd<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
@@ -387,7 +387,7 @@ define void @generic_unordered_sys_double(ptr %a) {
; SM60-LABEL: generic_unordered_sys_double(
; SM60: {
; SM60-NEXT: .reg .b64 %rd<2>;
-; SM60-NEXT: .reg .f64 %fd<3>;
+; SM60-NEXT: .reg .b64 %fd<3>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
; SM60-NEXT: ld.param.u64 %rd1, [generic_unordered_sys_double_param_0];
@@ -399,7 +399,7 @@ define void @generic_unordered_sys_double(ptr %a) {
; SM70-LABEL: generic_unordered_sys_double(
; SM70: {
; SM70-NEXT: .reg .b64 %rd<2>;
-; SM70-NEXT: .reg .f64 %fd<3>;
+; SM70-NEXT: .reg .b64 %fd<3>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u64 %rd1, [generic_unordered_sys_double_param_0];
@@ -489,7 +489,7 @@ define void @generic_unordered_volatile_sys_i64(ptr %a) {
define void @generic_unordered_volatile_sys_float(ptr %a) {
; CHECK-LABEL: generic_unordered_volatile_sys_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -508,7 +508,7 @@ define void @generic_unordered_volatile_sys_double(ptr %a) {
; CHECK-LABEL: generic_unordered_volatile_sys_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [generic_unordered_volatile_sys_double_param_0];
@@ -645,7 +645,7 @@ define void @generic_monotonic_sys_i64(ptr %a) {
define void @generic_monotonic_sys_float(ptr %a) {
; SM60-LABEL: generic_monotonic_sys_float(
; SM60: {
-; SM60-NEXT: .reg .f32 %f<3>;
+; SM60-NEXT: .reg .b32 %f<3>;
; SM60-NEXT: .reg .b64 %rd<2>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
@@ -657,7 +657,7 @@ define void @generic_monotonic_sys_float(ptr %a) {
;
; SM70-LABEL: generic_monotonic_sys_float(
; SM70: {
-; SM70-NEXT: .reg .f32 %f<3>;
+; SM70-NEXT: .reg .b32 %f<3>;
; SM70-NEXT: .reg .b64 %rd<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
@@ -676,7 +676,7 @@ define void @generic_monotonic_sys_double(ptr %a) {
; SM60-LABEL: generic_monotonic_sys_double(
; SM60: {
; SM60-NEXT: .reg .b64 %rd<2>;
-; SM60-NEXT: .reg .f64 %fd<3>;
+; SM60-NEXT: .reg .b64 %fd<3>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
; SM60-NEXT: ld.param.u64 %rd1, [generic_monotonic_sys_double_param_0];
@@ -688,7 +688,7 @@ define void @generic_monotonic_sys_double(ptr %a) {
; SM70-LABEL: generic_monotonic_sys_double(
; SM70: {
; SM70-NEXT: .reg .b64 %rd<2>;
-; SM70-NEXT: .reg .f64 %fd<3>;
+; SM70-NEXT: .reg .b64 %fd<3>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u64 %rd1, [generic_monotonic_sys_double_param_0];
@@ -778,7 +778,7 @@ define void @generic_monotonic_volatile_sys_i64(ptr %a) {
define void @generic_monotonic_volatile_sys_float(ptr %a) {
; CHECK-LABEL: generic_monotonic_volatile_sys_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -797,7 +797,7 @@ define void @generic_monotonic_volatile_sys_double(ptr %a) {
; CHECK-LABEL: generic_monotonic_volatile_sys_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [generic_monotonic_volatile_sys_double_param_0];
@@ -889,7 +889,7 @@ define void @global_i64(ptr addrspace(1) %a) {
define void @global_float(ptr addrspace(1) %a) {
; CHECK-LABEL: global_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -908,7 +908,7 @@ define void @global_double(ptr addrspace(1) %a) {
; CHECK-LABEL: global_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [global_double_param_0];
@@ -998,7 +998,7 @@ define void @global_volatile_i64(ptr addrspace(1) %a) {
define void @global_volatile_float(ptr addrspace(1) %a) {
; CHECK-LABEL: global_volatile_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -1017,7 +1017,7 @@ define void @global_volatile_double(ptr addrspace(1) %a) {
; CHECK-LABEL: global_volatile_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [global_volatile_double_param_0];
@@ -1154,7 +1154,7 @@ define void @global_unordered_sys_i64(ptr addrspace(1) %a) {
define void @global_unordered_sys_float(ptr addrspace(1) %a) {
; SM60-LABEL: global_unordered_sys_float(
; SM60: {
-; SM60-NEXT: .reg .f32 %f<3>;
+; SM60-NEXT: .reg .b32 %f<3>;
; SM60-NEXT: .reg .b64 %rd<2>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
@@ -1166,7 +1166,7 @@ define void @global_unordered_sys_float(ptr addrspace(1) %a) {
;
; SM70-LABEL: global_unordered_sys_float(
; SM70: {
-; SM70-NEXT: .reg .f32 %f<3>;
+; SM70-NEXT: .reg .b32 %f<3>;
; SM70-NEXT: .reg .b64 %rd<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
@@ -1185,7 +1185,7 @@ define void @global_unordered_sys_double(ptr addrspace(1) %a) {
; SM60-LABEL: global_unordered_sys_double(
; SM60: {
; SM60-NEXT: .reg .b64 %rd<2>;
-; SM60-NEXT: .reg .f64 %fd<3>;
+; SM60-NEXT: .reg .b64 %fd<3>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
; SM60-NEXT: ld.param.u64 %rd1, [global_unordered_sys_double_param_0];
@@ -1197,7 +1197,7 @@ define void @global_unordered_sys_double(ptr addrspace(1) %a) {
; SM70-LABEL: global_unordered_sys_double(
; SM70: {
; SM70-NEXT: .reg .b64 %rd<2>;
-; SM70-NEXT: .reg .f64 %fd<3>;
+; SM70-NEXT: .reg .b64 %fd<3>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u64 %rd1, [global_unordered_sys_double_param_0];
@@ -1334,7 +1334,7 @@ define void @global_unordered_volatile_sys_i64(ptr addrspace(1) %a) {
define void @global_unordered_volatile_sys_float(ptr addrspace(1) %a) {
; SM60-LABEL: global_unordered_volatile_sys_float(
; SM60: {
-; SM60-NEXT: .reg .f32 %f<3>;
+; SM60-NEXT: .reg .b32 %f<3>;
; SM60-NEXT: .reg .b64 %rd<2>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
@@ -1346,7 +1346,7 @@ define void @global_unordered_volatile_sys_float(ptr addrspace(1) %a) {
;
; SM70-LABEL: global_unordered_volatile_sys_float(
; SM70: {
-; SM70-NEXT: .reg .f32 %f<3>;
+; SM70-NEXT: .reg .b32 %f<3>;
; SM70-NEXT: .reg .b64 %rd<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
@@ -1365,7 +1365,7 @@ define void @global_unordered_volatile_sys_double(ptr addrspace(1) %a) {
; SM60-LABEL: global_unordered_volatile_sys_double(
; SM60: {
; SM60-NEXT: .reg .b64 %rd<2>;
-; SM60-NEXT: .reg .f64 %fd<3>;
+; SM60-NEXT: .reg .b64 %fd<3>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
; SM60-NEXT: ld.param.u64 %rd1, [global_unordered_volatile_sys_double_param_0];
@@ -1377,7 +1377,7 @@ define void @global_unordered_volatile_sys_double(ptr addrspace(1) %a) {
; SM70-LABEL: global_unordered_volatile_sys_double(
; SM70: {
; SM70-NEXT: .reg .b64 %rd<2>;
-; SM70-NEXT: .reg .f64 %fd<3>;
+; SM70-NEXT: .reg .b64 %fd<3>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u64 %rd1, [global_unordered_volatile_sys_double_param_0];
@@ -1514,7 +1514,7 @@ define void @global_monotonic_sys_i64(ptr addrspace(1) %a) {
define void @global_monotonic_sys_float(ptr addrspace(1) %a) {
; SM60-LABEL: global_monotonic_sys_float(
; SM60: {
-; SM60-NEXT: .reg .f32 %f<3>;
+; SM60-NEXT: .reg .b32 %f<3>;
; SM60-NEXT: .reg .b64 %rd<2>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
@@ -1526,7 +1526,7 @@ define void @global_monotonic_sys_float(ptr addrspace(1) %a) {
;
; SM70-LABEL: global_monotonic_sys_float(
; SM70: {
-; SM70-NEXT: .reg .f32 %f<3>;
+; SM70-NEXT: .reg .b32 %f<3>;
; SM70-NEXT: .reg .b64 %rd<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
@@ -1545,7 +1545,7 @@ define void @global_monotonic_sys_double(ptr addrspace(1) %a) {
; SM60-LABEL: global_monotonic_sys_double(
; SM60: {
; SM60-NEXT: .reg .b64 %rd<2>;
-; SM60-NEXT: .reg .f64 %fd<3>;
+; SM60-NEXT: .reg .b64 %fd<3>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
; SM60-NEXT: ld.param.u64 %rd1, [global_monotonic_sys_double_param_0];
@@ -1557,7 +1557,7 @@ define void @global_monotonic_sys_double(ptr addrspace(1) %a) {
; SM70-LABEL: global_monotonic_sys_double(
; SM70: {
; SM70-NEXT: .reg .b64 %rd<2>;
-; SM70-NEXT: .reg .f64 %fd<3>;
+; SM70-NEXT: .reg .b64 %fd<3>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u64 %rd1, [global_monotonic_sys_double_param_0];
@@ -1694,7 +1694,7 @@ define void @global_monotonic_volatile_sys_i64(ptr addrspace(1) %a) {
define void @global_monotonic_volatile_sys_float(ptr addrspace(1) %a) {
; SM60-LABEL: global_monotonic_volatile_sys_float(
; SM60: {
-; SM60-NEXT: .reg .f32 %f<3>;
+; SM60-NEXT: .reg .b32 %f<3>;
; SM60-NEXT: .reg .b64 %rd<2>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
@@ -1706,7 +1706,7 @@ define void @global_monotonic_volatile_sys_float(ptr addrspace(1) %a) {
;
; SM70-LABEL: global_monotonic_volatile_sys_float(
; SM70: {
-; SM70-NEXT: .reg .f32 %f<3>;
+; SM70-NEXT: .reg .b32 %f<3>;
; SM70-NEXT: .reg .b64 %rd<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
@@ -1725,7 +1725,7 @@ define void @global_monotonic_volatile_sys_double(ptr addrspace(1) %a) {
; SM60-LABEL: global_monotonic_volatile_sys_double(
; SM60: {
; SM60-NEXT: .reg .b64 %rd<2>;
-; SM60-NEXT: .reg .f64 %fd<3>;
+; SM60-NEXT: .reg .b64 %fd<3>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
; SM60-NEXT: ld.param.u64 %rd1, [global_monotonic_volatile_sys_double_param_0];
@@ -1737,7 +1737,7 @@ define void @global_monotonic_volatile_sys_double(ptr addrspace(1) %a) {
; SM70-LABEL: global_monotonic_volatile_sys_double(
; SM70: {
; SM70-NEXT: .reg .b64 %rd<2>;
-; SM70-NEXT: .reg .f64 %fd<3>;
+; SM70-NEXT: .reg .b64 %fd<3>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u64 %rd1, [global_monotonic_volatile_sys_double_param_0];
@@ -1829,7 +1829,7 @@ define void @shared_i64(ptr addrspace(3) %a) {
define void @shared_float(ptr addrspace(3) %a) {
; CHECK-LABEL: shared_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -1848,7 +1848,7 @@ define void @shared_double(ptr addrspace(3) %a) {
; CHECK-LABEL: shared_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [shared_double_param_0];
@@ -1938,7 +1938,7 @@ define void @shared_volatile_i64(ptr addrspace(3) %a) {
define void @shared_volatile_float(ptr addrspace(3) %a) {
; CHECK-LABEL: shared_volatile_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -1957,7 +1957,7 @@ define void @shared_volatile_double(ptr addrspace(3) %a) {
; CHECK-LABEL: shared_volatile_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [shared_volatile_double_param_0];
@@ -2094,7 +2094,7 @@ define void @shared_unordered_sys_i64(ptr addrspace(3) %a) {
define void @shared_unordered_sys_float(ptr addrspace(3) %a) {
; SM60-LABEL: shared_unordered_sys_float(
; SM60: {
-; SM60-NEXT: .reg .f32 %f<3>;
+; SM60-NEXT: .reg .b32 %f<3>;
; SM60-NEXT: .reg .b64 %rd<2>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
@@ -2106,7 +2106,7 @@ define void @shared_unordered_sys_float(ptr addrspace(3) %a) {
;
; SM70-LABEL: shared_unordered_sys_float(
; SM70: {
-; SM70-NEXT: .reg .f32 %f<3>;
+; SM70-NEXT: .reg .b32 %f<3>;
; SM70-NEXT: .reg .b64 %rd<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
@@ -2125,7 +2125,7 @@ define void @shared_unordered_sys_double(ptr addrspace(3) %a) {
; SM60-LABEL: shared_unordered_sys_double(
; SM60: {
; SM60-NEXT: .reg .b64 %rd<2>;
-; SM60-NEXT: .reg .f64 %fd<3>;
+; SM60-NEXT: .reg .b64 %fd<3>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
; SM60-NEXT: ld.param.u64 %rd1, [shared_unordered_sys_double_param_0];
@@ -2137,7 +2137,7 @@ define void @shared_unordered_sys_double(ptr addrspace(3) %a) {
; SM70-LABEL: shared_unordered_sys_double(
; SM70: {
; SM70-NEXT: .reg .b64 %rd<2>;
-; SM70-NEXT: .reg .f64 %fd<3>;
+; SM70-NEXT: .reg .b64 %fd<3>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u64 %rd1, [shared_unordered_sys_double_param_0];
@@ -2227,7 +2227,7 @@ define void @shared_unordered_volatile_sys_i64(ptr addrspace(3) %a) {
define void @shared_unordered_volatile_sys_float(ptr addrspace(3) %a) {
; CHECK-LABEL: shared_unordered_volatile_sys_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -2246,7 +2246,7 @@ define void @shared_unordered_volatile_sys_double(ptr addrspace(3) %a) {
; CHECK-LABEL: shared_unordered_volatile_sys_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [shared_unordered_volatile_sys_double_param_0];
@@ -2383,7 +2383,7 @@ define void @shared_monotonic_sys_i64(ptr addrspace(3) %a) {
define void @shared_monotonic_sys_float(ptr addrspace(3) %a) {
; SM60-LABEL: shared_monotonic_sys_float(
; SM60: {
-; SM60-NEXT: .reg .f32 %f<3>;
+; SM60-NEXT: .reg .b32 %f<3>;
; SM60-NEXT: .reg .b64 %rd<2>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
@@ -2395,7 +2395,7 @@ define void @shared_monotonic_sys_float(ptr addrspace(3) %a) {
;
; SM70-LABEL: shared_monotonic_sys_float(
; SM70: {
-; SM70-NEXT: .reg .f32 %f<3>;
+; SM70-NEXT: .reg .b32 %f<3>;
; SM70-NEXT: .reg .b64 %rd<2>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
@@ -2414,7 +2414,7 @@ define void @shared_monotonic_sys_double(ptr addrspace(3) %a) {
; SM60-LABEL: shared_monotonic_sys_double(
; SM60: {
; SM60-NEXT: .reg .b64 %rd<2>;
-; SM60-NEXT: .reg .f64 %fd<3>;
+; SM60-NEXT: .reg .b64 %fd<3>;
; SM60-EMPTY:
; SM60-NEXT: // %bb.0:
; SM60-NEXT: ld.param.u64 %rd1, [shared_monotonic_sys_double_param_0];
@@ -2426,7 +2426,7 @@ define void @shared_monotonic_sys_double(ptr addrspace(3) %a) {
; SM70-LABEL: shared_monotonic_sys_double(
; SM70: {
; SM70-NEXT: .reg .b64 %rd<2>;
-; SM70-NEXT: .reg .f64 %fd<3>;
+; SM70-NEXT: .reg .b64 %fd<3>;
; SM70-EMPTY:
; SM70-NEXT: // %bb.0:
; SM70-NEXT: ld.param.u64 %rd1, [shared_monotonic_sys_double_param_0];
@@ -2516,7 +2516,7 @@ define void @shared_monotonic_volatile_sys_i64(ptr addrspace(3) %a) {
define void @shared_monotonic_volatile_sys_float(ptr addrspace(3) %a) {
; CHECK-LABEL: shared_monotonic_volatile_sys_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -2535,7 +2535,7 @@ define void @shared_monotonic_volatile_sys_double(ptr addrspace(3) %a) {
; CHECK-LABEL: shared_monotonic_volatile_sys_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [shared_monotonic_volatile_sys_double_param_0];
@@ -2627,7 +2627,7 @@ define void @local_i64(ptr addrspace(5) %a) {
define void @local_float(ptr addrspace(5) %a) {
; CHECK-LABEL: local_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -2646,7 +2646,7 @@ define void @local_double(ptr addrspace(5) %a) {
; CHECK-LABEL: local_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [local_double_param_0];
@@ -2736,7 +2736,7 @@ define void @local_volatile_i64(ptr addrspace(5) %a) {
define void @local_volatile_float(ptr addrspace(5) %a) {
; CHECK-LABEL: local_volatile_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -2755,7 +2755,7 @@ define void @local_volatile_double(ptr addrspace(5) %a) {
; CHECK-LABEL: local_volatile_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [local_volatile_double_param_0];
@@ -2845,7 +2845,7 @@ define void @local_unordered_sys_i64(ptr addrspace(5) %a) {
define void @local_unordered_sys_float(ptr addrspace(5) %a) {
; CHECK-LABEL: local_unordered_sys_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -2864,7 +2864,7 @@ define void @local_unordered_sys_double(ptr addrspace(5) %a) {
; CHECK-LABEL: local_unordered_sys_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [local_unordered_sys_double_param_0];
@@ -2954,7 +2954,7 @@ define void @local_unordered_volatile_sys_i64(ptr addrspace(5) %a) {
define void @local_unordered_volatile_sys_float(ptr addrspace(5) %a) {
; CHECK-LABEL: local_unordered_volatile_sys_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -2973,7 +2973,7 @@ define void @local_unordered_volatile_sys_double(ptr addrspace(5) %a) {
; CHECK-LABEL: local_unordered_volatile_sys_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [local_unordered_volatile_sys_double_param_0];
@@ -3063,7 +3063,7 @@ define void @local_monotonic_sys_i64(ptr addrspace(5) %a) {
define void @local_monotonic_sys_float(ptr addrspace(5) %a) {
; CHECK-LABEL: local_monotonic_sys_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -3082,7 +3082,7 @@ define void @local_monotonic_sys_double(ptr addrspace(5) %a) {
; CHECK-LABEL: local_monotonic_sys_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [local_monotonic_sys_double_param_0];
@@ -3172,7 +3172,7 @@ define void @local_monotonic_volatile_sys_i64(ptr addrspace(5) %a) {
define void @local_monotonic_volatile_sys_float(ptr addrspace(5) %a) {
; CHECK-LABEL: local_monotonic_volatile_sys_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -3191,7 +3191,7 @@ define void @local_monotonic_volatile_sys_double(ptr addrspace(5) %a) {
; CHECK-LABEL: local_monotonic_volatile_sys_double(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [local_monotonic_volatile_sys_double_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/load-store-vectors.ll b/llvm/test/CodeGen/NVPTX/load-store-vectors.ll
index ba397dc..3215fce 100644
--- a/llvm/test/CodeGen/NVPTX/load-store-vectors.ll
+++ b/llvm/test/CodeGen/NVPTX/load-store-vectors.ll
@@ -371,7 +371,7 @@ define void @generic_2xi64(ptr %a) {
define void @generic_2xfloat(ptr %a) {
; CHECK-LABEL: generic_2xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -390,7 +390,7 @@ define void @generic_2xfloat(ptr %a) {
define void @generic_4xfloat(ptr %a) {
; CHECK-LABEL: generic_4xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<9>;
+; CHECK-NEXT: .reg .b32 %f<9>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -412,7 +412,7 @@ define void @generic_2xdouble(ptr %a) {
; CHECK-LABEL: generic_2xdouble(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<5>;
+; CHECK-NEXT: .reg .b64 %fd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [generic_2xdouble_param_0];
@@ -792,7 +792,7 @@ define void @generic_volatile_2xi64(ptr %a) {
define void @generic_volatile_2xfloat(ptr %a) {
; CHECK-LABEL: generic_volatile_2xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -811,7 +811,7 @@ define void @generic_volatile_2xfloat(ptr %a) {
define void @generic_volatile_4xfloat(ptr %a) {
; CHECK-LABEL: generic_volatile_4xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<9>;
+; CHECK-NEXT: .reg .b32 %f<9>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -833,7 +833,7 @@ define void @generic_volatile_2xdouble(ptr %a) {
; CHECK-LABEL: generic_volatile_2xdouble(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<5>;
+; CHECK-NEXT: .reg .b64 %fd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [generic_volatile_2xdouble_param_0];
@@ -1196,7 +1196,7 @@ define void @global_2xi64(ptr addrspace(1) %a) {
define void @global_2xfloat(ptr addrspace(1) %a) {
; CHECK-LABEL: global_2xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -1215,7 +1215,7 @@ define void @global_2xfloat(ptr addrspace(1) %a) {
define void @global_4xfloat(ptr addrspace(1) %a) {
; CHECK-LABEL: global_4xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<9>;
+; CHECK-NEXT: .reg .b32 %f<9>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -1237,7 +1237,7 @@ define void @global_2xdouble(ptr addrspace(1) %a) {
; CHECK-LABEL: global_2xdouble(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<5>;
+; CHECK-NEXT: .reg .b64 %fd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [global_2xdouble_param_0];
@@ -1598,7 +1598,7 @@ define void @global_volatile_2xi64(ptr addrspace(1) %a) {
define void @global_volatile_2xfloat(ptr addrspace(1) %a) {
; CHECK-LABEL: global_volatile_2xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -1617,7 +1617,7 @@ define void @global_volatile_2xfloat(ptr addrspace(1) %a) {
define void @global_volatile_4xfloat(ptr addrspace(1) %a) {
; CHECK-LABEL: global_volatile_4xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<9>;
+; CHECK-NEXT: .reg .b32 %f<9>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -1639,7 +1639,7 @@ define void @global_volatile_2xdouble(ptr addrspace(1) %a) {
; CHECK-LABEL: global_volatile_2xdouble(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<5>;
+; CHECK-NEXT: .reg .b64 %fd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [global_volatile_2xdouble_param_0];
@@ -2002,7 +2002,7 @@ define void @shared_2xi64(ptr addrspace(3) %a) {
define void @shared_2xfloat(ptr addrspace(3) %a) {
; CHECK-LABEL: shared_2xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -2021,7 +2021,7 @@ define void @shared_2xfloat(ptr addrspace(3) %a) {
define void @shared_4xfloat(ptr addrspace(3) %a) {
; CHECK-LABEL: shared_4xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<9>;
+; CHECK-NEXT: .reg .b32 %f<9>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -2043,7 +2043,7 @@ define void @shared_2xdouble(ptr addrspace(3) %a) {
; CHECK-LABEL: shared_2xdouble(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<5>;
+; CHECK-NEXT: .reg .b64 %fd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [shared_2xdouble_param_0];
@@ -2404,7 +2404,7 @@ define void @shared_volatile_2xi64(ptr addrspace(3) %a) {
define void @shared_volatile_2xfloat(ptr addrspace(3) %a) {
; CHECK-LABEL: shared_volatile_2xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -2423,7 +2423,7 @@ define void @shared_volatile_2xfloat(ptr addrspace(3) %a) {
define void @shared_volatile_4xfloat(ptr addrspace(3) %a) {
; CHECK-LABEL: shared_volatile_4xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<9>;
+; CHECK-NEXT: .reg .b32 %f<9>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -2445,7 +2445,7 @@ define void @shared_volatile_2xdouble(ptr addrspace(3) %a) {
; CHECK-LABEL: shared_volatile_2xdouble(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<5>;
+; CHECK-NEXT: .reg .b64 %fd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [shared_volatile_2xdouble_param_0];
@@ -2808,7 +2808,7 @@ define void @local_2xi64(ptr addrspace(5) %a) {
define void @local_2xfloat(ptr addrspace(5) %a) {
; CHECK-LABEL: local_2xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -2827,7 +2827,7 @@ define void @local_2xfloat(ptr addrspace(5) %a) {
define void @local_4xfloat(ptr addrspace(5) %a) {
; CHECK-LABEL: local_4xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<9>;
+; CHECK-NEXT: .reg .b32 %f<9>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -2849,7 +2849,7 @@ define void @local_2xdouble(ptr addrspace(5) %a) {
; CHECK-LABEL: local_2xdouble(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<5>;
+; CHECK-NEXT: .reg .b64 %fd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [local_2xdouble_param_0];
@@ -3210,7 +3210,7 @@ define void @local_volatile_2xi64(ptr addrspace(5) %a) {
define void @local_volatile_2xfloat(ptr addrspace(5) %a) {
; CHECK-LABEL: local_volatile_2xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -3229,7 +3229,7 @@ define void @local_volatile_2xfloat(ptr addrspace(5) %a) {
define void @local_volatile_4xfloat(ptr addrspace(5) %a) {
; CHECK-LABEL: local_volatile_4xfloat(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<9>;
+; CHECK-NEXT: .reg .b32 %f<9>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -3251,7 +3251,7 @@ define void @local_volatile_2xdouble(ptr addrspace(5) %a) {
; CHECK-LABEL: local_volatile_2xdouble(
; CHECK: {
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<5>;
+; CHECK-NEXT: .reg .b64 %fd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u64 %rd1, [local_volatile_2xdouble_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/math-intrins.ll b/llvm/test/CodeGen/NVPTX/math-intrins.ll
index 189f342..a6d01c1 100644
--- a/llvm/test/CodeGen/NVPTX/math-intrins.ll
+++ b/llvm/test/CodeGen/NVPTX/math-intrins.ll
@@ -50,7 +50,7 @@ declare double @llvm.fma.f64(double, double, double) #0
define float @ceil_float(float %a) {
; CHECK-LABEL: ceil_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [ceil_float_param_0];
@@ -64,7 +64,7 @@ define float @ceil_float(float %a) {
define float @ceil_float_ftz(float %a) #1 {
; CHECK-LABEL: ceil_float_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [ceil_float_ftz_param_0];
@@ -78,7 +78,7 @@ define float @ceil_float_ftz(float %a) #1 {
define double @ceil_double(double %a) {
; CHECK-LABEL: ceil_double(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [ceil_double_param_0];
@@ -94,7 +94,7 @@ define double @ceil_double(double %a) {
define float @floor_float(float %a) {
; CHECK-LABEL: floor_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [floor_float_param_0];
@@ -108,7 +108,7 @@ define float @floor_float(float %a) {
define float @floor_float_ftz(float %a) #1 {
; CHECK-LABEL: floor_float_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [floor_float_ftz_param_0];
@@ -122,7 +122,7 @@ define float @floor_float_ftz(float %a) #1 {
define double @floor_double(double %a) {
; CHECK-LABEL: floor_double(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [floor_double_param_0];
@@ -141,7 +141,7 @@ define float @round_float(float %a) {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b32 %r<4>;
-; CHECK-NEXT: .reg .f32 %f<9>;
+; CHECK-NEXT: .reg .b32 %f<9>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [round_float_param_0];
@@ -169,7 +169,7 @@ define float @round_float_ftz(float %a) #1 {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
; CHECK-NEXT: .reg .b32 %r<4>;
-; CHECK-NEXT: .reg .f32 %f<9>;
+; CHECK-NEXT: .reg .b32 %f<9>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [round_float_ftz_param_0];
@@ -196,7 +196,7 @@ define double @round_double(double %a) {
; CHECK-LABEL: round_double(
; CHECK: {
; CHECK-NEXT: .reg .pred %p<3>;
-; CHECK-NEXT: .reg .f64 %fd<8>;
+; CHECK-NEXT: .reg .b64 %fd<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [round_double_param_0];
@@ -219,7 +219,7 @@ define double @round_double(double %a) {
define float @nearbyint_float(float %a) {
; CHECK-LABEL: nearbyint_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [nearbyint_float_param_0];
@@ -233,7 +233,7 @@ define float @nearbyint_float(float %a) {
define float @nearbyint_float_ftz(float %a) #1 {
; CHECK-LABEL: nearbyint_float_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [nearbyint_float_ftz_param_0];
@@ -247,7 +247,7 @@ define float @nearbyint_float_ftz(float %a) #1 {
define double @nearbyint_double(double %a) {
; CHECK-LABEL: nearbyint_double(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [nearbyint_double_param_0];
@@ -263,7 +263,7 @@ define double @nearbyint_double(double %a) {
define float @rint_float(float %a) {
; CHECK-LABEL: rint_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [rint_float_param_0];
@@ -277,7 +277,7 @@ define float @rint_float(float %a) {
define float @rint_float_ftz(float %a) #1 {
; CHECK-LABEL: rint_float_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [rint_float_ftz_param_0];
@@ -291,7 +291,7 @@ define float @rint_float_ftz(float %a) #1 {
define double @rint_double(double %a) {
; CHECK-LABEL: rint_double(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [rint_double_param_0];
@@ -307,7 +307,7 @@ define double @rint_double(double %a) {
define float @roundeven_float(float %a) {
; CHECK-LABEL: roundeven_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [roundeven_float_param_0];
@@ -321,7 +321,7 @@ define float @roundeven_float(float %a) {
define float @roundeven_float_ftz(float %a) #1 {
; CHECK-LABEL: roundeven_float_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [roundeven_float_ftz_param_0];
@@ -335,7 +335,7 @@ define float @roundeven_float_ftz(float %a) #1 {
define double @roundeven_double(double %a) {
; CHECK-LABEL: roundeven_double(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [roundeven_double_param_0];
@@ -351,7 +351,7 @@ define double @roundeven_double(double %a) {
define float @trunc_float(float %a) {
; CHECK-LABEL: trunc_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [trunc_float_param_0];
@@ -365,7 +365,7 @@ define float @trunc_float(float %a) {
define float @trunc_float_ftz(float %a) #1 {
; CHECK-LABEL: trunc_float_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [trunc_float_ftz_param_0];
@@ -379,7 +379,7 @@ define float @trunc_float_ftz(float %a) #1 {
define double @trunc_double(double %a) {
; CHECK-LABEL: trunc_double(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [trunc_double_param_0];
@@ -395,7 +395,7 @@ define double @trunc_double(double %a) {
define float @abs_float(float %a) {
; CHECK-LABEL: abs_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [abs_float_param_0];
@@ -409,7 +409,7 @@ define float @abs_float(float %a) {
define float @abs_float_ftz(float %a) #1 {
; CHECK-LABEL: abs_float_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [abs_float_ftz_param_0];
@@ -423,7 +423,7 @@ define float @abs_float_ftz(float %a) #1 {
define double @abs_double(double %a) {
; CHECK-LABEL: abs_double(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-NEXT: .reg .b64 %fd<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [abs_double_param_0];
@@ -440,7 +440,7 @@ define half @minnum_half(half %a, half %b) {
; CHECK-NOF16-LABEL: minnum_half(
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<4>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<4>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<4>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b16 %rs1, [minnum_half_param_0];
@@ -466,7 +466,7 @@ define half @minnum_half(half %a, half %b) {
; CHECK-SM80-NOF16-LABEL: minnum_half(
; CHECK-SM80-NOF16: {
; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<4>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<4>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.b16 %rs1, [minnum_half_param_0];
@@ -484,7 +484,7 @@ define half @minnum_half(half %a, half %b) {
define float @minnum_float(float %a, float %b) {
; CHECK-LABEL: minnum_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-NEXT: .reg .b32 %f<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [minnum_float_param_0];
@@ -499,7 +499,7 @@ define float @minnum_float(float %a, float %b) {
define float @minnum_imm1(float %a) {
; CHECK-LABEL: minnum_imm1(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [minnum_imm1_param_0];
@@ -513,7 +513,7 @@ define float @minnum_imm1(float %a) {
define float @minnum_imm2(float %a) {
; CHECK-LABEL: minnum_imm2(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [minnum_imm2_param_0];
@@ -527,7 +527,7 @@ define float @minnum_imm2(float %a) {
define float @minnum_float_ftz(float %a, float %b) #1 {
; CHECK-LABEL: minnum_float_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-NEXT: .reg .b32 %f<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [minnum_float_ftz_param_0];
@@ -542,7 +542,7 @@ define float @minnum_float_ftz(float %a, float %b) #1 {
define double @minnum_double(double %a, double %b) {
; CHECK-LABEL: minnum_double(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<4>;
+; CHECK-NEXT: .reg .b64 %fd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [minnum_double_param_0];
@@ -559,7 +559,7 @@ define <2 x half> @minnum_v2half(<2 x half> %a, <2 x half> %b) {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<4>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<7>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<7>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r1, [minnum_v2half_param_0];
@@ -593,7 +593,7 @@ define <2 x half> @minnum_v2half(<2 x half> %a, <2 x half> %b) {
; CHECK-SM80-NOF16: {
; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-SM80-NOF16-NEXT: .reg .b32 %r<4>;
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<7>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<7>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.b32 %r1, [minnum_v2half_param_0];
@@ -622,7 +622,7 @@ define half @minimum_half(half %a, half %b) {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .pred %p<6>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<8>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<4>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<4>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b16 %rs1, [minimum_half_param_0];
@@ -658,7 +658,7 @@ define half @minimum_half(half %a, half %b) {
; CHECK-SM80-NOF16: {
; CHECK-SM80-NOF16-NEXT: .reg .pred %p<6>;
; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<8>;
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<4>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<4>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.b16 %rs1, [minimum_half_param_0];
@@ -687,7 +687,7 @@ define float @minimum_float(float %a, float %b) {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .pred %p<5>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<8>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<8>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.f32 %f1, [minimum_float_param_0];
@@ -708,7 +708,7 @@ define float @minimum_float(float %a, float %b) {
;
; CHECK-F16-LABEL: minimum_float(
; CHECK-F16: {
-; CHECK-F16-NEXT: .reg .f32 %f<4>;
+; CHECK-F16-NEXT: .reg .b32 %f<4>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
; CHECK-F16-NEXT: ld.param.f32 %f1, [minimum_float_param_0];
@@ -719,7 +719,7 @@ define float @minimum_float(float %a, float %b) {
;
; CHECK-SM80-NOF16-LABEL: minimum_float(
; CHECK-SM80-NOF16: {
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<4>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<4>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [minimum_float_param_0];
@@ -736,7 +736,7 @@ define float @minimum_imm1(float %a) {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .pred %p<4>;
; CHECK-NOF16-NEXT: .reg .b32 %r<2>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<6>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<6>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.f32 %f1, [minimum_imm1_param_0];
@@ -753,7 +753,7 @@ define float @minimum_imm1(float %a) {
;
; CHECK-F16-LABEL: minimum_imm1(
; CHECK-F16: {
-; CHECK-F16-NEXT: .reg .f32 %f<3>;
+; CHECK-F16-NEXT: .reg .b32 %f<3>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
; CHECK-F16-NEXT: ld.param.f32 %f1, [minimum_imm1_param_0];
@@ -763,7 +763,7 @@ define float @minimum_imm1(float %a) {
;
; CHECK-SM80-NOF16-LABEL: minimum_imm1(
; CHECK-SM80-NOF16: {
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<3>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<3>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [minimum_imm1_param_0];
@@ -779,7 +779,7 @@ define float @minimum_imm2(float %a) {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .pred %p<4>;
; CHECK-NOF16-NEXT: .reg .b32 %r<2>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<6>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<6>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.f32 %f1, [minimum_imm2_param_0];
@@ -796,7 +796,7 @@ define float @minimum_imm2(float %a) {
;
; CHECK-F16-LABEL: minimum_imm2(
; CHECK-F16: {
-; CHECK-F16-NEXT: .reg .f32 %f<3>;
+; CHECK-F16-NEXT: .reg .b32 %f<3>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
; CHECK-F16-NEXT: ld.param.f32 %f1, [minimum_imm2_param_0];
@@ -806,7 +806,7 @@ define float @minimum_imm2(float %a) {
;
; CHECK-SM80-NOF16-LABEL: minimum_imm2(
; CHECK-SM80-NOF16: {
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<3>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<3>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [minimum_imm2_param_0];
@@ -822,7 +822,7 @@ define float @minimum_float_ftz(float %a, float %b) #1 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .pred %p<5>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<8>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<8>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.f32 %f1, [minimum_float_ftz_param_0];
@@ -843,7 +843,7 @@ define float @minimum_float_ftz(float %a, float %b) #1 {
;
; CHECK-F16-LABEL: minimum_float_ftz(
; CHECK-F16: {
-; CHECK-F16-NEXT: .reg .f32 %f<4>;
+; CHECK-F16-NEXT: .reg .b32 %f<4>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
; CHECK-F16-NEXT: ld.param.f32 %f1, [minimum_float_ftz_param_0];
@@ -854,7 +854,7 @@ define float @minimum_float_ftz(float %a, float %b) #1 {
;
; CHECK-SM80-NOF16-LABEL: minimum_float_ftz(
; CHECK-SM80-NOF16: {
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<4>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<4>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [minimum_float_ftz_param_0];
@@ -871,7 +871,7 @@ define double @minimum_double(double %a, double %b) {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
-; CHECK-NEXT: .reg .f64 %fd<8>;
+; CHECK-NEXT: .reg .b64 %fd<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [minimum_double_param_0];
@@ -899,7 +899,7 @@ define <2 x half> @minimum_v2half(<2 x half> %a, <2 x half> %b) {
; CHECK-NOF16-NEXT: .reg .pred %p<11>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<15>;
; CHECK-NOF16-NEXT: .reg .b32 %r<4>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<7>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<7>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r1, [minimum_v2half_param_0];
@@ -952,7 +952,7 @@ define <2 x half> @minimum_v2half(<2 x half> %a, <2 x half> %b) {
; CHECK-SM80-NOF16-NEXT: .reg .pred %p<11>;
; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<15>;
; CHECK-SM80-NOF16-NEXT: .reg .b32 %r<4>;
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<7>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<7>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.b32 %r1, [minimum_v2half_param_0];
@@ -998,7 +998,7 @@ define half @maxnum_half(half %a, half %b) {
; CHECK-NOF16-LABEL: maxnum_half(
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<4>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<4>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<4>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b16 %rs1, [maxnum_half_param_0];
@@ -1024,7 +1024,7 @@ define half @maxnum_half(half %a, half %b) {
; CHECK-SM80-NOF16-LABEL: maxnum_half(
; CHECK-SM80-NOF16: {
; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<4>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<4>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.b16 %rs1, [maxnum_half_param_0];
@@ -1042,7 +1042,7 @@ define half @maxnum_half(half %a, half %b) {
define float @maxnum_imm1(float %a) {
; CHECK-LABEL: maxnum_imm1(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [maxnum_imm1_param_0];
@@ -1056,7 +1056,7 @@ define float @maxnum_imm1(float %a) {
define float @maxnum_imm2(float %a) {
; CHECK-LABEL: maxnum_imm2(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [maxnum_imm2_param_0];
@@ -1070,7 +1070,7 @@ define float @maxnum_imm2(float %a) {
define float @maxnum_float(float %a, float %b) {
; CHECK-LABEL: maxnum_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-NEXT: .reg .b32 %f<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [maxnum_float_param_0];
@@ -1085,7 +1085,7 @@ define float @maxnum_float(float %a, float %b) {
define float @maxnum_float_ftz(float %a, float %b) #1 {
; CHECK-LABEL: maxnum_float_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-NEXT: .reg .b32 %f<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [maxnum_float_ftz_param_0];
@@ -1100,7 +1100,7 @@ define float @maxnum_float_ftz(float %a, float %b) #1 {
define double @maxnum_double(double %a, double %b) {
; CHECK-LABEL: maxnum_double(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<4>;
+; CHECK-NEXT: .reg .b64 %fd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [maxnum_double_param_0];
@@ -1117,7 +1117,7 @@ define <2 x half> @maxnum_v2half(<2 x half> %a, <2 x half> %b) {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-NOF16-NEXT: .reg .b32 %r<4>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<7>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<7>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r1, [maxnum_v2half_param_0];
@@ -1151,7 +1151,7 @@ define <2 x half> @maxnum_v2half(<2 x half> %a, <2 x half> %b) {
; CHECK-SM80-NOF16: {
; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<7>;
; CHECK-SM80-NOF16-NEXT: .reg .b32 %r<4>;
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<7>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<7>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.b32 %r1, [maxnum_v2half_param_0];
@@ -1180,7 +1180,7 @@ define half @maximum_half(half %a, half %b) {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .pred %p<6>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<8>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<4>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<4>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b16 %rs1, [maximum_half_param_0];
@@ -1216,7 +1216,7 @@ define half @maximum_half(half %a, half %b) {
; CHECK-SM80-NOF16: {
; CHECK-SM80-NOF16-NEXT: .reg .pred %p<6>;
; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<8>;
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<4>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<4>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.b16 %rs1, [maximum_half_param_0];
@@ -1244,7 +1244,7 @@ define float @maximum_imm1(float %a) {
; CHECK-NOF16-LABEL: maximum_imm1(
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.f32 %f1, [maximum_imm1_param_0];
@@ -1258,7 +1258,7 @@ define float @maximum_imm1(float %a) {
;
; CHECK-F16-LABEL: maximum_imm1(
; CHECK-F16: {
-; CHECK-F16-NEXT: .reg .f32 %f<3>;
+; CHECK-F16-NEXT: .reg .b32 %f<3>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
; CHECK-F16-NEXT: ld.param.f32 %f1, [maximum_imm1_param_0];
@@ -1268,7 +1268,7 @@ define float @maximum_imm1(float %a) {
;
; CHECK-SM80-NOF16-LABEL: maximum_imm1(
; CHECK-SM80-NOF16: {
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<3>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<3>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [maximum_imm1_param_0];
@@ -1283,7 +1283,7 @@ define float @maximum_imm2(float %a) {
; CHECK-NOF16-LABEL: maximum_imm2(
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .pred %p<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<5>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<5>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.f32 %f1, [maximum_imm2_param_0];
@@ -1297,7 +1297,7 @@ define float @maximum_imm2(float %a) {
;
; CHECK-F16-LABEL: maximum_imm2(
; CHECK-F16: {
-; CHECK-F16-NEXT: .reg .f32 %f<3>;
+; CHECK-F16-NEXT: .reg .b32 %f<3>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
; CHECK-F16-NEXT: ld.param.f32 %f1, [maximum_imm2_param_0];
@@ -1307,7 +1307,7 @@ define float @maximum_imm2(float %a) {
;
; CHECK-SM80-NOF16-LABEL: maximum_imm2(
; CHECK-SM80-NOF16: {
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<3>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<3>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [maximum_imm2_param_0];
@@ -1323,7 +1323,7 @@ define float @maximum_float(float %a, float %b) {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .pred %p<5>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<8>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<8>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.f32 %f1, [maximum_float_param_0];
@@ -1344,7 +1344,7 @@ define float @maximum_float(float %a, float %b) {
;
; CHECK-F16-LABEL: maximum_float(
; CHECK-F16: {
-; CHECK-F16-NEXT: .reg .f32 %f<4>;
+; CHECK-F16-NEXT: .reg .b32 %f<4>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
; CHECK-F16-NEXT: ld.param.f32 %f1, [maximum_float_param_0];
@@ -1355,7 +1355,7 @@ define float @maximum_float(float %a, float %b) {
;
; CHECK-SM80-NOF16-LABEL: maximum_float(
; CHECK-SM80-NOF16: {
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<4>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<4>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [maximum_float_param_0];
@@ -1372,7 +1372,7 @@ define float @maximum_float_ftz(float %a, float %b) #1 {
; CHECK-NOF16: {
; CHECK-NOF16-NEXT: .reg .pred %p<5>;
; CHECK-NOF16-NEXT: .reg .b32 %r<3>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<8>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<8>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.f32 %f1, [maximum_float_ftz_param_0];
@@ -1393,7 +1393,7 @@ define float @maximum_float_ftz(float %a, float %b) #1 {
;
; CHECK-F16-LABEL: maximum_float_ftz(
; CHECK-F16: {
-; CHECK-F16-NEXT: .reg .f32 %f<4>;
+; CHECK-F16-NEXT: .reg .b32 %f<4>;
; CHECK-F16-EMPTY:
; CHECK-F16-NEXT: // %bb.0:
; CHECK-F16-NEXT: ld.param.f32 %f1, [maximum_float_ftz_param_0];
@@ -1404,7 +1404,7 @@ define float @maximum_float_ftz(float %a, float %b) #1 {
;
; CHECK-SM80-NOF16-LABEL: maximum_float_ftz(
; CHECK-SM80-NOF16: {
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<4>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<4>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [maximum_float_ftz_param_0];
@@ -1421,7 +1421,7 @@ define double @maximum_double(double %a, double %b) {
; CHECK: {
; CHECK-NEXT: .reg .pred %p<5>;
; CHECK-NEXT: .reg .b64 %rd<3>;
-; CHECK-NEXT: .reg .f64 %fd<8>;
+; CHECK-NEXT: .reg .b64 %fd<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [maximum_double_param_0];
@@ -1449,7 +1449,7 @@ define <2 x half> @maximum_v2half(<2 x half> %a, <2 x half> %b) {
; CHECK-NOF16-NEXT: .reg .pred %p<11>;
; CHECK-NOF16-NEXT: .reg .b16 %rs<15>;
; CHECK-NOF16-NEXT: .reg .b32 %r<4>;
-; CHECK-NOF16-NEXT: .reg .f32 %f<7>;
+; CHECK-NOF16-NEXT: .reg .b32 %f<7>;
; CHECK-NOF16-EMPTY:
; CHECK-NOF16-NEXT: // %bb.0:
; CHECK-NOF16-NEXT: ld.param.b32 %r1, [maximum_v2half_param_0];
@@ -1502,7 +1502,7 @@ define <2 x half> @maximum_v2half(<2 x half> %a, <2 x half> %b) {
; CHECK-SM80-NOF16-NEXT: .reg .pred %p<11>;
; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<15>;
; CHECK-SM80-NOF16-NEXT: .reg .b32 %r<4>;
-; CHECK-SM80-NOF16-NEXT: .reg .f32 %f<7>;
+; CHECK-SM80-NOF16-NEXT: .reg .b32 %f<7>;
; CHECK-SM80-NOF16-EMPTY:
; CHECK-SM80-NOF16-NEXT: // %bb.0:
; CHECK-SM80-NOF16-NEXT: ld.param.b32 %r1, [maximum_v2half_param_0];
@@ -1547,7 +1547,7 @@ define <2 x half> @maximum_v2half(<2 x half> %a, <2 x half> %b) {
define float @fma_float(float %a, float %b, float %c) {
; CHECK-LABEL: fma_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [fma_float_param_0];
@@ -1563,7 +1563,7 @@ define float @fma_float(float %a, float %b, float %c) {
define float @fma_float_ftz(float %a, float %b, float %c) #1 {
; CHECK-LABEL: fma_float_ftz(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [fma_float_ftz_param_0];
@@ -1579,7 +1579,7 @@ define float @fma_float_ftz(float %a, float %b, float %c) #1 {
define double @fma_double(double %a, double %b, double %c) {
; CHECK-LABEL: fma_double(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<5>;
+; CHECK-NEXT: .reg .b64 %fd<5>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [fma_double_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/misched_func_call.ll b/llvm/test/CodeGen/NVPTX/misched_func_call.ll
index e0d0197..fb4c653 100644
--- a/llvm/test/CodeGen/NVPTX/misched_func_call.ll
+++ b/llvm/test/CodeGen/NVPTX/misched_func_call.ll
@@ -9,7 +9,7 @@ define ptx_kernel void @wombat(i32 %arg, i32 %arg1, i32 %arg2) {
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<11>;
; CHECK-NEXT: .reg .b64 %rd<2>;
-; CHECK-NEXT: .reg .f64 %fd<6>;
+; CHECK-NEXT: .reg .b64 %fd<6>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %bb
; CHECK-NEXT: ld.param.u32 %r4, [wombat_param_2];
diff --git a/llvm/test/CodeGen/NVPTX/param-add.ll b/llvm/test/CodeGen/NVPTX/param-add.ll
index afabc11..c8daf3b 100644
--- a/llvm/test/CodeGen/NVPTX/param-add.ll
+++ b/llvm/test/CodeGen/NVPTX/param-add.ll
@@ -15,7 +15,7 @@ define i32 @test(%struct.1float alignstack(32) %data) {
; CHECK-LABEL: test(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<18>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u8 %r1, [test_param_0+1];
diff --git a/llvm/test/CodeGen/NVPTX/rcp-opt.ll b/llvm/test/CodeGen/NVPTX/rcp-opt.ll
index 31fd8eb..0b020b7 100644
--- a/llvm/test/CodeGen/NVPTX/rcp-opt.ll
+++ b/llvm/test/CodeGen/NVPTX/rcp-opt.ll
@@ -9,7 +9,7 @@ target triple = "nvptx64-nvidia-cuda"
define double @test1(double %in) {
; CHECK-LABEL: test1(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<4>;
+; CHECK-NEXT: .reg .b64 %fd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [test1_param_0];
@@ -27,7 +27,7 @@ define double @test1(double %in) {
define double @test2(double %in) {
; CHECK-LABEL: test2(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<4>;
+; CHECK-NEXT: .reg .b64 %fd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [test2_param_0];
@@ -44,7 +44,7 @@ define double @test2(double %in) {
define double @test3(double %in) {
; CHECK-LABEL: test3(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<4>;
+; CHECK-NEXT: .reg .b64 %fd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [test3_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
index 2a12e9b..020a61a 100644
--- a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
+++ b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
@@ -115,7 +115,7 @@ define half @reduce_fadd_half_reassoc_nonpow2(<7 x half> %in) {
define float @reduce_fadd_float(<8 x float> %in) {
; CHECK-LABEL: reduce_fadd_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<17>;
+; CHECK-NEXT: .reg .b32 %f<17>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fadd_float_param_0+16];
@@ -137,7 +137,7 @@ define float @reduce_fadd_float(<8 x float> %in) {
define float @reduce_fadd_float_reassoc(<8 x float> %in) {
; CHECK-LABEL: reduce_fadd_float_reassoc(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<17>;
+; CHECK-NEXT: .reg .b32 %f<17>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fadd_float_reassoc_param_0+16];
@@ -159,7 +159,7 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) {
define float @reduce_fadd_float_reassoc_nonpow2(<7 x float> %in) {
; CHECK-LABEL: reduce_fadd_float_reassoc_nonpow2(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<15>;
+; CHECK-NEXT: .reg .b32 %f<15>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f7, [reduce_fadd_float_reassoc_nonpow2_param_0+24];
@@ -274,7 +274,7 @@ define half @reduce_fmul_half_reassoc_nonpow2(<7 x half> %in) {
define float @reduce_fmul_float(<8 x float> %in) {
; CHECK-LABEL: reduce_fmul_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<16>;
+; CHECK-NEXT: .reg .b32 %f<16>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmul_float_param_0+16];
@@ -295,7 +295,7 @@ define float @reduce_fmul_float(<8 x float> %in) {
define float @reduce_fmul_float_reassoc(<8 x float> %in) {
; CHECK-LABEL: reduce_fmul_float_reassoc(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<16>;
+; CHECK-NEXT: .reg .b32 %f<16>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmul_float_reassoc_param_0+16];
@@ -316,7 +316,7 @@ define float @reduce_fmul_float_reassoc(<8 x float> %in) {
define float @reduce_fmul_float_reassoc_nonpow2(<7 x float> %in) {
; CHECK-LABEL: reduce_fmul_float_reassoc_nonpow2(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<14>;
+; CHECK-NEXT: .reg .b32 %f<14>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f7, [reduce_fmul_float_reassoc_nonpow2_param_0+24];
@@ -404,7 +404,7 @@ define float @reduce_fmax_float(<8 x float> %in) {
;
; CHECK-LABEL: reduce_fmax_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<16>;
+; CHECK-NEXT: .reg .b32 %f<16>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmax_float_param_0+16];
@@ -426,7 +426,7 @@ define float @reduce_fmax_float_reassoc(<8 x float> %in) {
;
; CHECK-LABEL: reduce_fmax_float_reassoc(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<16>;
+; CHECK-NEXT: .reg .b32 %f<16>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmax_float_reassoc_param_0+16];
@@ -448,7 +448,7 @@ define float @reduce_fmax_float_reassoc_nonpow2(<7 x float> %in) {
;
; CHECK-LABEL: reduce_fmax_float_reassoc_nonpow2(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<14>;
+; CHECK-NEXT: .reg .b32 %f<14>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f7, [reduce_fmax_float_reassoc_nonpow2_param_0+24];
@@ -536,7 +536,7 @@ define float @reduce_fmin_float(<8 x float> %in) {
;
; CHECK-LABEL: reduce_fmin_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<16>;
+; CHECK-NEXT: .reg .b32 %f<16>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmin_float_param_0+16];
@@ -558,7 +558,7 @@ define float @reduce_fmin_float_reassoc(<8 x float> %in) {
;
; CHECK-LABEL: reduce_fmin_float_reassoc(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<16>;
+; CHECK-NEXT: .reg .b32 %f<16>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmin_float_reassoc_param_0+16];
@@ -580,7 +580,7 @@ define float @reduce_fmin_float_reassoc_nonpow2(<7 x float> %in) {
;
; CHECK-LABEL: reduce_fmin_float_reassoc_nonpow2(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<14>;
+; CHECK-NEXT: .reg .b32 %f<14>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f7, [reduce_fmin_float_reassoc_nonpow2_param_0+24];
@@ -668,7 +668,7 @@ define float @reduce_fmaximum_float(<8 x float> %in) {
;
; CHECK-LABEL: reduce_fmaximum_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<16>;
+; CHECK-NEXT: .reg .b32 %f<16>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmaximum_float_param_0+16];
@@ -690,7 +690,7 @@ define float @reduce_fmaximum_float_reassoc(<8 x float> %in) {
;
; CHECK-LABEL: reduce_fmaximum_float_reassoc(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<16>;
+; CHECK-NEXT: .reg .b32 %f<16>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fmaximum_float_reassoc_param_0+16];
@@ -712,7 +712,7 @@ define float @reduce_fmaximum_float_reassoc_nonpow2(<7 x float> %in) {
;
; CHECK-LABEL: reduce_fmaximum_float_reassoc_nonpow2(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<14>;
+; CHECK-NEXT: .reg .b32 %f<14>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24];
@@ -800,7 +800,7 @@ define float @reduce_fminimum_float(<8 x float> %in) {
;
; CHECK-LABEL: reduce_fminimum_float(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<16>;
+; CHECK-NEXT: .reg .b32 %f<16>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fminimum_float_param_0+16];
@@ -822,7 +822,7 @@ define float @reduce_fminimum_float_reassoc(<8 x float> %in) {
;
; CHECK-LABEL: reduce_fminimum_float_reassoc(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<16>;
+; CHECK-NEXT: .reg .b32 %f<16>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [reduce_fminimum_float_reassoc_param_0+16];
@@ -844,7 +844,7 @@ define float @reduce_fminimum_float_reassoc_nonpow2(<7 x float> %in) {
;
; CHECK-LABEL: reduce_fminimum_float_reassoc_nonpow2(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<14>;
+; CHECK-NEXT: .reg .b32 %f<14>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24];
diff --git a/llvm/test/CodeGen/NVPTX/redux-sync-f32.ll b/llvm/test/CodeGen/NVPTX/redux-sync-f32.ll
index af113e7..ed78529 100644
--- a/llvm/test/CodeGen/NVPTX/redux-sync-f32.ll
+++ b/llvm/test/CodeGen/NVPTX/redux-sync-f32.ll
@@ -7,7 +7,7 @@ define float @redux_sync_fmin(float %src, i32 %mask) {
; CHECK-LABEL: redux_sync_fmin(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [redux_sync_fmin_param_0];
@@ -24,7 +24,7 @@ define float @redux_sync_fmin_abs(float %src, i32 %mask) {
; CHECK-LABEL: redux_sync_fmin_abs(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [redux_sync_fmin_abs_param_0];
@@ -41,7 +41,7 @@ define float @redux_sync_fmin_NaN(float %src, i32 %mask) {
; CHECK-LABEL: redux_sync_fmin_NaN(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [redux_sync_fmin_NaN_param_0];
@@ -58,7 +58,7 @@ define float @redux_sync_fmin_abs_NaN(float %src, i32 %mask) {
; CHECK-LABEL: redux_sync_fmin_abs_NaN(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [redux_sync_fmin_abs_NaN_param_0];
@@ -75,7 +75,7 @@ define float @redux_sync_fmax(float %src, i32 %mask) {
; CHECK-LABEL: redux_sync_fmax(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [redux_sync_fmax_param_0];
@@ -92,7 +92,7 @@ define float @redux_sync_fmax_abs(float %src, i32 %mask) {
; CHECK-LABEL: redux_sync_fmax_abs(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [redux_sync_fmax_abs_param_0];
@@ -109,7 +109,7 @@ define float @redux_sync_fmax_NaN(float %src, i32 %mask) {
; CHECK-LABEL: redux_sync_fmax_NaN(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [redux_sync_fmax_NaN_param_0];
@@ -126,7 +126,7 @@ define float @redux_sync_fmax_abs_NaN(float %src, i32 %mask) {
; CHECK-LABEL: redux_sync_fmax_abs_NaN(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [redux_sync_fmax_abs_NaN_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/reg-types.ll b/llvm/test/CodeGen/NVPTX/reg-types.ll
index cf2433a..7b4ebca 100644
--- a/llvm/test/CodeGen/NVPTX/reg-types.ll
+++ b/llvm/test/CodeGen/NVPTX/reg-types.ll
@@ -25,9 +25,9 @@ entry:
%u64 = alloca i64, align 8
; CHECK-DAG: .reg .b64 %rd<
%f32 = alloca float, align 4
-; CHECK-DAG: .reg .f32 %f<
+; CHECK-DAG: .reg .b32 %f<
%f64 = alloca double, align 8
-; CHECK-DAG: .reg .f64 %fd<
+; CHECK-DAG: .reg .b64 %fd<
; Verify that we use correct register types.
store i8 1, ptr %s8, align 1
diff --git a/llvm/test/CodeGen/NVPTX/st-param-imm.ll b/llvm/test/CodeGen/NVPTX/st-param-imm.ll
index ab14476..e8ad689 100644
--- a/llvm/test/CodeGen/NVPTX/st-param-imm.ll
+++ b/llvm/test/CodeGen/NVPTX/st-param-imm.ll
@@ -403,7 +403,7 @@ define void @st_param_v2_f32_ii(float %val) {
define void @st_param_v2_f32_ir(float %val) {
; CHECK-LABEL: st_param_v2_f32_ir(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v2_f32_ir_param_0];
@@ -425,7 +425,7 @@ define void @st_param_v2_f32_ir(float %val) {
define void @st_param_v2_f32_ri(float %val) {
; CHECK-LABEL: st_param_v2_f32_ri(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v2_f32_ri_param_0];
@@ -467,7 +467,7 @@ define void @st_param_v2_f64_ii(double %val) {
define void @st_param_v2_f64_ir(double %val) {
; CHECK-LABEL: st_param_v2_f64_ir(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-NEXT: .reg .b64 %fd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [st_param_v2_f64_ir_param_0];
@@ -489,7 +489,7 @@ define void @st_param_v2_f64_ir(double %val) {
define void @st_param_v2_f64_ri(double %val) {
; CHECK-LABEL: st_param_v2_f64_ri(
; CHECK: {
-; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-NEXT: .reg .b64 %fd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f64 %fd1, [st_param_v2_f64_ri_param_0];
@@ -1648,7 +1648,7 @@ define void @st_param_v4_f32_iiii() {
define void @st_param_v4_f32_irrr(float %b, float %c, float %d) {
; CHECK-LABEL: st_param_v4_f32_irrr(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-NEXT: .reg .b32 %f<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_irrr_param_0];
@@ -1674,7 +1674,7 @@ define void @st_param_v4_f32_irrr(float %b, float %c, float %d) {
define void @st_param_v4_f32_rirr(float %a, float %c, float %d) {
; CHECK-LABEL: st_param_v4_f32_rirr(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-NEXT: .reg .b32 %f<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_rirr_param_0];
@@ -1700,7 +1700,7 @@ define void @st_param_v4_f32_rirr(float %a, float %c, float %d) {
define void @st_param_v4_f32_rrir(float %a, float %b, float %d) {
; CHECK-LABEL: st_param_v4_f32_rrir(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-NEXT: .reg .b32 %f<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_rrir_param_0];
@@ -1726,7 +1726,7 @@ define void @st_param_v4_f32_rrir(float %a, float %b, float %d) {
define void @st_param_v4_f32_rrri(float %a, float %b, float %c) {
; CHECK-LABEL: st_param_v4_f32_rrri(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-NEXT: .reg .b32 %f<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_rrri_param_0];
@@ -1752,7 +1752,7 @@ define void @st_param_v4_f32_rrri(float %a, float %b, float %c) {
define void @st_param_v4_f32_iirr(float %c, float %d) {
; CHECK-LABEL: st_param_v4_f32_iirr(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_iirr_param_0];
@@ -1777,7 +1777,7 @@ define void @st_param_v4_f32_iirr(float %c, float %d) {
define void @st_param_v4_f32_irir(float %b, float %d) {
; CHECK-LABEL: st_param_v4_f32_irir(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_irir_param_0];
@@ -1802,7 +1802,7 @@ define void @st_param_v4_f32_irir(float %b, float %d) {
define void @st_param_v4_f32_irri(float %b, float %c) {
; CHECK-LABEL: st_param_v4_f32_irri(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_irri_param_0];
@@ -1827,7 +1827,7 @@ define void @st_param_v4_f32_irri(float %b, float %c) {
define void @st_param_v4_f32_riir(float %a, float %d) {
; CHECK-LABEL: st_param_v4_f32_riir(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_riir_param_0];
@@ -1852,7 +1852,7 @@ define void @st_param_v4_f32_riir(float %a, float %d) {
define void @st_param_v4_f32_riri(float %a, float %c) {
; CHECK-LABEL: st_param_v4_f32_riri(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_riri_param_0];
@@ -1877,7 +1877,7 @@ define void @st_param_v4_f32_riri(float %a, float %c) {
define void @st_param_v4_f32_rrii(float %a, float %b) {
; CHECK-LABEL: st_param_v4_f32_rrii(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b32 %f<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_rrii_param_0];
@@ -1902,7 +1902,7 @@ define void @st_param_v4_f32_rrii(float %a, float %b) {
define void @st_param_v4_f32_iiir(float %d) {
; CHECK-LABEL: st_param_v4_f32_iiir(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_iiir_param_0];
@@ -1926,7 +1926,7 @@ define void @st_param_v4_f32_iiir(float %d) {
define void @st_param_v4_f32_iiri(float %c) {
; CHECK-LABEL: st_param_v4_f32_iiri(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_iiri_param_0];
@@ -1950,7 +1950,7 @@ define void @st_param_v4_f32_iiri(float %c) {
define void @st_param_v4_f32_irii(float %b) {
; CHECK-LABEL: st_param_v4_f32_irii(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_irii_param_0];
@@ -1974,7 +1974,7 @@ define void @st_param_v4_f32_irii(float %b) {
define void @st_param_v4_f32_riii(float %a) {
; CHECK-LABEL: st_param_v4_f32_riii(
; CHECK: {
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_riii_param_0];
diff --git a/llvm/test/CodeGen/NVPTX/surf-read-cuda.ll b/llvm/test/CodeGen/NVPTX/surf-read-cuda.ll
index 7a7904a..3afff32 100644
--- a/llvm/test/CodeGen/NVPTX/surf-read-cuda.ll
+++ b/llvm/test/CodeGen/NVPTX/surf-read-cuda.ll
@@ -14,7 +14,7 @@ define ptx_kernel void @foo(i64 %img, ptr %red, i32 %idx) {
; CHECK-LABEL: foo(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -38,7 +38,7 @@ define ptx_kernel void @bar(ptr %red, i32 %idx) {
; CHECK-LABEL: bar(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b32 %f<2>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
diff --git a/llvm/test/CodeGen/NVPTX/tex-read-cuda.ll b/llvm/test/CodeGen/NVPTX/tex-read-cuda.ll
index 61837bd..4e4e3f3 100644
--- a/llvm/test/CodeGen/NVPTX/tex-read-cuda.ll
+++ b/llvm/test/CodeGen/NVPTX/tex-read-cuda.ll
@@ -14,7 +14,7 @@ define ptx_kernel void @foo(i64 %img, ptr %red, i32 %idx) {
; CHECK-LABEL: foo(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -38,7 +38,7 @@ define ptx_kernel void @bar(ptr %red, i32 %idx) {
; CHECK-LABEL: bar(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<5>;
+; CHECK-NEXT: .reg .b32 %f<5>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
@@ -61,7 +61,7 @@ define ptx_kernel void @baz(ptr %red, i32 %idx) {
; CHECK-LABEL: baz(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
-; CHECK-NEXT: .reg .f32 %f<8>;
+; CHECK-NEXT: .reg .b32 %f<8>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
diff --git a/llvm/test/CodeGen/NVPTX/variadics-backend.ll b/llvm/test/CodeGen/NVPTX/variadics-backend.ll
index 35db489..9da3614 100644
--- a/llvm/test/CodeGen/NVPTX/variadics-backend.ll
+++ b/llvm/test/CodeGen/NVPTX/variadics-backend.ll
@@ -13,7 +13,7 @@ define dso_local i32 @variadics1(i32 noundef %first, ...) {
; CHECK-PTX: {
; CHECK-PTX-NEXT: .reg .b32 %r<11>;
; CHECK-PTX-NEXT: .reg .b64 %rd<11>;
-; CHECK-PTX-NEXT: .reg .f64 %fd<7>;
+; CHECK-PTX-NEXT: .reg .b64 %fd<7>;
; CHECK-PTX-EMPTY:
; CHECK-PTX-NEXT: // %bb.0: // %entry
; CHECK-PTX-NEXT: ld.param.u32 %r1, [variadics1_param_0];
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
index f9f1ba6..874cf89 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
@@ -419,8 +419,8 @@ define void @callee() {
;
; RV32IZCMP-LABEL: callee:
; RV32IZCMP: # %bb.0:
-; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -80
-; RV32IZCMP-NEXT: .cfi_def_cfa_offset 80
+; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -96
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 96
; RV32IZCMP-NEXT: .cfi_offset ra, -52
; RV32IZCMP-NEXT: .cfi_offset s0, -48
; RV32IZCMP-NEXT: .cfi_offset s1, -44
@@ -436,18 +436,18 @@ define void @callee() {
; RV32IZCMP-NEXT: .cfi_offset s11, -4
; RV32IZCMP-NEXT: lui t0, %hi(var)
; RV32IZCMP-NEXT: lw a0, %lo(var)(t0)
-; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, %lo(var+4)(t0)
-; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, %lo(var+8)(t0)
-; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, %lo(var+12)(t0)
-; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: addi a5, t0, %lo(var)
; RV32IZCMP-NEXT: lw a0, 16(a5)
-; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 20(a5)
-; RV32IZCMP-NEXT: sw a0, 4(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw t4, 24(a5)
; RV32IZCMP-NEXT: lw t5, 28(a5)
; RV32IZCMP-NEXT: lw t6, 32(a5)
@@ -500,19 +500,19 @@ define void @callee() {
; RV32IZCMP-NEXT: sw t6, 32(a5)
; RV32IZCMP-NEXT: sw t5, 28(a5)
; RV32IZCMP-NEXT: sw t4, 24(a5)
-; RV32IZCMP-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 20(a5)
; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 16(a5)
+; RV32IZCMP-NEXT: sw a0, 20(a5)
; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var+12)(t0)
+; RV32IZCMP-NEXT: sw a0, 16(a5)
; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var+8)(t0)
+; RV32IZCMP-NEXT: sw a0, %lo(var+12)(t0)
; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var+4)(t0)
+; RV32IZCMP-NEXT: sw a0, %lo(var+8)(t0)
; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var+4)(t0)
+; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: sw a0, %lo(var)(t0)
-; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 80
+; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 96
;
; RV32IZCMP-WITH-FP-LABEL: callee:
; RV32IZCMP-WITH-FP: # %bb.0:
@@ -1055,18 +1055,18 @@ define void @callee() {
; RV64IZCMP-NEXT: .cfi_offset s11, -8
; RV64IZCMP-NEXT: lui t0, %hi(var)
; RV64IZCMP-NEXT: lw a0, %lo(var)(t0)
-; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var+4)(t0)
; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var+8)(t0)
+; RV64IZCMP-NEXT: lw a0, %lo(var+4)(t0)
; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var+12)(t0)
+; RV64IZCMP-NEXT: lw a0, %lo(var+8)(t0)
; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var+12)(t0)
+; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: addi a5, t0, %lo(var)
; RV64IZCMP-NEXT: lw a0, 16(a5)
-; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 20(a5)
; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 20(a5)
+; RV64IZCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: lw t4, 24(a5)
; RV64IZCMP-NEXT: lw t5, 28(a5)
; RV64IZCMP-NEXT: lw t6, 32(a5)
@@ -1119,17 +1119,17 @@ define void @callee() {
; RV64IZCMP-NEXT: sw t6, 32(a5)
; RV64IZCMP-NEXT: sw t5, 28(a5)
; RV64IZCMP-NEXT: sw t4, 24(a5)
-; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 20(a5)
-; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 16(a5)
-; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, %lo(var+12)(t0)
-; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, %lo(var+8)(t0)
-; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, %lo(var+4)(t0)
-; RV64IZCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, %lo(var)(t0)
; RV64IZCMP-NEXT: cm.popret {ra, s0-s11}, 160
;
@@ -1798,54 +1798,54 @@ define void @caller() {
; RV32IZCMP-NEXT: .cfi_offset s9, -12
; RV32IZCMP-NEXT: .cfi_offset s10, -8
; RV32IZCMP-NEXT: .cfi_offset s11, -4
-; RV32IZCMP-NEXT: addi sp, sp, -32
-; RV32IZCMP-NEXT: .cfi_def_cfa_offset 144
+; RV32IZCMP-NEXT: addi sp, sp, -48
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 160
; RV32IZCMP-NEXT: lui s0, %hi(var)
; RV32IZCMP-NEXT: lw a0, %lo(var)(s0)
-; RV32IZCMP-NEXT: sw a0, 88(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 92(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, %lo(var+4)(s0)
-; RV32IZCMP-NEXT: sw a0, 84(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 88(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, %lo(var+8)(s0)
-; RV32IZCMP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 84(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, %lo(var+12)(s0)
-; RV32IZCMP-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: addi s1, s0, %lo(var)
; RV32IZCMP-NEXT: lw a0, 16(s1)
-; RV32IZCMP-NEXT: sw a0, 72(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 20(s1)
-; RV32IZCMP-NEXT: sw a0, 68(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 72(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 24(s1)
-; RV32IZCMP-NEXT: sw a0, 64(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 68(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 28(s1)
-; RV32IZCMP-NEXT: sw a0, 60(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 64(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 32(s1)
-; RV32IZCMP-NEXT: sw a0, 56(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 60(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 36(s1)
-; RV32IZCMP-NEXT: sw a0, 52(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 56(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 40(s1)
-; RV32IZCMP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 52(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 44(s1)
-; RV32IZCMP-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 48(s1)
-; RV32IZCMP-NEXT: sw a0, 40(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 52(s1)
-; RV32IZCMP-NEXT: sw a0, 36(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 40(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 56(s1)
-; RV32IZCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 36(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 60(s1)
-; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 64(s1)
-; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 68(s1)
-; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 72(s1)
-; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 76(s1)
-; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 80(s1)
-; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 84(s1)
-; RV32IZCMP-NEXT: sw a0, 4(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw s4, 88(s1)
; RV32IZCMP-NEXT: lw s5, 92(s1)
; RV32IZCMP-NEXT: lw s6, 96(s1)
@@ -1867,51 +1867,51 @@ define void @caller() {
; RV32IZCMP-NEXT: sw s6, 96(s1)
; RV32IZCMP-NEXT: sw s5, 92(s1)
; RV32IZCMP-NEXT: sw s4, 88(s1)
-; RV32IZCMP-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 84(s1)
; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 80(s1)
+; RV32IZCMP-NEXT: sw a0, 84(s1)
; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 76(s1)
+; RV32IZCMP-NEXT: sw a0, 80(s1)
; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 72(s1)
+; RV32IZCMP-NEXT: sw a0, 76(s1)
; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 68(s1)
+; RV32IZCMP-NEXT: sw a0, 72(s1)
; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 64(s1)
+; RV32IZCMP-NEXT: sw a0, 68(s1)
; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 60(s1)
+; RV32IZCMP-NEXT: sw a0, 64(s1)
; RV32IZCMP-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 56(s1)
+; RV32IZCMP-NEXT: sw a0, 60(s1)
; RV32IZCMP-NEXT: lw a0, 36(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 52(s1)
+; RV32IZCMP-NEXT: sw a0, 56(s1)
; RV32IZCMP-NEXT: lw a0, 40(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 48(s1)
+; RV32IZCMP-NEXT: sw a0, 52(s1)
; RV32IZCMP-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 44(s1)
+; RV32IZCMP-NEXT: sw a0, 48(s1)
; RV32IZCMP-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 40(s1)
+; RV32IZCMP-NEXT: sw a0, 44(s1)
; RV32IZCMP-NEXT: lw a0, 52(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 36(s1)
+; RV32IZCMP-NEXT: sw a0, 40(s1)
; RV32IZCMP-NEXT: lw a0, 56(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 32(s1)
+; RV32IZCMP-NEXT: sw a0, 36(s1)
; RV32IZCMP-NEXT: lw a0, 60(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 28(s1)
+; RV32IZCMP-NEXT: sw a0, 32(s1)
; RV32IZCMP-NEXT: lw a0, 64(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 24(s1)
+; RV32IZCMP-NEXT: sw a0, 28(s1)
; RV32IZCMP-NEXT: lw a0, 68(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 20(s1)
+; RV32IZCMP-NEXT: sw a0, 24(s1)
; RV32IZCMP-NEXT: lw a0, 72(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 16(s1)
+; RV32IZCMP-NEXT: sw a0, 20(s1)
; RV32IZCMP-NEXT: lw a0, 76(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var+12)(s0)
+; RV32IZCMP-NEXT: sw a0, 16(s1)
; RV32IZCMP-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var+8)(s0)
+; RV32IZCMP-NEXT: sw a0, %lo(var+12)(s0)
; RV32IZCMP-NEXT: lw a0, 84(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var+4)(s0)
+; RV32IZCMP-NEXT: sw a0, %lo(var+8)(s0)
; RV32IZCMP-NEXT: lw a0, 88(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var+4)(s0)
+; RV32IZCMP-NEXT: lw a0, 92(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: sw a0, %lo(var)(s0)
-; RV32IZCMP-NEXT: addi sp, sp, 32
+; RV32IZCMP-NEXT: addi sp, sp, 48
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 112
; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 112
;
@@ -2609,50 +2609,50 @@ define void @caller() {
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 288
; RV64IZCMP-NEXT: lui s0, %hi(var)
; RV64IZCMP-NEXT: lw a0, %lo(var)(s0)
-; RV64IZCMP-NEXT: sd a0, 176(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var+4)(s0)
; RV64IZCMP-NEXT: sd a0, 168(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var+8)(s0)
+; RV64IZCMP-NEXT: lw a0, %lo(var+4)(s0)
; RV64IZCMP-NEXT: sd a0, 160(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var+12)(s0)
+; RV64IZCMP-NEXT: lw a0, %lo(var+8)(s0)
; RV64IZCMP-NEXT: sd a0, 152(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var+12)(s0)
+; RV64IZCMP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: addi s1, s0, %lo(var)
; RV64IZCMP-NEXT: lw a0, 16(s1)
-; RV64IZCMP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 20(s1)
; RV64IZCMP-NEXT: sd a0, 136(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 24(s1)
+; RV64IZCMP-NEXT: lw a0, 20(s1)
; RV64IZCMP-NEXT: sd a0, 128(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 28(s1)
+; RV64IZCMP-NEXT: lw a0, 24(s1)
; RV64IZCMP-NEXT: sd a0, 120(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 32(s1)
+; RV64IZCMP-NEXT: lw a0, 28(s1)
; RV64IZCMP-NEXT: sd a0, 112(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 36(s1)
+; RV64IZCMP-NEXT: lw a0, 32(s1)
; RV64IZCMP-NEXT: sd a0, 104(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 40(s1)
+; RV64IZCMP-NEXT: lw a0, 36(s1)
; RV64IZCMP-NEXT: sd a0, 96(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 44(s1)
+; RV64IZCMP-NEXT: lw a0, 40(s1)
; RV64IZCMP-NEXT: sd a0, 88(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 48(s1)
+; RV64IZCMP-NEXT: lw a0, 44(s1)
; RV64IZCMP-NEXT: sd a0, 80(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 52(s1)
+; RV64IZCMP-NEXT: lw a0, 48(s1)
; RV64IZCMP-NEXT: sd a0, 72(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 56(s1)
+; RV64IZCMP-NEXT: lw a0, 52(s1)
; RV64IZCMP-NEXT: sd a0, 64(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 60(s1)
+; RV64IZCMP-NEXT: lw a0, 56(s1)
; RV64IZCMP-NEXT: sd a0, 56(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 64(s1)
+; RV64IZCMP-NEXT: lw a0, 60(s1)
; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 68(s1)
+; RV64IZCMP-NEXT: lw a0, 64(s1)
; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 72(s1)
+; RV64IZCMP-NEXT: lw a0, 68(s1)
; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 76(s1)
+; RV64IZCMP-NEXT: lw a0, 72(s1)
; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 80(s1)
+; RV64IZCMP-NEXT: lw a0, 76(s1)
; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 84(s1)
+; RV64IZCMP-NEXT: lw a0, 80(s1)
; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 84(s1)
+; RV64IZCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: lw s4, 88(s1)
; RV64IZCMP-NEXT: lw s5, 92(s1)
; RV64IZCMP-NEXT: lw s6, 96(s1)
@@ -2674,49 +2674,49 @@ define void @caller() {
; RV64IZCMP-NEXT: sw s6, 96(s1)
; RV64IZCMP-NEXT: sw s5, 92(s1)
; RV64IZCMP-NEXT: sw s4, 88(s1)
-; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 84(s1)
-; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 80(s1)
-; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 76(s1)
-; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 72(s1)
-; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 68(s1)
-; RV64IZCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 64(s1)
-; RV64IZCMP-NEXT: ld a0, 56(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 60(s1)
-; RV64IZCMP-NEXT: ld a0, 64(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 56(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 56(s1)
-; RV64IZCMP-NEXT: ld a0, 72(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 64(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 52(s1)
-; RV64IZCMP-NEXT: ld a0, 80(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 72(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 48(s1)
-; RV64IZCMP-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 80(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 44(s1)
-; RV64IZCMP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 40(s1)
-; RV64IZCMP-NEXT: ld a0, 104(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 36(s1)
-; RV64IZCMP-NEXT: ld a0, 112(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 104(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 32(s1)
-; RV64IZCMP-NEXT: ld a0, 120(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 112(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 28(s1)
-; RV64IZCMP-NEXT: ld a0, 128(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 120(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 24(s1)
-; RV64IZCMP-NEXT: ld a0, 136(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 128(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 20(s1)
-; RV64IZCMP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 136(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 16(s1)
-; RV64IZCMP-NEXT: ld a0, 152(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, %lo(var+12)(s0)
-; RV64IZCMP-NEXT: ld a0, 160(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 152(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, %lo(var+8)(s0)
-; RV64IZCMP-NEXT: ld a0, 168(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 160(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, %lo(var+4)(s0)
-; RV64IZCMP-NEXT: ld a0, 176(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 168(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, %lo(var)(s0)
; RV64IZCMP-NEXT: addi sp, sp, 128
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 160
diff --git a/llvm/test/CodeGen/RISCV/push-pop-popret.ll b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
index 65f58d0..8f9c97d 100644
--- a/llvm/test/CodeGen/RISCV/push-pop-popret.ll
+++ b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
@@ -24,7 +24,7 @@ define i32 @foo() {
; RV32IZCMP-NEXT: .cfi_offset ra, -4
; RV32IZCMP-NEXT: addi sp, sp, -464
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 528
-; RV32IZCMP-NEXT: addi a0, sp, 12
+; RV32IZCMP-NEXT: mv a0, sp
; RV32IZCMP-NEXT: call test
; RV32IZCMP-NEXT: addi sp, sp, 464
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 64
@@ -37,7 +37,7 @@ define i32 @foo() {
; RV64IZCMP-NEXT: .cfi_offset ra, -8
; RV64IZCMP-NEXT: addi sp, sp, -464
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 528
-; RV64IZCMP-NEXT: addi a0, sp, 8
+; RV64IZCMP-NEXT: mv a0, sp
; RV64IZCMP-NEXT: call test
; RV64IZCMP-NEXT: addi sp, sp, 464
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 64
@@ -50,7 +50,7 @@ define i32 @foo() {
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
; RV32IZCMP-SR-NEXT: addi sp, sp, -464
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 528
-; RV32IZCMP-SR-NEXT: addi a0, sp, 12
+; RV32IZCMP-SR-NEXT: mv a0, sp
; RV32IZCMP-SR-NEXT: call test
; RV32IZCMP-SR-NEXT: addi sp, sp, 464
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
@@ -63,7 +63,7 @@ define i32 @foo() {
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
; RV64IZCMP-SR-NEXT: addi sp, sp, -464
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 528
-; RV64IZCMP-SR-NEXT: addi a0, sp, 8
+; RV64IZCMP-SR-NEXT: mv a0, sp
; RV64IZCMP-SR-NEXT: call test
; RV64IZCMP-SR-NEXT: addi sp, sp, 464
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
@@ -1775,52 +1775,54 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV32IZCMP-NEXT: cm.push {ra}, -64
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 64
; RV32IZCMP-NEXT: .cfi_offset ra, -4
-; RV32IZCMP-NEXT: sw t0, 56(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw t1, 52(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw t2, 48(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a1, 40(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a2, 36(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a3, 32(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a4, 28(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a5, 24(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a6, 20(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a7, 16(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw t3, 12(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw t4, 8(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw t5, 4(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw t6, 0(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: .cfi_offset t0, -8
-; RV32IZCMP-NEXT: .cfi_offset t1, -12
-; RV32IZCMP-NEXT: .cfi_offset t2, -16
-; RV32IZCMP-NEXT: .cfi_offset a0, -20
-; RV32IZCMP-NEXT: .cfi_offset a1, -24
-; RV32IZCMP-NEXT: .cfi_offset a2, -28
-; RV32IZCMP-NEXT: .cfi_offset a3, -32
-; RV32IZCMP-NEXT: .cfi_offset a4, -36
-; RV32IZCMP-NEXT: .cfi_offset a5, -40
-; RV32IZCMP-NEXT: .cfi_offset a6, -44
-; RV32IZCMP-NEXT: .cfi_offset a7, -48
-; RV32IZCMP-NEXT: .cfi_offset t3, -52
-; RV32IZCMP-NEXT: .cfi_offset t4, -56
-; RV32IZCMP-NEXT: .cfi_offset t5, -60
-; RV32IZCMP-NEXT: .cfi_offset t6, -64
+; RV32IZCMP-NEXT: addi sp, sp, -16
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 80
+; RV32IZCMP-NEXT: sw t0, 60(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t1, 56(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t2, 52(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a1, 44(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a2, 40(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a3, 36(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a4, 32(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a5, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a6, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a7, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t3, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: .cfi_offset t0, -20
+; RV32IZCMP-NEXT: .cfi_offset t1, -24
+; RV32IZCMP-NEXT: .cfi_offset t2, -28
+; RV32IZCMP-NEXT: .cfi_offset a0, -32
+; RV32IZCMP-NEXT: .cfi_offset a1, -36
+; RV32IZCMP-NEXT: .cfi_offset a2, -40
+; RV32IZCMP-NEXT: .cfi_offset a3, -44
+; RV32IZCMP-NEXT: .cfi_offset a4, -48
+; RV32IZCMP-NEXT: .cfi_offset a5, -52
+; RV32IZCMP-NEXT: .cfi_offset a6, -56
+; RV32IZCMP-NEXT: .cfi_offset a7, -60
+; RV32IZCMP-NEXT: .cfi_offset t3, -64
+; RV32IZCMP-NEXT: .cfi_offset t4, -68
+; RV32IZCMP-NEXT: .cfi_offset t5, -72
+; RV32IZCMP-NEXT: .cfi_offset t6, -76
; RV32IZCMP-NEXT: call foo_test_irq
-; RV32IZCMP-NEXT: lw t0, 56(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw t1, 52(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw t2, 48(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a1, 40(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a2, 36(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a4, 28(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a5, 24(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a6, 20(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a7, 16(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw t3, 12(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw t4, 8(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw t5, 4(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw t6, 0(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t2, 52(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a1, 44(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a2, 40(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a3, 36(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a4, 32(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a5, 28(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a6, 24(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a7, 20(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t3, 16(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t4, 12(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t5, 8(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t6, 4(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: .cfi_restore t0
; RV32IZCMP-NEXT: .cfi_restore t1
; RV32IZCMP-NEXT: .cfi_restore t2
@@ -1836,6 +1838,8 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV32IZCMP-NEXT: .cfi_restore t4
; RV32IZCMP-NEXT: .cfi_restore t5
; RV32IZCMP-NEXT: .cfi_restore t6
+; RV32IZCMP-NEXT: addi sp, sp, 16
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 64
; RV32IZCMP-NEXT: cm.pop {ra}, 64
; RV32IZCMP-NEXT: .cfi_restore ra
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 0
@@ -1846,54 +1850,54 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV64IZCMP-NEXT: cm.push {ra}, -64
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 64
; RV64IZCMP-NEXT: .cfi_offset ra, -8
-; RV64IZCMP-NEXT: addi sp, sp, -64
-; RV64IZCMP-NEXT: .cfi_def_cfa_offset 128
-; RV64IZCMP-NEXT: sd t0, 112(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd t1, 104(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd t2, 96(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a0, 88(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a1, 80(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a2, 72(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a3, 64(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a4, 56(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a5, 48(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a6, 40(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a7, 32(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd t3, 24(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd t4, 16(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd t5, 8(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd t6, 0(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: .cfi_offset t0, -16
-; RV64IZCMP-NEXT: .cfi_offset t1, -24
-; RV64IZCMP-NEXT: .cfi_offset t2, -32
-; RV64IZCMP-NEXT: .cfi_offset a0, -40
-; RV64IZCMP-NEXT: .cfi_offset a1, -48
-; RV64IZCMP-NEXT: .cfi_offset a2, -56
-; RV64IZCMP-NEXT: .cfi_offset a3, -64
-; RV64IZCMP-NEXT: .cfi_offset a4, -72
-; RV64IZCMP-NEXT: .cfi_offset a5, -80
-; RV64IZCMP-NEXT: .cfi_offset a6, -88
-; RV64IZCMP-NEXT: .cfi_offset a7, -96
-; RV64IZCMP-NEXT: .cfi_offset t3, -104
-; RV64IZCMP-NEXT: .cfi_offset t4, -112
-; RV64IZCMP-NEXT: .cfi_offset t5, -120
-; RV64IZCMP-NEXT: .cfi_offset t6, -128
+; RV64IZCMP-NEXT: addi sp, sp, -80
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 144
+; RV64IZCMP-NEXT: sd t0, 120(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t1, 112(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t2, 104(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a0, 96(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a1, 88(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a2, 80(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a3, 72(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a4, 64(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a5, 56(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a6, 48(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a7, 40(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t3, 32(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: .cfi_offset t0, -24
+; RV64IZCMP-NEXT: .cfi_offset t1, -32
+; RV64IZCMP-NEXT: .cfi_offset t2, -40
+; RV64IZCMP-NEXT: .cfi_offset a0, -48
+; RV64IZCMP-NEXT: .cfi_offset a1, -56
+; RV64IZCMP-NEXT: .cfi_offset a2, -64
+; RV64IZCMP-NEXT: .cfi_offset a3, -72
+; RV64IZCMP-NEXT: .cfi_offset a4, -80
+; RV64IZCMP-NEXT: .cfi_offset a5, -88
+; RV64IZCMP-NEXT: .cfi_offset a6, -96
+; RV64IZCMP-NEXT: .cfi_offset a7, -104
+; RV64IZCMP-NEXT: .cfi_offset t3, -112
+; RV64IZCMP-NEXT: .cfi_offset t4, -120
+; RV64IZCMP-NEXT: .cfi_offset t5, -128
+; RV64IZCMP-NEXT: .cfi_offset t6, -136
; RV64IZCMP-NEXT: call foo_test_irq
-; RV64IZCMP-NEXT: ld t0, 112(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld t1, 104(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld t2, 96(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a1, 80(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a2, 72(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a3, 64(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a4, 56(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a5, 48(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a6, 40(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a7, 32(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld t3, 24(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld t4, 16(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld t5, 8(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld t6, 0(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t0, 120(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t1, 112(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t2, 104(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a1, 88(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a2, 80(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a3, 72(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a4, 64(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a5, 56(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a6, 48(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a7, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t3, 32(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t4, 24(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t5, 16(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t6, 8(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: .cfi_restore t0
; RV64IZCMP-NEXT: .cfi_restore t1
; RV64IZCMP-NEXT: .cfi_restore t2
@@ -1909,7 +1913,7 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV64IZCMP-NEXT: .cfi_restore t4
; RV64IZCMP-NEXT: .cfi_restore t5
; RV64IZCMP-NEXT: .cfi_restore t6
-; RV64IZCMP-NEXT: addi sp, sp, 64
+; RV64IZCMP-NEXT: addi sp, sp, 80
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 64
; RV64IZCMP-NEXT: cm.pop {ra}, 64
; RV64IZCMP-NEXT: .cfi_restore ra
@@ -1921,52 +1925,54 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV32IZCMP-SR-NEXT: cm.push {ra}, -64
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
-; RV32IZCMP-SR-NEXT: sw t0, 56(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw t1, 52(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw t2, 48(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a1, 40(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a2, 36(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a3, 32(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a4, 28(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a5, 24(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a6, 20(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a7, 16(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw t3, 12(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw t4, 8(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw t5, 4(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw t6, 0(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: .cfi_offset t0, -8
-; RV32IZCMP-SR-NEXT: .cfi_offset t1, -12
-; RV32IZCMP-SR-NEXT: .cfi_offset t2, -16
-; RV32IZCMP-SR-NEXT: .cfi_offset a0, -20
-; RV32IZCMP-SR-NEXT: .cfi_offset a1, -24
-; RV32IZCMP-SR-NEXT: .cfi_offset a2, -28
-; RV32IZCMP-SR-NEXT: .cfi_offset a3, -32
-; RV32IZCMP-SR-NEXT: .cfi_offset a4, -36
-; RV32IZCMP-SR-NEXT: .cfi_offset a5, -40
-; RV32IZCMP-SR-NEXT: .cfi_offset a6, -44
-; RV32IZCMP-SR-NEXT: .cfi_offset a7, -48
-; RV32IZCMP-SR-NEXT: .cfi_offset t3, -52
-; RV32IZCMP-SR-NEXT: .cfi_offset t4, -56
-; RV32IZCMP-SR-NEXT: .cfi_offset t5, -60
-; RV32IZCMP-SR-NEXT: .cfi_offset t6, -64
+; RV32IZCMP-SR-NEXT: addi sp, sp, -16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 80
+; RV32IZCMP-SR-NEXT: sw t0, 60(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t1, 56(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t2, 52(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a1, 44(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a2, 40(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a3, 36(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a4, 32(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a5, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a6, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a7, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t3, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: .cfi_offset t0, -20
+; RV32IZCMP-SR-NEXT: .cfi_offset t1, -24
+; RV32IZCMP-SR-NEXT: .cfi_offset t2, -28
+; RV32IZCMP-SR-NEXT: .cfi_offset a0, -32
+; RV32IZCMP-SR-NEXT: .cfi_offset a1, -36
+; RV32IZCMP-SR-NEXT: .cfi_offset a2, -40
+; RV32IZCMP-SR-NEXT: .cfi_offset a3, -44
+; RV32IZCMP-SR-NEXT: .cfi_offset a4, -48
+; RV32IZCMP-SR-NEXT: .cfi_offset a5, -52
+; RV32IZCMP-SR-NEXT: .cfi_offset a6, -56
+; RV32IZCMP-SR-NEXT: .cfi_offset a7, -60
+; RV32IZCMP-SR-NEXT: .cfi_offset t3, -64
+; RV32IZCMP-SR-NEXT: .cfi_offset t4, -68
+; RV32IZCMP-SR-NEXT: .cfi_offset t5, -72
+; RV32IZCMP-SR-NEXT: .cfi_offset t6, -76
; RV32IZCMP-SR-NEXT: call foo_test_irq
-; RV32IZCMP-SR-NEXT: lw t0, 56(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw t1, 52(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw t2, 48(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a1, 40(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a2, 36(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a4, 28(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a5, 24(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a6, 20(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a7, 16(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw t3, 12(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw t4, 8(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw t5, 4(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw t6, 0(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t2, 52(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a1, 44(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a2, 40(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a3, 36(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a4, 32(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a5, 28(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a6, 24(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a7, 20(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t3, 16(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t4, 12(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t5, 8(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t6, 4(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: .cfi_restore t0
; RV32IZCMP-SR-NEXT: .cfi_restore t1
; RV32IZCMP-SR-NEXT: .cfi_restore t2
@@ -1982,6 +1988,8 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV32IZCMP-SR-NEXT: .cfi_restore t4
; RV32IZCMP-SR-NEXT: .cfi_restore t5
; RV32IZCMP-SR-NEXT: .cfi_restore t6
+; RV32IZCMP-SR-NEXT: addi sp, sp, 16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV32IZCMP-SR-NEXT: cm.pop {ra}, 64
; RV32IZCMP-SR-NEXT: .cfi_restore ra
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
@@ -1992,54 +2000,54 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV64IZCMP-SR-NEXT: cm.push {ra}, -64
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
-; RV64IZCMP-SR-NEXT: addi sp, sp, -64
-; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 128
-; RV64IZCMP-SR-NEXT: sd t0, 112(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd t1, 104(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd t2, 96(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a0, 88(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a1, 80(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a2, 72(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a3, 64(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a4, 56(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a5, 48(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a6, 40(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a7, 32(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd t3, 24(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd t4, 16(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd t5, 8(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd t6, 0(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: .cfi_offset t0, -16
-; RV64IZCMP-SR-NEXT: .cfi_offset t1, -24
-; RV64IZCMP-SR-NEXT: .cfi_offset t2, -32
-; RV64IZCMP-SR-NEXT: .cfi_offset a0, -40
-; RV64IZCMP-SR-NEXT: .cfi_offset a1, -48
-; RV64IZCMP-SR-NEXT: .cfi_offset a2, -56
-; RV64IZCMP-SR-NEXT: .cfi_offset a3, -64
-; RV64IZCMP-SR-NEXT: .cfi_offset a4, -72
-; RV64IZCMP-SR-NEXT: .cfi_offset a5, -80
-; RV64IZCMP-SR-NEXT: .cfi_offset a6, -88
-; RV64IZCMP-SR-NEXT: .cfi_offset a7, -96
-; RV64IZCMP-SR-NEXT: .cfi_offset t3, -104
-; RV64IZCMP-SR-NEXT: .cfi_offset t4, -112
-; RV64IZCMP-SR-NEXT: .cfi_offset t5, -120
-; RV64IZCMP-SR-NEXT: .cfi_offset t6, -128
+; RV64IZCMP-SR-NEXT: addi sp, sp, -80
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 144
+; RV64IZCMP-SR-NEXT: sd t0, 120(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t1, 112(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t2, 104(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a0, 96(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a1, 88(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a2, 80(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a3, 72(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a4, 64(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a5, 56(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a6, 48(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a7, 40(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t3, 32(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: .cfi_offset t0, -24
+; RV64IZCMP-SR-NEXT: .cfi_offset t1, -32
+; RV64IZCMP-SR-NEXT: .cfi_offset t2, -40
+; RV64IZCMP-SR-NEXT: .cfi_offset a0, -48
+; RV64IZCMP-SR-NEXT: .cfi_offset a1, -56
+; RV64IZCMP-SR-NEXT: .cfi_offset a2, -64
+; RV64IZCMP-SR-NEXT: .cfi_offset a3, -72
+; RV64IZCMP-SR-NEXT: .cfi_offset a4, -80
+; RV64IZCMP-SR-NEXT: .cfi_offset a5, -88
+; RV64IZCMP-SR-NEXT: .cfi_offset a6, -96
+; RV64IZCMP-SR-NEXT: .cfi_offset a7, -104
+; RV64IZCMP-SR-NEXT: .cfi_offset t3, -112
+; RV64IZCMP-SR-NEXT: .cfi_offset t4, -120
+; RV64IZCMP-SR-NEXT: .cfi_offset t5, -128
+; RV64IZCMP-SR-NEXT: .cfi_offset t6, -136
; RV64IZCMP-SR-NEXT: call foo_test_irq
-; RV64IZCMP-SR-NEXT: ld t0, 112(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld t1, 104(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld t2, 96(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a1, 80(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a2, 72(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a3, 64(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a4, 56(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a5, 48(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a6, 40(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a7, 32(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld t3, 24(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld t4, 16(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld t5, 8(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld t6, 0(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t0, 120(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t1, 112(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t2, 104(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a1, 88(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a2, 80(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a3, 72(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a4, 64(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a5, 56(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a6, 48(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a7, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t3, 32(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t4, 24(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t5, 16(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t6, 8(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: .cfi_restore t0
; RV64IZCMP-SR-NEXT: .cfi_restore t1
; RV64IZCMP-SR-NEXT: .cfi_restore t2
@@ -2055,7 +2063,7 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV64IZCMP-SR-NEXT: .cfi_restore t4
; RV64IZCMP-SR-NEXT: .cfi_restore t5
; RV64IZCMP-SR-NEXT: .cfi_restore t6
-; RV64IZCMP-SR-NEXT: addi sp, sp, 64
+; RV64IZCMP-SR-NEXT: addi sp, sp, 80
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV64IZCMP-SR-NEXT: cm.pop {ra}, 64
; RV64IZCMP-SR-NEXT: .cfi_restore ra
@@ -2291,52 +2299,52 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IZCMP-NEXT: .cfi_offset s9, -12
; RV32IZCMP-NEXT: .cfi_offset s10, -8
; RV32IZCMP-NEXT: .cfi_offset s11, -4
-; RV32IZCMP-NEXT: addi sp, sp, -32
-; RV32IZCMP-NEXT: .cfi_def_cfa_offset 144
-; RV32IZCMP-NEXT: sw t0, 88(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw t1, 84(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw t2, 80(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a1, 72(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a2, 68(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a3, 64(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a4, 60(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a5, 56(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a6, 52(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw a7, 48(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw t3, 44(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw t4, 40(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw t5, 36(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: sw t6, 32(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: .cfi_offset t0, -56
-; RV32IZCMP-NEXT: .cfi_offset t1, -60
-; RV32IZCMP-NEXT: .cfi_offset t2, -64
-; RV32IZCMP-NEXT: .cfi_offset a0, -68
-; RV32IZCMP-NEXT: .cfi_offset a1, -72
-; RV32IZCMP-NEXT: .cfi_offset a2, -76
-; RV32IZCMP-NEXT: .cfi_offset a3, -80
-; RV32IZCMP-NEXT: .cfi_offset a4, -84
-; RV32IZCMP-NEXT: .cfi_offset a5, -88
-; RV32IZCMP-NEXT: .cfi_offset a6, -92
-; RV32IZCMP-NEXT: .cfi_offset a7, -96
-; RV32IZCMP-NEXT: .cfi_offset t3, -100
-; RV32IZCMP-NEXT: .cfi_offset t4, -104
-; RV32IZCMP-NEXT: .cfi_offset t5, -108
-; RV32IZCMP-NEXT: .cfi_offset t6, -112
+; RV32IZCMP-NEXT: addi sp, sp, -48
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 160
+; RV32IZCMP-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t1, 88(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t2, 84(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a1, 76(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a2, 72(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a3, 68(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a4, 64(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a5, 60(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a6, 56(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a7, 52(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t3, 48(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: .cfi_offset t0, -68
+; RV32IZCMP-NEXT: .cfi_offset t1, -72
+; RV32IZCMP-NEXT: .cfi_offset t2, -76
+; RV32IZCMP-NEXT: .cfi_offset a0, -80
+; RV32IZCMP-NEXT: .cfi_offset a1, -84
+; RV32IZCMP-NEXT: .cfi_offset a2, -88
+; RV32IZCMP-NEXT: .cfi_offset a3, -92
+; RV32IZCMP-NEXT: .cfi_offset a4, -96
+; RV32IZCMP-NEXT: .cfi_offset a5, -100
+; RV32IZCMP-NEXT: .cfi_offset a6, -104
+; RV32IZCMP-NEXT: .cfi_offset a7, -108
+; RV32IZCMP-NEXT: .cfi_offset t3, -112
+; RV32IZCMP-NEXT: .cfi_offset t4, -116
+; RV32IZCMP-NEXT: .cfi_offset t5, -120
+; RV32IZCMP-NEXT: .cfi_offset t6, -124
; RV32IZCMP-NEXT: lui t0, %hi(var_test_irq)
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0)
-; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0)
-; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0)
-; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: addi a5, t0, %lo(var_test_irq)
; RV32IZCMP-NEXT: lw a0, 16(a5)
-; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 20(a5)
-; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw t4, 24(a5)
; RV32IZCMP-NEXT: lw t5, 28(a5)
; RV32IZCMP-NEXT: lw t6, 32(a5)
@@ -2389,33 +2397,33 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IZCMP-NEXT: sw t6, 32(a5)
; RV32IZCMP-NEXT: sw t5, 28(a5)
; RV32IZCMP-NEXT: sw t4, 24(a5)
-; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 20(a5)
; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 16(a5)
+; RV32IZCMP-NEXT: sw a0, 20(a5)
; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
+; RV32IZCMP-NEXT: sw a0, 16(a5)
; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IZCMP-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq)(t0)
-; RV32IZCMP-NEXT: lw t0, 88(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw t1, 84(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw t2, 80(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a0, 76(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a1, 72(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a2, 68(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a3, 64(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a4, 60(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a5, 56(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a6, 52(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw a7, 48(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw t3, 44(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw t4, 40(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw t5, 36(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: lw t6, 32(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t0, 92(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t1, 88(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t2, 84(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a1, 76(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a2, 72(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a4, 64(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a5, 60(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw a7, 52(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t3, 48(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t4, 44(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t5, 40(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: lw t6, 36(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: .cfi_restore t0
; RV32IZCMP-NEXT: .cfi_restore t1
; RV32IZCMP-NEXT: .cfi_restore t2
@@ -2431,7 +2439,7 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IZCMP-NEXT: .cfi_restore t4
; RV32IZCMP-NEXT: .cfi_restore t5
; RV32IZCMP-NEXT: .cfi_restore t6
-; RV32IZCMP-NEXT: addi sp, sp, 32
+; RV32IZCMP-NEXT: addi sp, sp, 48
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 112
; RV32IZCMP-NEXT: cm.pop {ra, s0-s11}, 112
; RV32IZCMP-NEXT: .cfi_restore ra
@@ -2467,52 +2475,52 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV64IZCMP-NEXT: .cfi_offset s9, -24
; RV64IZCMP-NEXT: .cfi_offset s10, -16
; RV64IZCMP-NEXT: .cfi_offset s11, -8
-; RV64IZCMP-NEXT: addi sp, sp, -112
-; RV64IZCMP-NEXT: .cfi_def_cfa_offset 272
-; RV64IZCMP-NEXT: sd t0, 160(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd t1, 152(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd t2, 144(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a0, 136(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a1, 128(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a2, 120(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a3, 112(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a4, 104(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a5, 96(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a6, 88(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd a7, 80(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd t3, 72(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd t4, 64(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd t5, 56(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: sd t6, 48(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: .cfi_offset t0, -112
-; RV64IZCMP-NEXT: .cfi_offset t1, -120
-; RV64IZCMP-NEXT: .cfi_offset t2, -128
-; RV64IZCMP-NEXT: .cfi_offset a0, -136
-; RV64IZCMP-NEXT: .cfi_offset a1, -144
-; RV64IZCMP-NEXT: .cfi_offset a2, -152
-; RV64IZCMP-NEXT: .cfi_offset a3, -160
-; RV64IZCMP-NEXT: .cfi_offset a4, -168
-; RV64IZCMP-NEXT: .cfi_offset a5, -176
-; RV64IZCMP-NEXT: .cfi_offset a6, -184
-; RV64IZCMP-NEXT: .cfi_offset a7, -192
-; RV64IZCMP-NEXT: .cfi_offset t3, -200
-; RV64IZCMP-NEXT: .cfi_offset t4, -208
-; RV64IZCMP-NEXT: .cfi_offset t5, -216
-; RV64IZCMP-NEXT: .cfi_offset t6, -224
+; RV64IZCMP-NEXT: addi sp, sp, -128
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 288
+; RV64IZCMP-NEXT: sd t0, 168(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t1, 160(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t2, 152(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a1, 136(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a2, 128(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a3, 120(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a4, 112(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a5, 104(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a6, 96(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a7, 88(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t3, 80(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: .cfi_offset t0, -120
+; RV64IZCMP-NEXT: .cfi_offset t1, -128
+; RV64IZCMP-NEXT: .cfi_offset t2, -136
+; RV64IZCMP-NEXT: .cfi_offset a0, -144
+; RV64IZCMP-NEXT: .cfi_offset a1, -152
+; RV64IZCMP-NEXT: .cfi_offset a2, -160
+; RV64IZCMP-NEXT: .cfi_offset a3, -168
+; RV64IZCMP-NEXT: .cfi_offset a4, -176
+; RV64IZCMP-NEXT: .cfi_offset a5, -184
+; RV64IZCMP-NEXT: .cfi_offset a6, -192
+; RV64IZCMP-NEXT: .cfi_offset a7, -200
+; RV64IZCMP-NEXT: .cfi_offset t3, -208
+; RV64IZCMP-NEXT: .cfi_offset t4, -216
+; RV64IZCMP-NEXT: .cfi_offset t5, -224
+; RV64IZCMP-NEXT: .cfi_offset t6, -232
; RV64IZCMP-NEXT: lui t0, %hi(var_test_irq)
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0)
-; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0)
-; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0)
-; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: addi a5, t0, %lo(var_test_irq)
; RV64IZCMP-NEXT: lw a0, 16(a5)
-; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: lw a0, 20(a5)
-; RV64IZCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: lw t4, 24(a5)
; RV64IZCMP-NEXT: lw t5, 28(a5)
; RV64IZCMP-NEXT: lw t6, 32(a5)
@@ -2565,33 +2573,33 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV64IZCMP-NEXT: sw t6, 32(a5)
; RV64IZCMP-NEXT: sw t5, 28(a5)
; RV64IZCMP-NEXT: sw t4, 24(a5)
-; RV64IZCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, 20(a5)
; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, 16(a5)
+; RV64IZCMP-NEXT: sw a0, 20(a5)
; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
+; RV64IZCMP-NEXT: sw a0, 16(a5)
; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV64IZCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq)(t0)
-; RV64IZCMP-NEXT: ld t0, 160(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld t1, 152(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld t2, 144(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a0, 136(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a1, 128(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a2, 120(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a3, 112(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a4, 104(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a5, 96(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a6, 88(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld a7, 80(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld t3, 72(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld t4, 64(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld t5, 56(sp) # 8-byte Folded Reload
-; RV64IZCMP-NEXT: ld t6, 48(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t0, 168(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t1, 160(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t2, 152(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a1, 136(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a2, 128(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a3, 120(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a4, 112(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a5, 104(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a6, 96(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a7, 88(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t3, 80(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t4, 72(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t5, 64(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld t6, 56(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: .cfi_restore t0
; RV64IZCMP-NEXT: .cfi_restore t1
; RV64IZCMP-NEXT: .cfi_restore t2
@@ -2607,7 +2615,7 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV64IZCMP-NEXT: .cfi_restore t4
; RV64IZCMP-NEXT: .cfi_restore t5
; RV64IZCMP-NEXT: .cfi_restore t6
-; RV64IZCMP-NEXT: addi sp, sp, 112
+; RV64IZCMP-NEXT: addi sp, sp, 128
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 160
; RV64IZCMP-NEXT: cm.pop {ra, s0-s11}, 160
; RV64IZCMP-NEXT: .cfi_restore ra
@@ -2643,52 +2651,52 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IZCMP-SR-NEXT: .cfi_offset s9, -12
; RV32IZCMP-SR-NEXT: .cfi_offset s10, -8
; RV32IZCMP-SR-NEXT: .cfi_offset s11, -4
-; RV32IZCMP-SR-NEXT: addi sp, sp, -32
-; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 144
-; RV32IZCMP-SR-NEXT: sw t0, 88(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw t1, 84(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw t2, 80(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a1, 72(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a2, 68(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a3, 64(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a4, 60(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a5, 56(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a6, 52(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw a7, 48(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw t3, 44(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw t4, 40(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw t5, 36(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: sw t6, 32(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: .cfi_offset t0, -56
-; RV32IZCMP-SR-NEXT: .cfi_offset t1, -60
-; RV32IZCMP-SR-NEXT: .cfi_offset t2, -64
-; RV32IZCMP-SR-NEXT: .cfi_offset a0, -68
-; RV32IZCMP-SR-NEXT: .cfi_offset a1, -72
-; RV32IZCMP-SR-NEXT: .cfi_offset a2, -76
-; RV32IZCMP-SR-NEXT: .cfi_offset a3, -80
-; RV32IZCMP-SR-NEXT: .cfi_offset a4, -84
-; RV32IZCMP-SR-NEXT: .cfi_offset a5, -88
-; RV32IZCMP-SR-NEXT: .cfi_offset a6, -92
-; RV32IZCMP-SR-NEXT: .cfi_offset a7, -96
-; RV32IZCMP-SR-NEXT: .cfi_offset t3, -100
-; RV32IZCMP-SR-NEXT: .cfi_offset t4, -104
-; RV32IZCMP-SR-NEXT: .cfi_offset t5, -108
-; RV32IZCMP-SR-NEXT: .cfi_offset t6, -112
+; RV32IZCMP-SR-NEXT: addi sp, sp, -48
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 160
+; RV32IZCMP-SR-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t1, 88(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t2, 84(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a1, 76(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a2, 72(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a3, 68(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a4, 64(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a5, 60(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a6, 56(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a7, 52(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t3, 48(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: .cfi_offset t0, -68
+; RV32IZCMP-SR-NEXT: .cfi_offset t1, -72
+; RV32IZCMP-SR-NEXT: .cfi_offset t2, -76
+; RV32IZCMP-SR-NEXT: .cfi_offset a0, -80
+; RV32IZCMP-SR-NEXT: .cfi_offset a1, -84
+; RV32IZCMP-SR-NEXT: .cfi_offset a2, -88
+; RV32IZCMP-SR-NEXT: .cfi_offset a3, -92
+; RV32IZCMP-SR-NEXT: .cfi_offset a4, -96
+; RV32IZCMP-SR-NEXT: .cfi_offset a5, -100
+; RV32IZCMP-SR-NEXT: .cfi_offset a6, -104
+; RV32IZCMP-SR-NEXT: .cfi_offset a7, -108
+; RV32IZCMP-SR-NEXT: .cfi_offset t3, -112
+; RV32IZCMP-SR-NEXT: .cfi_offset t4, -116
+; RV32IZCMP-SR-NEXT: .cfi_offset t5, -120
+; RV32IZCMP-SR-NEXT: .cfi_offset t6, -124
; RV32IZCMP-SR-NEXT: lui t0, %hi(var_test_irq)
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV32IZCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0)
-; RV32IZCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0)
-; RV32IZCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0)
-; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq)
; RV32IZCMP-SR-NEXT: lw a0, 16(a5)
-; RV32IZCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw a0, 20(a5)
-; RV32IZCMP-SR-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw t4, 24(a5)
; RV32IZCMP-SR-NEXT: lw t5, 28(a5)
; RV32IZCMP-SR-NEXT: lw t6, 32(a5)
@@ -2741,33 +2749,33 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IZCMP-SR-NEXT: sw t6, 32(a5)
; RV32IZCMP-SR-NEXT: sw t5, 28(a5)
; RV32IZCMP-SR-NEXT: sw t4, 24(a5)
-; RV32IZCMP-SR-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, 20(a5)
; RV32IZCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, 16(a5)
+; RV32IZCMP-SR-NEXT: sw a0, 20(a5)
; RV32IZCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
+; RV32IZCMP-SR-NEXT: sw a0, 16(a5)
; RV32IZCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
; RV32IZCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
; RV32IZCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IZCMP-SR-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0)
-; RV32IZCMP-SR-NEXT: lw t0, 88(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw t1, 84(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw t2, 80(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a0, 76(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a1, 72(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a2, 68(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a3, 64(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a4, 60(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a5, 56(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a6, 52(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw a7, 48(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw t3, 44(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw t4, 40(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw t5, 36(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: lw t6, 32(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t0, 92(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t1, 88(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t2, 84(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a1, 76(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a2, 72(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a4, 64(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a5, 60(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw a7, 52(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t3, 48(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t4, 44(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t5, 40(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: lw t6, 36(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: .cfi_restore t0
; RV32IZCMP-SR-NEXT: .cfi_restore t1
; RV32IZCMP-SR-NEXT: .cfi_restore t2
@@ -2783,7 +2791,7 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IZCMP-SR-NEXT: .cfi_restore t4
; RV32IZCMP-SR-NEXT: .cfi_restore t5
; RV32IZCMP-SR-NEXT: .cfi_restore t6
-; RV32IZCMP-SR-NEXT: addi sp, sp, 32
+; RV32IZCMP-SR-NEXT: addi sp, sp, 48
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 112
; RV32IZCMP-SR-NEXT: cm.pop {ra, s0-s11}, 112
; RV32IZCMP-SR-NEXT: .cfi_restore ra
@@ -2819,52 +2827,52 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV64IZCMP-SR-NEXT: .cfi_offset s9, -24
; RV64IZCMP-SR-NEXT: .cfi_offset s10, -16
; RV64IZCMP-SR-NEXT: .cfi_offset s11, -8
-; RV64IZCMP-SR-NEXT: addi sp, sp, -112
-; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 272
-; RV64IZCMP-SR-NEXT: sd t0, 160(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd t1, 152(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd t2, 144(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a0, 136(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a1, 128(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a2, 120(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a3, 112(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a4, 104(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a5, 96(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a6, 88(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd a7, 80(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd t3, 72(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd t4, 64(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd t5, 56(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: sd t6, 48(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: .cfi_offset t0, -112
-; RV64IZCMP-SR-NEXT: .cfi_offset t1, -120
-; RV64IZCMP-SR-NEXT: .cfi_offset t2, -128
-; RV64IZCMP-SR-NEXT: .cfi_offset a0, -136
-; RV64IZCMP-SR-NEXT: .cfi_offset a1, -144
-; RV64IZCMP-SR-NEXT: .cfi_offset a2, -152
-; RV64IZCMP-SR-NEXT: .cfi_offset a3, -160
-; RV64IZCMP-SR-NEXT: .cfi_offset a4, -168
-; RV64IZCMP-SR-NEXT: .cfi_offset a5, -176
-; RV64IZCMP-SR-NEXT: .cfi_offset a6, -184
-; RV64IZCMP-SR-NEXT: .cfi_offset a7, -192
-; RV64IZCMP-SR-NEXT: .cfi_offset t3, -200
-; RV64IZCMP-SR-NEXT: .cfi_offset t4, -208
-; RV64IZCMP-SR-NEXT: .cfi_offset t5, -216
-; RV64IZCMP-SR-NEXT: .cfi_offset t6, -224
+; RV64IZCMP-SR-NEXT: addi sp, sp, -128
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 288
+; RV64IZCMP-SR-NEXT: sd t0, 168(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t1, 160(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t2, 152(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a1, 136(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a2, 128(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a3, 120(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a4, 112(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a5, 104(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a6, 96(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a7, 88(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t3, 80(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: .cfi_offset t0, -120
+; RV64IZCMP-SR-NEXT: .cfi_offset t1, -128
+; RV64IZCMP-SR-NEXT: .cfi_offset t2, -136
+; RV64IZCMP-SR-NEXT: .cfi_offset a0, -144
+; RV64IZCMP-SR-NEXT: .cfi_offset a1, -152
+; RV64IZCMP-SR-NEXT: .cfi_offset a2, -160
+; RV64IZCMP-SR-NEXT: .cfi_offset a3, -168
+; RV64IZCMP-SR-NEXT: .cfi_offset a4, -176
+; RV64IZCMP-SR-NEXT: .cfi_offset a5, -184
+; RV64IZCMP-SR-NEXT: .cfi_offset a6, -192
+; RV64IZCMP-SR-NEXT: .cfi_offset a7, -200
+; RV64IZCMP-SR-NEXT: .cfi_offset t3, -208
+; RV64IZCMP-SR-NEXT: .cfi_offset t4, -216
+; RV64IZCMP-SR-NEXT: .cfi_offset t5, -224
+; RV64IZCMP-SR-NEXT: .cfi_offset t6, -232
; RV64IZCMP-SR-NEXT: lui t0, %hi(var_test_irq)
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0)
-; RV64IZCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0)
-; RV64IZCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0)
-; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq)
; RV64IZCMP-SR-NEXT: lw a0, 16(a5)
-; RV64IZCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: lw a0, 20(a5)
-; RV64IZCMP-SR-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: lw t4, 24(a5)
; RV64IZCMP-SR-NEXT: lw t5, 28(a5)
; RV64IZCMP-SR-NEXT: lw t6, 32(a5)
@@ -2917,33 +2925,33 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV64IZCMP-SR-NEXT: sw t6, 32(a5)
; RV64IZCMP-SR-NEXT: sw t5, 28(a5)
; RV64IZCMP-SR-NEXT: sw t4, 24(a5)
-; RV64IZCMP-SR-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: sw a0, 20(a5)
; RV64IZCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: sw a0, 16(a5)
+; RV64IZCMP-SR-NEXT: sw a0, 20(a5)
; RV64IZCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
+; RV64IZCMP-SR-NEXT: sw a0, 16(a5)
; RV64IZCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
; RV64IZCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
; RV64IZCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV64IZCMP-SR-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0)
-; RV64IZCMP-SR-NEXT: ld t0, 160(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld t1, 152(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld t2, 144(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a0, 136(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a1, 128(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a2, 120(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a3, 112(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a4, 104(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a5, 96(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a6, 88(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld a7, 80(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld t3, 72(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld t4, 64(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld t5, 56(sp) # 8-byte Folded Reload
-; RV64IZCMP-SR-NEXT: ld t6, 48(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t0, 168(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t1, 160(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t2, 152(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a1, 136(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a2, 128(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a3, 120(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a4, 112(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a5, 104(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a6, 96(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a7, 88(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t3, 80(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t4, 72(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t5, 64(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld t6, 56(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: .cfi_restore t0
; RV64IZCMP-SR-NEXT: .cfi_restore t1
; RV64IZCMP-SR-NEXT: .cfi_restore t2
@@ -2959,7 +2967,7 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV64IZCMP-SR-NEXT: .cfi_restore t4
; RV64IZCMP-SR-NEXT: .cfi_restore t5
; RV64IZCMP-SR-NEXT: .cfi_restore t6
-; RV64IZCMP-SR-NEXT: addi sp, sp, 112
+; RV64IZCMP-SR-NEXT: addi sp, sp, 128
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 160
; RV64IZCMP-SR-NEXT: cm.pop {ra, s0-s11}, 160
; RV64IZCMP-SR-NEXT: .cfi_restore ra
@@ -3381,8 +3389,8 @@ define void @callee_with_irq() "interrupt"="machine" {
define void @callee_no_irq() {
; RV32IZCMP-LABEL: callee_no_irq:
; RV32IZCMP: # %bb.0:
-; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -80
-; RV32IZCMP-NEXT: .cfi_def_cfa_offset 80
+; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -96
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 96
; RV32IZCMP-NEXT: .cfi_offset ra, -52
; RV32IZCMP-NEXT: .cfi_offset s0, -48
; RV32IZCMP-NEXT: .cfi_offset s1, -44
@@ -3398,18 +3406,18 @@ define void @callee_no_irq() {
; RV32IZCMP-NEXT: .cfi_offset s11, -4
; RV32IZCMP-NEXT: lui t0, %hi(var_test_irq)
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0)
-; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0)
-; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0)
-; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: addi a5, t0, %lo(var_test_irq)
; RV32IZCMP-NEXT: lw a0, 16(a5)
-; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw a0, 20(a5)
-; RV32IZCMP-NEXT: sw a0, 4(sp) # 4-byte Folded Spill
+; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: lw t4, 24(a5)
; RV32IZCMP-NEXT: lw t5, 28(a5)
; RV32IZCMP-NEXT: lw t6, 32(a5)
@@ -3462,19 +3470,19 @@ define void @callee_no_irq() {
; RV32IZCMP-NEXT: sw t6, 32(a5)
; RV32IZCMP-NEXT: sw t5, 28(a5)
; RV32IZCMP-NEXT: sw t4, 24(a5)
-; RV32IZCMP-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 20(a5)
; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, 16(a5)
+; RV32IZCMP-NEXT: sw a0, 20(a5)
; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
+; RV32IZCMP-NEXT: sw a0, 16(a5)
; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq)(t0)
-; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 80
+; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 96
;
; RV64IZCMP-LABEL: callee_no_irq:
; RV64IZCMP: # %bb.0:
@@ -3495,18 +3503,18 @@ define void @callee_no_irq() {
; RV64IZCMP-NEXT: .cfi_offset s11, -8
; RV64IZCMP-NEXT: lui t0, %hi(var_test_irq)
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0)
; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0)
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0)
; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0)
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0)
; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0)
+; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: addi a5, t0, %lo(var_test_irq)
; RV64IZCMP-NEXT: lw a0, 16(a5)
-; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: lw a0, 20(a5)
; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-NEXT: lw a0, 20(a5)
+; RV64IZCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: lw t4, 24(a5)
; RV64IZCMP-NEXT: lw t5, 28(a5)
; RV64IZCMP-NEXT: lw t6, 32(a5)
@@ -3559,24 +3567,24 @@ define void @callee_no_irq() {
; RV64IZCMP-NEXT: sw t6, 32(a5)
; RV64IZCMP-NEXT: sw t5, 28(a5)
; RV64IZCMP-NEXT: sw t4, 24(a5)
-; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 20(a5)
-; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, 16(a5)
-; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
-; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
-; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
-; RV64IZCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq)(t0)
; RV64IZCMP-NEXT: cm.popret {ra, s0-s11}, 160
;
; RV32IZCMP-SR-LABEL: callee_no_irq:
; RV32IZCMP-SR: # %bb.0:
-; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -80
-; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 80
+; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -96
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 96
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -52
; RV32IZCMP-SR-NEXT: .cfi_offset s0, -48
; RV32IZCMP-SR-NEXT: .cfi_offset s1, -44
@@ -3592,18 +3600,18 @@ define void @callee_no_irq() {
; RV32IZCMP-SR-NEXT: .cfi_offset s11, -4
; RV32IZCMP-SR-NEXT: lui t0, %hi(var_test_irq)
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV32IZCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0)
-; RV32IZCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0)
-; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0)
-; RV32IZCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq)
; RV32IZCMP-SR-NEXT: lw a0, 16(a5)
-; RV32IZCMP-SR-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw a0, 20(a5)
-; RV32IZCMP-SR-NEXT: sw a0, 4(sp) # 4-byte Folded Spill
+; RV32IZCMP-SR-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw t4, 24(a5)
; RV32IZCMP-SR-NEXT: lw t5, 28(a5)
; RV32IZCMP-SR-NEXT: lw t6, 32(a5)
@@ -3656,19 +3664,19 @@ define void @callee_no_irq() {
; RV32IZCMP-SR-NEXT: sw t6, 32(a5)
; RV32IZCMP-SR-NEXT: sw t5, 28(a5)
; RV32IZCMP-SR-NEXT: sw t4, 24(a5)
-; RV32IZCMP-SR-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, 20(a5)
; RV32IZCMP-SR-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, 16(a5)
+; RV32IZCMP-SR-NEXT: sw a0, 20(a5)
; RV32IZCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
+; RV32IZCMP-SR-NEXT: sw a0, 16(a5)
; RV32IZCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
; RV32IZCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
; RV32IZCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IZCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0)
-; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s11}, 80
+; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s11}, 96
;
; RV64IZCMP-SR-LABEL: callee_no_irq:
; RV64IZCMP-SR: # %bb.0:
@@ -3689,18 +3697,18 @@ define void @callee_no_irq() {
; RV64IZCMP-SR-NEXT: .cfi_offset s11, -8
; RV64IZCMP-SR-NEXT: lui t0, %hi(var_test_irq)
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV64IZCMP-SR-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0)
; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0)
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0)
; RV64IZCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0)
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0)
; RV64IZCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0)
+; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq)
; RV64IZCMP-SR-NEXT: lw a0, 16(a5)
-; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: lw a0, 20(a5)
; RV64IZCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IZCMP-SR-NEXT: lw a0, 20(a5)
+; RV64IZCMP-SR-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: lw t4, 24(a5)
; RV64IZCMP-SR-NEXT: lw t5, 28(a5)
; RV64IZCMP-SR-NEXT: lw t6, 32(a5)
@@ -3753,17 +3761,17 @@ define void @callee_no_irq() {
; RV64IZCMP-SR-NEXT: sw t6, 32(a5)
; RV64IZCMP-SR-NEXT: sw t5, 28(a5)
; RV64IZCMP-SR-NEXT: sw t4, 24(a5)
-; RV64IZCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, 20(a5)
-; RV64IZCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, 16(a5)
-; RV64IZCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
-; RV64IZCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
-; RV64IZCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
-; RV64IZCMP-SR-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IZCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0)
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s11}, 160
;
@@ -4053,71 +4061,71 @@ declare ptr @llvm.frameaddress.p0(i32 immarg)
define i32 @use_fp(i32 %x) {
; RV32IZCMP-LABEL: use_fp:
; RV32IZCMP: # %bb.0: # %entry
-; RV32IZCMP-NEXT: cm.push {ra, s0-s1}, -16
-; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-NEXT: cm.push {ra, s0-s1}, -32
+; RV32IZCMP-NEXT: .cfi_def_cfa_offset 32
; RV32IZCMP-NEXT: .cfi_offset ra, -12
; RV32IZCMP-NEXT: .cfi_offset s0, -8
; RV32IZCMP-NEXT: .cfi_offset s1, -4
-; RV32IZCMP-NEXT: addi s0, sp, 16
+; RV32IZCMP-NEXT: addi s0, sp, 32
; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-NEXT: mv s1, a0
-; RV32IZCMP-NEXT: addi a1, s0, -16
+; RV32IZCMP-NEXT: addi a1, s0, -20
; RV32IZCMP-NEXT: mv a0, s0
; RV32IZCMP-NEXT: call bar
; RV32IZCMP-NEXT: mv a0, s1
-; RV32IZCMP-NEXT: .cfi_def_cfa sp, 16
-; RV32IZCMP-NEXT: cm.popret {ra, s0-s1}, 16
+; RV32IZCMP-NEXT: .cfi_def_cfa sp, 32
+; RV32IZCMP-NEXT: cm.popret {ra, s0-s1}, 32
;
; RV64IZCMP-LABEL: use_fp:
; RV64IZCMP: # %bb.0: # %entry
-; RV64IZCMP-NEXT: cm.push {ra, s0-s1}, -32
-; RV64IZCMP-NEXT: .cfi_def_cfa_offset 32
+; RV64IZCMP-NEXT: cm.push {ra, s0-s1}, -48
+; RV64IZCMP-NEXT: .cfi_def_cfa_offset 48
; RV64IZCMP-NEXT: .cfi_offset ra, -24
; RV64IZCMP-NEXT: .cfi_offset s0, -16
; RV64IZCMP-NEXT: .cfi_offset s1, -8
-; RV64IZCMP-NEXT: addi s0, sp, 32
+; RV64IZCMP-NEXT: addi s0, sp, 48
; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-NEXT: mv s1, a0
-; RV64IZCMP-NEXT: addi a1, s0, -28
+; RV64IZCMP-NEXT: addi a1, s0, -36
; RV64IZCMP-NEXT: mv a0, s0
; RV64IZCMP-NEXT: call bar
; RV64IZCMP-NEXT: mv a0, s1
-; RV64IZCMP-NEXT: .cfi_def_cfa sp, 32
-; RV64IZCMP-NEXT: cm.popret {ra, s0-s1}, 32
+; RV64IZCMP-NEXT: .cfi_def_cfa sp, 48
+; RV64IZCMP-NEXT: cm.popret {ra, s0-s1}, 48
;
; RV32IZCMP-SR-LABEL: use_fp:
; RV32IZCMP-SR: # %bb.0: # %entry
-; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -16
-; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -32
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 32
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -12
; RV32IZCMP-SR-NEXT: .cfi_offset s0, -8
; RV32IZCMP-SR-NEXT: .cfi_offset s1, -4
-; RV32IZCMP-SR-NEXT: addi s0, sp, 16
+; RV32IZCMP-SR-NEXT: addi s0, sp, 32
; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-SR-NEXT: mv s1, a0
-; RV32IZCMP-SR-NEXT: addi a1, s0, -16
+; RV32IZCMP-SR-NEXT: addi a1, s0, -20
; RV32IZCMP-SR-NEXT: mv a0, s0
; RV32IZCMP-SR-NEXT: call bar
; RV32IZCMP-SR-NEXT: mv a0, s1
-; RV32IZCMP-SR-NEXT: .cfi_def_cfa sp, 16
-; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 16
+; RV32IZCMP-SR-NEXT: .cfi_def_cfa sp, 32
+; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 32
;
; RV64IZCMP-SR-LABEL: use_fp:
; RV64IZCMP-SR: # %bb.0: # %entry
-; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -32
-; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 32
+; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -48
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 48
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -24
; RV64IZCMP-SR-NEXT: .cfi_offset s0, -16
; RV64IZCMP-SR-NEXT: .cfi_offset s1, -8
-; RV64IZCMP-SR-NEXT: addi s0, sp, 32
+; RV64IZCMP-SR-NEXT: addi s0, sp, 48
; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-SR-NEXT: mv s1, a0
-; RV64IZCMP-SR-NEXT: addi a1, s0, -28
+; RV64IZCMP-SR-NEXT: addi a1, s0, -36
; RV64IZCMP-SR-NEXT: mv a0, s0
; RV64IZCMP-SR-NEXT: call bar
; RV64IZCMP-SR-NEXT: mv a0, s1
-; RV64IZCMP-SR-NEXT: .cfi_def_cfa sp, 32
-; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 32
+; RV64IZCMP-SR-NEXT: .cfi_def_cfa sp, 48
+; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 48
;
; RV32I-LABEL: use_fp:
; RV32I: # %bb.0: # %entry
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
index d78603c..526ff09 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
@@ -108,35 +108,39 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
;
; SPILL-O2-ZCMP-LABEL: foo:
; SPILL-O2-ZCMP: # %bb.0:
-; SPILL-O2-ZCMP-NEXT: cm.push {ra, s0}, -16
-; SPILL-O2-ZCMP-NEXT: .cfi_def_cfa_offset 16
+; SPILL-O2-ZCMP-NEXT: cm.push {ra, s0}, -32
+; SPILL-O2-ZCMP-NEXT: .cfi_def_cfa_offset 32
; SPILL-O2-ZCMP-NEXT: .cfi_offset ra, -8
; SPILL-O2-ZCMP-NEXT: .cfi_offset s0, -4
; SPILL-O2-ZCMP-NEXT: csrr a1, vlenb
; SPILL-O2-ZCMP-NEXT: slli a1, a1, 1
; SPILL-O2-ZCMP-NEXT: sub sp, sp, a1
-; SPILL-O2-ZCMP-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; SPILL-O2-ZCMP-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
; SPILL-O2-ZCMP-NEXT: mv s0, a0
-; SPILL-O2-ZCMP-NEXT: vs1r.v v8, (sp) # vscale x 8-byte Folded Spill
+; SPILL-O2-ZCMP-NEXT: addi a1, sp, 16
+; SPILL-O2-ZCMP-NEXT: vs1r.v v8, (a1) # vscale x 8-byte Folded Spill
; SPILL-O2-ZCMP-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; SPILL-O2-ZCMP-NEXT: vfadd.vv v9, v8, v9
; SPILL-O2-ZCMP-NEXT: csrr a0, vlenb
; SPILL-O2-ZCMP-NEXT: add a0, a0, sp
+; SPILL-O2-ZCMP-NEXT: addi a0, a0, 16
; SPILL-O2-ZCMP-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-ZCMP-NEXT: lui a0, %hi(.L.str)
; SPILL-O2-ZCMP-NEXT: addi a0, a0, %lo(.L.str)
; SPILL-O2-ZCMP-NEXT: call puts
; SPILL-O2-ZCMP-NEXT: csrr a0, vlenb
; SPILL-O2-ZCMP-NEXT: add a0, a0, sp
+; SPILL-O2-ZCMP-NEXT: addi a0, a0, 16
; SPILL-O2-ZCMP-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; SPILL-O2-ZCMP-NEXT: vl1r.v v9, (sp) # vscale x 8-byte Folded Reload
+; SPILL-O2-ZCMP-NEXT: addi a0, sp, 16
+; SPILL-O2-ZCMP-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-ZCMP-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-ZCMP-NEXT: vfadd.vv v8, v9, v8
; SPILL-O2-ZCMP-NEXT: csrr a0, vlenb
; SPILL-O2-ZCMP-NEXT: slli a0, a0, 1
; SPILL-O2-ZCMP-NEXT: add sp, sp, a0
-; SPILL-O2-ZCMP-NEXT: .cfi_def_cfa sp, 16
-; SPILL-O2-ZCMP-NEXT: cm.popret {ra, s0}, 16
+; SPILL-O2-ZCMP-NEXT: .cfi_def_cfa sp, 32
+; SPILL-O2-ZCMP-NEXT: cm.popret {ra, s0}, 32
;
; SPILL-O0-VSETVLI-LABEL: foo:
; SPILL-O0-VSETVLI: # %bb.0:
@@ -227,33 +231,37 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
;
; SPILL-O2-ZCMP-VSETVLI-LABEL: foo:
; SPILL-O2-ZCMP-VSETVLI: # %bb.0:
-; SPILL-O2-ZCMP-VSETVLI-NEXT: cm.push {ra, s0}, -16
-; SPILL-O2-ZCMP-VSETVLI-NEXT: .cfi_def_cfa_offset 16
+; SPILL-O2-ZCMP-VSETVLI-NEXT: cm.push {ra, s0}, -32
+; SPILL-O2-ZCMP-VSETVLI-NEXT: .cfi_def_cfa_offset 32
; SPILL-O2-ZCMP-VSETVLI-NEXT: .cfi_offset ra, -8
; SPILL-O2-ZCMP-VSETVLI-NEXT: .cfi_offset s0, -4
; SPILL-O2-ZCMP-VSETVLI-NEXT: vsetvli a1, zero, e8, m2, ta, ma
; SPILL-O2-ZCMP-VSETVLI-NEXT: sub sp, sp, a1
-; SPILL-O2-ZCMP-VSETVLI-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; SPILL-O2-ZCMP-VSETVLI-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
; SPILL-O2-ZCMP-VSETVLI-NEXT: mv s0, a0
-; SPILL-O2-ZCMP-VSETVLI-NEXT: vs1r.v v8, (sp) # vscale x 8-byte Folded Spill
+; SPILL-O2-ZCMP-VSETVLI-NEXT: addi a1, sp, 16
+; SPILL-O2-ZCMP-VSETVLI-NEXT: vs1r.v v8, (a1) # vscale x 8-byte Folded Spill
; SPILL-O2-ZCMP-VSETVLI-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; SPILL-O2-ZCMP-VSETVLI-NEXT: vfadd.vv v9, v8, v9
; SPILL-O2-ZCMP-VSETVLI-NEXT: csrr a0, vlenb
; SPILL-O2-ZCMP-VSETVLI-NEXT: add a0, a0, sp
+; SPILL-O2-ZCMP-VSETVLI-NEXT: addi a0, a0, 16
; SPILL-O2-ZCMP-VSETVLI-NEXT: vs1r.v v9, (a0) # vscale x 8-byte Folded Spill
; SPILL-O2-ZCMP-VSETVLI-NEXT: lui a0, %hi(.L.str)
; SPILL-O2-ZCMP-VSETVLI-NEXT: addi a0, a0, %lo(.L.str)
; SPILL-O2-ZCMP-VSETVLI-NEXT: call puts
; SPILL-O2-ZCMP-VSETVLI-NEXT: csrr a0, vlenb
; SPILL-O2-ZCMP-VSETVLI-NEXT: add a0, a0, sp
+; SPILL-O2-ZCMP-VSETVLI-NEXT: addi a0, a0, 16
; SPILL-O2-ZCMP-VSETVLI-NEXT: vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; SPILL-O2-ZCMP-VSETVLI-NEXT: vl1r.v v9, (sp) # vscale x 8-byte Folded Reload
+; SPILL-O2-ZCMP-VSETVLI-NEXT: addi a0, sp, 16
+; SPILL-O2-ZCMP-VSETVLI-NEXT: vl1r.v v9, (a0) # vscale x 8-byte Folded Reload
; SPILL-O2-ZCMP-VSETVLI-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-ZCMP-VSETVLI-NEXT: vfadd.vv v8, v9, v8
; SPILL-O2-ZCMP-VSETVLI-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; SPILL-O2-ZCMP-VSETVLI-NEXT: add sp, sp, a0
-; SPILL-O2-ZCMP-VSETVLI-NEXT: .cfi_def_cfa sp, 16
-; SPILL-O2-ZCMP-VSETVLI-NEXT: cm.popret {ra, s0}, 16
+; SPILL-O2-ZCMP-VSETVLI-NEXT: .cfi_def_cfa sp, 32
+; SPILL-O2-ZCMP-VSETVLI-NEXT: cm.popret {ra, s0}, 32
{
%x = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 7, i32 %gvl)
%call = call signext i32 @puts(ptr @.str)
diff --git a/llvm/test/CodeGen/RISCV/xqccmp-additional-stack.ll b/llvm/test/CodeGen/RISCV/xqccmp-additional-stack.ll
index 5a5a1cc..14e6b9b 100644
--- a/llvm/test/CodeGen/RISCV/xqccmp-additional-stack.ll
+++ b/llvm/test/CodeGen/RISCV/xqccmp-additional-stack.ll
@@ -9,8 +9,8 @@ define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 {
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: addi sp, sp, -4
-; RV32-NEXT: .cfi_def_cfa_offset 20
+; RV32-NEXT: addi sp, sp, -8
+; RV32-NEXT: .cfi_def_cfa_offset 24
; RV32-NEXT: sw a4, 4(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a2, 0(sp) # 4-byte Folded Spill
; RV32-NEXT: mv a2, a1
@@ -34,7 +34,7 @@ define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 {
; RV32-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
; RV32-NEXT: sb a0, 0(s0)
; RV32-NEXT: mv a0, s1
-; RV32-NEXT: addi sp, sp, 4
+; RV32-NEXT: addi sp, sp, 8
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: qc.cm.popret {ra, s0-s1}, 16
entry:
diff --git a/llvm/test/CodeGen/RISCV/xqccmp-callee-saved-gprs.ll b/llvm/test/CodeGen/RISCV/xqccmp-callee-saved-gprs.ll
index 957469a..529d1d3 100644
--- a/llvm/test/CodeGen/RISCV/xqccmp-callee-saved-gprs.ll
+++ b/llvm/test/CodeGen/RISCV/xqccmp-callee-saved-gprs.ll
@@ -16,8 +16,8 @@
define void @callee() {
; RV32IXQCCMP-LABEL: callee:
; RV32IXQCCMP: # %bb.0:
-; RV32IXQCCMP-NEXT: qc.cm.push {ra, s0-s11}, -80
-; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 80
+; RV32IXQCCMP-NEXT: qc.cm.push {ra, s0-s11}, -96
+; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 96
; RV32IXQCCMP-NEXT: .cfi_offset ra, -4
; RV32IXQCCMP-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-NEXT: .cfi_offset s1, -12
@@ -33,18 +33,18 @@ define void @callee() {
; RV32IXQCCMP-NEXT: .cfi_offset s11, -52
; RV32IXQCCMP-NEXT: lui t0, %hi(var)
; RV32IXQCCMP-NEXT: lw a0, %lo(var)(t0)
-; RV32IXQCCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, %lo(var+4)(t0)
-; RV32IXQCCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, %lo(var+8)(t0)
-; RV32IXQCCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, %lo(var+12)(t0)
-; RV32IXQCCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: addi a5, t0, %lo(var)
; RV32IXQCCMP-NEXT: lw a0, 16(a5)
-; RV32IXQCCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 20(a5)
-; RV32IXQCCMP-NEXT: sw a0, 4(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw t4, 24(a5)
; RV32IXQCCMP-NEXT: lw t5, 28(a5)
; RV32IXQCCMP-NEXT: lw t6, 32(a5)
@@ -97,24 +97,24 @@ define void @callee() {
; RV32IXQCCMP-NEXT: sw t6, 32(a5)
; RV32IXQCCMP-NEXT: sw t5, 28(a5)
; RV32IXQCCMP-NEXT: sw t4, 24(a5)
-; RV32IXQCCMP-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 20(a5)
; RV32IXQCCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 16(a5)
+; RV32IXQCCMP-NEXT: sw a0, 20(a5)
; RV32IXQCCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, %lo(var+12)(t0)
+; RV32IXQCCMP-NEXT: sw a0, 16(a5)
; RV32IXQCCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, %lo(var+8)(t0)
+; RV32IXQCCMP-NEXT: sw a0, %lo(var+12)(t0)
; RV32IXQCCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, %lo(var+4)(t0)
+; RV32IXQCCMP-NEXT: sw a0, %lo(var+8)(t0)
; RV32IXQCCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: sw a0, %lo(var+4)(t0)
+; RV32IXQCCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
; RV32IXQCCMP-NEXT: sw a0, %lo(var)(t0)
-; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0-s11}, 80
+; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0-s11}, 96
;
; RV32IXQCCMP-WITH-FP-LABEL: callee:
; RV32IXQCCMP-WITH-FP: # %bb.0:
-; RV32IXQCCMP-WITH-FP-NEXT: qc.cm.pushfp {ra, s0-s11}, -80
-; RV32IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 80
+; RV32IXQCCMP-WITH-FP-NEXT: qc.cm.pushfp {ra, s0-s11}, -96
+; RV32IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 96
; RV32IXQCCMP-WITH-FP-NEXT: .cfi_offset ra, -4
; RV32IXQCCMP-WITH-FP-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-WITH-FP-NEXT: .cfi_offset s1, -12
@@ -131,20 +131,20 @@ define void @callee() {
; RV32IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa s0, 0
; RV32IXQCCMP-WITH-FP-NEXT: lui t1, %hi(var)
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var)(t1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -56(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -68(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+4)(t1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -60(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -72(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+8)(t1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -64(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -76(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(t1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -68(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -80(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: addi a5, t1, %lo(var)
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 16(a5)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -72(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -84(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 20(a5)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -76(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -88(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 24(a5)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -80(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -92(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw t5, 28(a5)
; RV32IXQCCMP-WITH-FP-NEXT: lw t6, 32(a5)
; RV32IXQCCMP-WITH-FP-NEXT: lw s2, 36(a5)
@@ -195,22 +195,22 @@ define void @callee() {
; RV32IXQCCMP-WITH-FP-NEXT: sw s2, 36(a5)
; RV32IXQCCMP-WITH-FP-NEXT: sw t6, 32(a5)
; RV32IXQCCMP-WITH-FP-NEXT: sw t5, 28(a5)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -80(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -92(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 24(a5)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -76(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -88(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 20(a5)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -72(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -84(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 16(a5)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -68(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -80(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var+12)(t1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -64(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -76(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var+8)(t1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -60(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -72(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(t1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -56(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -68(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var)(t1)
-; RV32IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa sp, 80
-; RV32IXQCCMP-WITH-FP-NEXT: qc.cm.popret {ra, s0-s11}, 80
+; RV32IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa sp, 96
+; RV32IXQCCMP-WITH-FP-NEXT: qc.cm.popret {ra, s0-s11}, 96
;
; RV64IXQCCMP-LABEL: callee:
; RV64IXQCCMP: # %bb.0:
@@ -231,18 +231,18 @@ define void @callee() {
; RV64IXQCCMP-NEXT: .cfi_offset s11, -104
; RV64IXQCCMP-NEXT: lui t0, %hi(var)
; RV64IXQCCMP-NEXT: lw a0, %lo(var)(t0)
-; RV64IXQCCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, %lo(var+4)(t0)
; RV64IXQCCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, %lo(var+8)(t0)
+; RV64IXQCCMP-NEXT: lw a0, %lo(var+4)(t0)
; RV64IXQCCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, %lo(var+12)(t0)
+; RV64IXQCCMP-NEXT: lw a0, %lo(var+8)(t0)
; RV64IXQCCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: lw a0, %lo(var+12)(t0)
+; RV64IXQCCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IXQCCMP-NEXT: addi a5, t0, %lo(var)
; RV64IXQCCMP-NEXT: lw a0, 16(a5)
-; RV64IXQCCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 20(a5)
; RV64IXQCCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: lw a0, 20(a5)
+; RV64IXQCCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
; RV64IXQCCMP-NEXT: lw t4, 24(a5)
; RV64IXQCCMP-NEXT: lw t5, 28(a5)
; RV64IXQCCMP-NEXT: lw t6, 32(a5)
@@ -295,17 +295,17 @@ define void @callee() {
; RV64IXQCCMP-NEXT: sw t6, 32(a5)
; RV64IXQCCMP-NEXT: sw t5, 28(a5)
; RV64IXQCCMP-NEXT: sw t4, 24(a5)
-; RV64IXQCCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 20(a5)
-; RV64IXQCCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 16(a5)
-; RV64IXQCCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, %lo(var+12)(t0)
-; RV64IXQCCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, %lo(var+8)(t0)
-; RV64IXQCCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, %lo(var+4)(t0)
-; RV64IXQCCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, %lo(var)(t0)
; RV64IXQCCMP-NEXT: qc.cm.popret {ra, s0-s11}, 160
;
@@ -326,23 +326,25 @@ define void @callee() {
; RV64IXQCCMP-WITH-FP-NEXT: .cfi_offset s9, -88
; RV64IXQCCMP-WITH-FP-NEXT: .cfi_offset s10, -96
; RV64IXQCCMP-WITH-FP-NEXT: .cfi_offset s11, -104
+; RV64IXQCCMP-WITH-FP-NEXT: addi sp, sp, -16
+; RV64IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 176
; RV64IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa s0, 0
; RV64IXQCCMP-WITH-FP-NEXT: lui t1, %hi(var)
; RV64IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var)(t1)
-; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -112(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+4)(t1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -120(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+8)(t1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+4)(t1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -128(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(t1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+8)(t1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -136(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(t1)
+; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -144(s0) # 8-byte Folded Spill
; RV64IXQCCMP-WITH-FP-NEXT: addi a5, t1, %lo(var)
; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 16(a5)
-; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -144(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 20(a5)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -152(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 24(a5)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 20(a5)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -160(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 24(a5)
+; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -168(s0) # 8-byte Folded Spill
; RV64IXQCCMP-WITH-FP-NEXT: lw t5, 28(a5)
; RV64IXQCCMP-WITH-FP-NEXT: lw t6, 32(a5)
; RV64IXQCCMP-WITH-FP-NEXT: lw s2, 36(a5)
@@ -393,21 +395,23 @@ define void @callee() {
; RV64IXQCCMP-WITH-FP-NEXT: sw s2, 36(a5)
; RV64IXQCCMP-WITH-FP-NEXT: sw t6, 32(a5)
; RV64IXQCCMP-WITH-FP-NEXT: sw t5, 28(a5)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -160(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -168(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 24(a5)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -152(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -160(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 20(a5)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -144(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -152(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 16(a5)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -136(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -144(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var+12)(t1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -128(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -136(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var+8)(t1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -120(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -128(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(t1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -112(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -120(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var)(t1)
-; RV64IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa sp, 160
+; RV64IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa sp, 176
+; RV64IXQCCMP-WITH-FP-NEXT: addi sp, sp, 16
+; RV64IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 160
; RV64IXQCCMP-WITH-FP-NEXT: qc.cm.popret {ra, s0-s11}, 160
%val = load [32 x i32], ptr @var
store volatile [32 x i32] %val, ptr @var
@@ -435,54 +439,54 @@ define void @caller() {
; RV32IXQCCMP-NEXT: .cfi_offset s9, -44
; RV32IXQCCMP-NEXT: .cfi_offset s10, -48
; RV32IXQCCMP-NEXT: .cfi_offset s11, -52
-; RV32IXQCCMP-NEXT: addi sp, sp, -32
-; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 144
+; RV32IXQCCMP-NEXT: addi sp, sp, -48
+; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 160
; RV32IXQCCMP-NEXT: lui s0, %hi(var)
; RV32IXQCCMP-NEXT: lw a0, %lo(var)(s0)
-; RV32IXQCCMP-NEXT: sw a0, 88(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 92(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, %lo(var+4)(s0)
-; RV32IXQCCMP-NEXT: sw a0, 84(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 88(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, %lo(var+8)(s0)
-; RV32IXQCCMP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 84(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, %lo(var+12)(s0)
-; RV32IXQCCMP-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: addi s1, s0, %lo(var)
; RV32IXQCCMP-NEXT: lw a0, 16(s1)
-; RV32IXQCCMP-NEXT: sw a0, 72(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 20(s1)
-; RV32IXQCCMP-NEXT: sw a0, 68(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 72(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 24(s1)
-; RV32IXQCCMP-NEXT: sw a0, 64(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 68(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 28(s1)
-; RV32IXQCCMP-NEXT: sw a0, 60(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 64(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 32(s1)
-; RV32IXQCCMP-NEXT: sw a0, 56(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 60(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 36(s1)
-; RV32IXQCCMP-NEXT: sw a0, 52(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 56(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 40(s1)
-; RV32IXQCCMP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 52(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 44(s1)
-; RV32IXQCCMP-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 48(s1)
-; RV32IXQCCMP-NEXT: sw a0, 40(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 52(s1)
-; RV32IXQCCMP-NEXT: sw a0, 36(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 40(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 56(s1)
-; RV32IXQCCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 36(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 60(s1)
-; RV32IXQCCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 64(s1)
-; RV32IXQCCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 68(s1)
-; RV32IXQCCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 72(s1)
-; RV32IXQCCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 76(s1)
-; RV32IXQCCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 80(s1)
-; RV32IXQCCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 84(s1)
-; RV32IXQCCMP-NEXT: sw a0, 4(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw s4, 88(s1)
; RV32IXQCCMP-NEXT: lw s5, 92(s1)
; RV32IXQCCMP-NEXT: lw s6, 96(s1)
@@ -504,51 +508,51 @@ define void @caller() {
; RV32IXQCCMP-NEXT: sw s6, 96(s1)
; RV32IXQCCMP-NEXT: sw s5, 92(s1)
; RV32IXQCCMP-NEXT: sw s4, 88(s1)
-; RV32IXQCCMP-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 84(s1)
; RV32IXQCCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 80(s1)
+; RV32IXQCCMP-NEXT: sw a0, 84(s1)
; RV32IXQCCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 76(s1)
+; RV32IXQCCMP-NEXT: sw a0, 80(s1)
; RV32IXQCCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 72(s1)
+; RV32IXQCCMP-NEXT: sw a0, 76(s1)
; RV32IXQCCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 68(s1)
+; RV32IXQCCMP-NEXT: sw a0, 72(s1)
; RV32IXQCCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 64(s1)
+; RV32IXQCCMP-NEXT: sw a0, 68(s1)
; RV32IXQCCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 60(s1)
+; RV32IXQCCMP-NEXT: sw a0, 64(s1)
; RV32IXQCCMP-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 56(s1)
+; RV32IXQCCMP-NEXT: sw a0, 60(s1)
; RV32IXQCCMP-NEXT: lw a0, 36(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 52(s1)
+; RV32IXQCCMP-NEXT: sw a0, 56(s1)
; RV32IXQCCMP-NEXT: lw a0, 40(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 48(s1)
+; RV32IXQCCMP-NEXT: sw a0, 52(s1)
; RV32IXQCCMP-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 44(s1)
+; RV32IXQCCMP-NEXT: sw a0, 48(s1)
; RV32IXQCCMP-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 40(s1)
+; RV32IXQCCMP-NEXT: sw a0, 44(s1)
; RV32IXQCCMP-NEXT: lw a0, 52(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 36(s1)
+; RV32IXQCCMP-NEXT: sw a0, 40(s1)
; RV32IXQCCMP-NEXT: lw a0, 56(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 32(s1)
+; RV32IXQCCMP-NEXT: sw a0, 36(s1)
; RV32IXQCCMP-NEXT: lw a0, 60(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 28(s1)
+; RV32IXQCCMP-NEXT: sw a0, 32(s1)
; RV32IXQCCMP-NEXT: lw a0, 64(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 24(s1)
+; RV32IXQCCMP-NEXT: sw a0, 28(s1)
; RV32IXQCCMP-NEXT: lw a0, 68(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 20(s1)
+; RV32IXQCCMP-NEXT: sw a0, 24(s1)
; RV32IXQCCMP-NEXT: lw a0, 72(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 16(s1)
+; RV32IXQCCMP-NEXT: sw a0, 20(s1)
; RV32IXQCCMP-NEXT: lw a0, 76(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, %lo(var+12)(s0)
+; RV32IXQCCMP-NEXT: sw a0, 16(s1)
; RV32IXQCCMP-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, %lo(var+8)(s0)
+; RV32IXQCCMP-NEXT: sw a0, %lo(var+12)(s0)
; RV32IXQCCMP-NEXT: lw a0, 84(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, %lo(var+4)(s0)
+; RV32IXQCCMP-NEXT: sw a0, %lo(var+8)(s0)
; RV32IXQCCMP-NEXT: lw a0, 88(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: sw a0, %lo(var+4)(s0)
+; RV32IXQCCMP-NEXT: lw a0, 92(sp) # 4-byte Folded Reload
; RV32IXQCCMP-NEXT: sw a0, %lo(var)(s0)
-; RV32IXQCCMP-NEXT: addi sp, sp, 32
+; RV32IXQCCMP-NEXT: addi sp, sp, 48
; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 112
; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0-s11}, 112
;
@@ -569,57 +573,57 @@ define void @caller() {
; RV32IXQCCMP-WITH-FP-NEXT: .cfi_offset s9, -44
; RV32IXQCCMP-WITH-FP-NEXT: .cfi_offset s10, -48
; RV32IXQCCMP-WITH-FP-NEXT: .cfi_offset s11, -52
-; RV32IXQCCMP-WITH-FP-NEXT: addi sp, sp, -32
-; RV32IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 144
+; RV32IXQCCMP-WITH-FP-NEXT: addi sp, sp, -48
+; RV32IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 160
; RV32IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa s0, 0
; RV32IXQCCMP-WITH-FP-NEXT: lui s6, %hi(var)
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var)(s6)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -56(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -68(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+4)(s6)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -60(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -72(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+8)(s6)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -64(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -76(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(s6)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -68(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -80(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: addi s1, s6, %lo(var)
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 16(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -72(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -84(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 20(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -76(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -88(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 24(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -80(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -92(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 28(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -84(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -96(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 32(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -88(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -100(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 36(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -92(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -104(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 40(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -96(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -108(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 44(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -100(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -112(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 48(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -104(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -116(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 52(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -108(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -120(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 56(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -112(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -124(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 60(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -116(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -128(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 64(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -120(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -132(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 68(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -124(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -136(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 72(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -128(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -140(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 76(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -132(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -144(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 80(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -136(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -148(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 84(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -140(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -152(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw a0, 88(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -144(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-WITH-FP-NEXT: sw a0, -156(s0) # 4-byte Folded Spill
; RV32IXQCCMP-WITH-FP-NEXT: lw s8, 92(s1)
; RV32IXQCCMP-WITH-FP-NEXT: lw s9, 96(s1)
; RV32IXQCCMP-WITH-FP-NEXT: lw s10, 100(s1)
@@ -639,54 +643,54 @@ define void @caller() {
; RV32IXQCCMP-WITH-FP-NEXT: sw s10, 100(s1)
; RV32IXQCCMP-WITH-FP-NEXT: sw s9, 96(s1)
; RV32IXQCCMP-WITH-FP-NEXT: sw s8, 92(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -144(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -156(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 88(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -140(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -152(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 84(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -136(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -148(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 80(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -132(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -144(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 76(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -128(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -140(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 72(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -124(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -136(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 68(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -120(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -132(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 64(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -116(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -128(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 60(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -112(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -124(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 56(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -108(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -120(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 52(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -104(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -116(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 48(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -100(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -112(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 44(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -96(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -108(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 40(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -92(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -104(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 36(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -88(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -100(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 32(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -84(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -96(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 28(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -80(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -92(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 24(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -76(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -88(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 20(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -72(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -84(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, 16(s1)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -68(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -80(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var+12)(s6)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -64(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -76(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var+8)(s6)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -60(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -72(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(s6)
-; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -56(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-WITH-FP-NEXT: lw a0, -68(s0) # 4-byte Folded Reload
; RV32IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var)(s6)
-; RV32IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa sp, 144
-; RV32IXQCCMP-WITH-FP-NEXT: addi sp, sp, 32
+; RV32IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa sp, 160
+; RV32IXQCCMP-WITH-FP-NEXT: addi sp, sp, 48
; RV32IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 112
; RV32IXQCCMP-WITH-FP-NEXT: qc.cm.popret {ra, s0-s11}, 112
;
@@ -711,50 +715,50 @@ define void @caller() {
; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 288
; RV64IXQCCMP-NEXT: lui s0, %hi(var)
; RV64IXQCCMP-NEXT: lw a0, %lo(var)(s0)
-; RV64IXQCCMP-NEXT: sd a0, 176(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, %lo(var+4)(s0)
; RV64IXQCCMP-NEXT: sd a0, 168(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, %lo(var+8)(s0)
+; RV64IXQCCMP-NEXT: lw a0, %lo(var+4)(s0)
; RV64IXQCCMP-NEXT: sd a0, 160(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, %lo(var+12)(s0)
+; RV64IXQCCMP-NEXT: lw a0, %lo(var+8)(s0)
; RV64IXQCCMP-NEXT: sd a0, 152(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: lw a0, %lo(var+12)(s0)
+; RV64IXQCCMP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
; RV64IXQCCMP-NEXT: addi s1, s0, %lo(var)
; RV64IXQCCMP-NEXT: lw a0, 16(s1)
-; RV64IXQCCMP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 20(s1)
; RV64IXQCCMP-NEXT: sd a0, 136(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 24(s1)
+; RV64IXQCCMP-NEXT: lw a0, 20(s1)
; RV64IXQCCMP-NEXT: sd a0, 128(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 28(s1)
+; RV64IXQCCMP-NEXT: lw a0, 24(s1)
; RV64IXQCCMP-NEXT: sd a0, 120(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 32(s1)
+; RV64IXQCCMP-NEXT: lw a0, 28(s1)
; RV64IXQCCMP-NEXT: sd a0, 112(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 36(s1)
+; RV64IXQCCMP-NEXT: lw a0, 32(s1)
; RV64IXQCCMP-NEXT: sd a0, 104(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 40(s1)
+; RV64IXQCCMP-NEXT: lw a0, 36(s1)
; RV64IXQCCMP-NEXT: sd a0, 96(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 44(s1)
+; RV64IXQCCMP-NEXT: lw a0, 40(s1)
; RV64IXQCCMP-NEXT: sd a0, 88(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 48(s1)
+; RV64IXQCCMP-NEXT: lw a0, 44(s1)
; RV64IXQCCMP-NEXT: sd a0, 80(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 52(s1)
+; RV64IXQCCMP-NEXT: lw a0, 48(s1)
; RV64IXQCCMP-NEXT: sd a0, 72(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 56(s1)
+; RV64IXQCCMP-NEXT: lw a0, 52(s1)
; RV64IXQCCMP-NEXT: sd a0, 64(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 60(s1)
+; RV64IXQCCMP-NEXT: lw a0, 56(s1)
; RV64IXQCCMP-NEXT: sd a0, 56(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 64(s1)
+; RV64IXQCCMP-NEXT: lw a0, 60(s1)
; RV64IXQCCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 68(s1)
+; RV64IXQCCMP-NEXT: lw a0, 64(s1)
; RV64IXQCCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 72(s1)
+; RV64IXQCCMP-NEXT: lw a0, 68(s1)
; RV64IXQCCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 76(s1)
+; RV64IXQCCMP-NEXT: lw a0, 72(s1)
; RV64IXQCCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 80(s1)
+; RV64IXQCCMP-NEXT: lw a0, 76(s1)
; RV64IXQCCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 84(s1)
+; RV64IXQCCMP-NEXT: lw a0, 80(s1)
; RV64IXQCCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: lw a0, 84(s1)
+; RV64IXQCCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
; RV64IXQCCMP-NEXT: lw s4, 88(s1)
; RV64IXQCCMP-NEXT: lw s5, 92(s1)
; RV64IXQCCMP-NEXT: lw s6, 96(s1)
@@ -776,49 +780,49 @@ define void @caller() {
; RV64IXQCCMP-NEXT: sw s6, 96(s1)
; RV64IXQCCMP-NEXT: sw s5, 92(s1)
; RV64IXQCCMP-NEXT: sw s4, 88(s1)
-; RV64IXQCCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 84(s1)
-; RV64IXQCCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 80(s1)
-; RV64IXQCCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 76(s1)
-; RV64IXQCCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 72(s1)
-; RV64IXQCCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 68(s1)
-; RV64IXQCCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 64(s1)
-; RV64IXQCCMP-NEXT: ld a0, 56(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 60(s1)
-; RV64IXQCCMP-NEXT: ld a0, 64(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 56(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 56(s1)
-; RV64IXQCCMP-NEXT: ld a0, 72(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 64(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 52(s1)
-; RV64IXQCCMP-NEXT: ld a0, 80(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 72(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 48(s1)
-; RV64IXQCCMP-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 80(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 44(s1)
-; RV64IXQCCMP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 40(s1)
-; RV64IXQCCMP-NEXT: ld a0, 104(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 36(s1)
-; RV64IXQCCMP-NEXT: ld a0, 112(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 104(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 32(s1)
-; RV64IXQCCMP-NEXT: ld a0, 120(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 112(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 28(s1)
-; RV64IXQCCMP-NEXT: ld a0, 128(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 120(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 24(s1)
-; RV64IXQCCMP-NEXT: ld a0, 136(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 128(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 20(s1)
-; RV64IXQCCMP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 136(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 16(s1)
-; RV64IXQCCMP-NEXT: ld a0, 152(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, %lo(var+12)(s0)
-; RV64IXQCCMP-NEXT: ld a0, 160(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 152(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, %lo(var+8)(s0)
-; RV64IXQCCMP-NEXT: ld a0, 168(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 160(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, %lo(var+4)(s0)
-; RV64IXQCCMP-NEXT: ld a0, 176(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 168(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, %lo(var)(s0)
; RV64IXQCCMP-NEXT: addi sp, sp, 128
; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 160
@@ -841,57 +845,57 @@ define void @caller() {
; RV64IXQCCMP-WITH-FP-NEXT: .cfi_offset s9, -88
; RV64IXQCCMP-WITH-FP-NEXT: .cfi_offset s10, -96
; RV64IXQCCMP-WITH-FP-NEXT: .cfi_offset s11, -104
-; RV64IXQCCMP-WITH-FP-NEXT: addi sp, sp, -128
-; RV64IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 288
+; RV64IXQCCMP-WITH-FP-NEXT: addi sp, sp, -144
+; RV64IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 304
; RV64IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa s0, 0
; RV64IXQCCMP-WITH-FP-NEXT: lui s6, %hi(var)
; RV64IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var)(s6)
-; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -112(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+4)(s6)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -120(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+8)(s6)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+4)(s6)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -128(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(s6)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+8)(s6)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -136(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, %lo(var+12)(s6)
+; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -144(s0) # 8-byte Folded Spill
; RV64IXQCCMP-WITH-FP-NEXT: addi s1, s6, %lo(var)
; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 16(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -144(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 20(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -152(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 24(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 20(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -160(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 28(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 24(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -168(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 32(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 28(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -176(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 36(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 32(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -184(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 40(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 36(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -192(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 44(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 40(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -200(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 48(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 44(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -208(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 52(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 48(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -216(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 56(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 52(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -224(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 60(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 56(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -232(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 64(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 60(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -240(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 68(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 64(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -248(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 72(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 68(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -256(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 76(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 72(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -264(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 80(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 76(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -272(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 84(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 80(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -280(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 88(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 84(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -288(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-WITH-FP-NEXT: lw a0, 88(s1)
+; RV64IXQCCMP-WITH-FP-NEXT: sd a0, -296(s0) # 8-byte Folded Spill
; RV64IXQCCMP-WITH-FP-NEXT: lw s8, 92(s1)
; RV64IXQCCMP-WITH-FP-NEXT: lw s9, 96(s1)
; RV64IXQCCMP-WITH-FP-NEXT: lw s10, 100(s1)
@@ -911,54 +915,54 @@ define void @caller() {
; RV64IXQCCMP-WITH-FP-NEXT: sw s10, 100(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sw s9, 96(s1)
; RV64IXQCCMP-WITH-FP-NEXT: sw s8, 92(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -288(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -296(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 88(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -280(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -288(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 84(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -272(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -280(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 80(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -264(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -272(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 76(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -256(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -264(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 72(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -248(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -256(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 68(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -240(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -248(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 64(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -232(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -240(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 60(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -224(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -232(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 56(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -216(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -224(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 52(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -208(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -216(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 48(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -200(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -208(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 44(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -192(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -200(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 40(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -184(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -192(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 36(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -176(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -184(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 32(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -168(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -176(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 28(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -160(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -168(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 24(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -152(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -160(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 20(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -144(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -152(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, 16(s1)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -136(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -144(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var+12)(s6)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -128(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -136(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var+8)(s6)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -120(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -128(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var+4)(s6)
-; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -112(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-WITH-FP-NEXT: ld a0, -120(s0) # 8-byte Folded Reload
; RV64IXQCCMP-WITH-FP-NEXT: sw a0, %lo(var)(s6)
-; RV64IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa sp, 288
-; RV64IXQCCMP-WITH-FP-NEXT: addi sp, sp, 128
+; RV64IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa sp, 304
+; RV64IXQCCMP-WITH-FP-NEXT: addi sp, sp, 144
; RV64IXQCCMP-WITH-FP-NEXT: .cfi_def_cfa_offset 160
; RV64IXQCCMP-WITH-FP-NEXT: qc.cm.popret {ra, s0-s11}, 160
%val = load [32 x i32], ptr @var
diff --git a/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll b/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll
index c1a5e60..415511f 100644
--- a/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll
+++ b/llvm/test/CodeGen/RISCV/xqccmp-push-pop-popret.ll
@@ -24,7 +24,7 @@ define i32 @foo() {
; RV32IXQCCMP-NEXT: .cfi_offset ra, -4
; RV32IXQCCMP-NEXT: addi sp, sp, -464
; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 528
-; RV32IXQCCMP-NEXT: addi a0, sp, 12
+; RV32IXQCCMP-NEXT: mv a0, sp
; RV32IXQCCMP-NEXT: call test
; RV32IXQCCMP-NEXT: addi sp, sp, 464
; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 64
@@ -37,7 +37,7 @@ define i32 @foo() {
; RV64IXQCCMP-NEXT: .cfi_offset ra, -8
; RV64IXQCCMP-NEXT: addi sp, sp, -464
; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 528
-; RV64IXQCCMP-NEXT: addi a0, sp, 8
+; RV64IXQCCMP-NEXT: mv a0, sp
; RV64IXQCCMP-NEXT: call test
; RV64IXQCCMP-NEXT: addi sp, sp, 464
; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 64
@@ -52,7 +52,7 @@ define i32 @foo() {
; RV32IXQCCMP-FP-NEXT: addi sp, sp, -464
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 528
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0
-; RV32IXQCCMP-FP-NEXT: addi a0, s0, -520
+; RV32IXQCCMP-FP-NEXT: addi a0, s0, -528
; RV32IXQCCMP-FP-NEXT: call test
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 528
; RV32IXQCCMP-FP-NEXT: addi sp, sp, 464
@@ -82,7 +82,7 @@ define i32 @foo() {
; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4
; RV32IXQCCMP-SR-NEXT: addi sp, sp, -464
; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 528
-; RV32IXQCCMP-SR-NEXT: addi a0, sp, 12
+; RV32IXQCCMP-SR-NEXT: mv a0, sp
; RV32IXQCCMP-SR-NEXT: call test
; RV32IXQCCMP-SR-NEXT: addi sp, sp, 464
; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64
@@ -95,7 +95,7 @@ define i32 @foo() {
; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8
; RV64IXQCCMP-SR-NEXT: addi sp, sp, -464
; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 528
-; RV64IXQCCMP-SR-NEXT: addi a0, sp, 8
+; RV64IXQCCMP-SR-NEXT: mv a0, sp
; RV64IXQCCMP-SR-NEXT: call test
; RV64IXQCCMP-SR-NEXT: addi sp, sp, 464
; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64
@@ -1595,52 +1595,54 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV32IXQCCMP-NEXT: qc.cm.push {ra}, -64
; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 64
; RV32IXQCCMP-NEXT: .cfi_offset ra, -4
-; RV32IXQCCMP-NEXT: sw t0, 56(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw t1, 52(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw t2, 48(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a1, 40(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a2, 36(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a3, 32(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a4, 28(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a5, 24(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a6, 20(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a7, 16(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw t3, 12(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw t4, 8(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw t5, 4(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw t6, 0(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: .cfi_offset t0, -8
-; RV32IXQCCMP-NEXT: .cfi_offset t1, -12
-; RV32IXQCCMP-NEXT: .cfi_offset t2, -16
-; RV32IXQCCMP-NEXT: .cfi_offset a0, -20
-; RV32IXQCCMP-NEXT: .cfi_offset a1, -24
-; RV32IXQCCMP-NEXT: .cfi_offset a2, -28
-; RV32IXQCCMP-NEXT: .cfi_offset a3, -32
-; RV32IXQCCMP-NEXT: .cfi_offset a4, -36
-; RV32IXQCCMP-NEXT: .cfi_offset a5, -40
-; RV32IXQCCMP-NEXT: .cfi_offset a6, -44
-; RV32IXQCCMP-NEXT: .cfi_offset a7, -48
-; RV32IXQCCMP-NEXT: .cfi_offset t3, -52
-; RV32IXQCCMP-NEXT: .cfi_offset t4, -56
-; RV32IXQCCMP-NEXT: .cfi_offset t5, -60
-; RV32IXQCCMP-NEXT: .cfi_offset t6, -64
+; RV32IXQCCMP-NEXT: addi sp, sp, -16
+; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 80
+; RV32IXQCCMP-NEXT: sw t0, 60(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw t1, 56(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw t2, 52(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a1, 44(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a2, 40(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a3, 36(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a4, 32(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a5, 28(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a6, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a7, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw t3, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: .cfi_offset t0, -20
+; RV32IXQCCMP-NEXT: .cfi_offset t1, -24
+; RV32IXQCCMP-NEXT: .cfi_offset t2, -28
+; RV32IXQCCMP-NEXT: .cfi_offset a0, -32
+; RV32IXQCCMP-NEXT: .cfi_offset a1, -36
+; RV32IXQCCMP-NEXT: .cfi_offset a2, -40
+; RV32IXQCCMP-NEXT: .cfi_offset a3, -44
+; RV32IXQCCMP-NEXT: .cfi_offset a4, -48
+; RV32IXQCCMP-NEXT: .cfi_offset a5, -52
+; RV32IXQCCMP-NEXT: .cfi_offset a6, -56
+; RV32IXQCCMP-NEXT: .cfi_offset a7, -60
+; RV32IXQCCMP-NEXT: .cfi_offset t3, -64
+; RV32IXQCCMP-NEXT: .cfi_offset t4, -68
+; RV32IXQCCMP-NEXT: .cfi_offset t5, -72
+; RV32IXQCCMP-NEXT: .cfi_offset t6, -76
; RV32IXQCCMP-NEXT: call foo_test_irq
-; RV32IXQCCMP-NEXT: lw t0, 56(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw t1, 52(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw t2, 48(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a1, 40(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a2, 36(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a4, 28(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a5, 24(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a6, 20(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a7, 16(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw t3, 12(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw t4, 8(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw t5, 4(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw t6, 0(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw t2, 52(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a1, 44(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a2, 40(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a3, 36(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a4, 32(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a5, 28(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a6, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a7, 20(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw t3, 16(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw t4, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw t5, 8(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw t6, 4(sp) # 4-byte Folded Reload
; RV32IXQCCMP-NEXT: .cfi_restore t0
; RV32IXQCCMP-NEXT: .cfi_restore t1
; RV32IXQCCMP-NEXT: .cfi_restore t2
@@ -1656,6 +1658,8 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV32IXQCCMP-NEXT: .cfi_restore t4
; RV32IXQCCMP-NEXT: .cfi_restore t5
; RV32IXQCCMP-NEXT: .cfi_restore t6
+; RV32IXQCCMP-NEXT: addi sp, sp, 16
+; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 64
; RV32IXQCCMP-NEXT: qc.cm.pop {ra}, 64
; RV32IXQCCMP-NEXT: .cfi_restore ra
; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 0
@@ -1666,54 +1670,54 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV64IXQCCMP-NEXT: qc.cm.push {ra}, -64
; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 64
; RV64IXQCCMP-NEXT: .cfi_offset ra, -8
-; RV64IXQCCMP-NEXT: addi sp, sp, -64
-; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 128
-; RV64IXQCCMP-NEXT: sd t0, 112(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd t1, 104(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd t2, 96(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a0, 88(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a1, 80(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a2, 72(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a3, 64(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a4, 56(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a5, 48(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a6, 40(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a7, 32(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd t3, 24(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd t4, 16(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd t5, 8(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd t6, 0(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: .cfi_offset t0, -16
-; RV64IXQCCMP-NEXT: .cfi_offset t1, -24
-; RV64IXQCCMP-NEXT: .cfi_offset t2, -32
-; RV64IXQCCMP-NEXT: .cfi_offset a0, -40
-; RV64IXQCCMP-NEXT: .cfi_offset a1, -48
-; RV64IXQCCMP-NEXT: .cfi_offset a2, -56
-; RV64IXQCCMP-NEXT: .cfi_offset a3, -64
-; RV64IXQCCMP-NEXT: .cfi_offset a4, -72
-; RV64IXQCCMP-NEXT: .cfi_offset a5, -80
-; RV64IXQCCMP-NEXT: .cfi_offset a6, -88
-; RV64IXQCCMP-NEXT: .cfi_offset a7, -96
-; RV64IXQCCMP-NEXT: .cfi_offset t3, -104
-; RV64IXQCCMP-NEXT: .cfi_offset t4, -112
-; RV64IXQCCMP-NEXT: .cfi_offset t5, -120
-; RV64IXQCCMP-NEXT: .cfi_offset t6, -128
+; RV64IXQCCMP-NEXT: addi sp, sp, -80
+; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 144
+; RV64IXQCCMP-NEXT: sd t0, 120(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd t1, 112(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd t2, 104(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a0, 96(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a1, 88(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a2, 80(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a3, 72(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a4, 64(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a5, 56(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a6, 48(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a7, 40(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd t3, 32(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: .cfi_offset t0, -24
+; RV64IXQCCMP-NEXT: .cfi_offset t1, -32
+; RV64IXQCCMP-NEXT: .cfi_offset t2, -40
+; RV64IXQCCMP-NEXT: .cfi_offset a0, -48
+; RV64IXQCCMP-NEXT: .cfi_offset a1, -56
+; RV64IXQCCMP-NEXT: .cfi_offset a2, -64
+; RV64IXQCCMP-NEXT: .cfi_offset a3, -72
+; RV64IXQCCMP-NEXT: .cfi_offset a4, -80
+; RV64IXQCCMP-NEXT: .cfi_offset a5, -88
+; RV64IXQCCMP-NEXT: .cfi_offset a6, -96
+; RV64IXQCCMP-NEXT: .cfi_offset a7, -104
+; RV64IXQCCMP-NEXT: .cfi_offset t3, -112
+; RV64IXQCCMP-NEXT: .cfi_offset t4, -120
+; RV64IXQCCMP-NEXT: .cfi_offset t5, -128
+; RV64IXQCCMP-NEXT: .cfi_offset t6, -136
; RV64IXQCCMP-NEXT: call foo_test_irq
-; RV64IXQCCMP-NEXT: ld t0, 112(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld t1, 104(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld t2, 96(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a1, 80(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a2, 72(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a3, 64(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a4, 56(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a5, 48(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a6, 40(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a7, 32(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld t3, 24(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld t4, 16(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld t5, 8(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld t6, 0(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld t0, 120(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld t1, 112(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld t2, 104(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a1, 88(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a2, 80(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a3, 72(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a4, 64(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a5, 56(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a6, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a7, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld t3, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld t4, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld t5, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld t6, 8(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: .cfi_restore t0
; RV64IXQCCMP-NEXT: .cfi_restore t1
; RV64IXQCCMP-NEXT: .cfi_restore t2
@@ -1729,7 +1733,7 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV64IXQCCMP-NEXT: .cfi_restore t4
; RV64IXQCCMP-NEXT: .cfi_restore t5
; RV64IXQCCMP-NEXT: .cfi_restore t6
-; RV64IXQCCMP-NEXT: addi sp, sp, 64
+; RV64IXQCCMP-NEXT: addi sp, sp, 80
; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 64
; RV64IXQCCMP-NEXT: qc.cm.pop {ra}, 64
; RV64IXQCCMP-NEXT: .cfi_restore ra
@@ -1744,54 +1748,54 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-FP-NEXT: addi sp, sp, -16
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 80
-; RV32IXQCCMP-FP-NEXT: sw t0, 68(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw t1, 64(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw t2, 60(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a0, 56(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a1, 52(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a2, 48(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a3, 44(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a4, 40(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a5, 36(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a6, 32(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a7, 28(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw t3, 24(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw t4, 20(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw t5, 16(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw t6, 12(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: .cfi_offset t0, -12
-; RV32IXQCCMP-FP-NEXT: .cfi_offset t1, -16
-; RV32IXQCCMP-FP-NEXT: .cfi_offset t2, -20
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a0, -24
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a1, -28
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a2, -32
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a3, -36
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a4, -40
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a5, -44
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a6, -48
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a7, -52
-; RV32IXQCCMP-FP-NEXT: .cfi_offset t3, -56
-; RV32IXQCCMP-FP-NEXT: .cfi_offset t4, -60
-; RV32IXQCCMP-FP-NEXT: .cfi_offset t5, -64
-; RV32IXQCCMP-FP-NEXT: .cfi_offset t6, -68
+; RV32IXQCCMP-FP-NEXT: sw t0, 60(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw t1, 56(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw t2, 52(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a1, 44(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a2, 40(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a3, 36(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a4, 32(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a5, 28(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a6, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a7, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw t3, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: .cfi_offset t0, -20
+; RV32IXQCCMP-FP-NEXT: .cfi_offset t1, -24
+; RV32IXQCCMP-FP-NEXT: .cfi_offset t2, -28
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a0, -32
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a1, -36
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a2, -40
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a3, -44
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a4, -48
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a5, -52
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a6, -56
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a7, -60
+; RV32IXQCCMP-FP-NEXT: .cfi_offset t3, -64
+; RV32IXQCCMP-FP-NEXT: .cfi_offset t4, -68
+; RV32IXQCCMP-FP-NEXT: .cfi_offset t5, -72
+; RV32IXQCCMP-FP-NEXT: .cfi_offset t6, -76
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0
; RV32IXQCCMP-FP-NEXT: call foo_test_irq
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 80
-; RV32IXQCCMP-FP-NEXT: lw t0, 68(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw t1, 64(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw t2, 60(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a0, 56(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a1, 52(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a2, 48(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a3, 44(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a4, 40(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a5, 36(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a6, 32(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a7, 28(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw t3, 24(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw t4, 20(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw t5, 16(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw t6, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw t2, 52(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a1, 44(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a2, 40(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a3, 36(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a4, 32(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a5, 28(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a6, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a7, 20(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw t3, 16(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw t4, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw t5, 8(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw t6, 4(sp) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: .cfi_restore t0
; RV32IXQCCMP-FP-NEXT: .cfi_restore t1
; RV32IXQCCMP-FP-NEXT: .cfi_restore t2
@@ -1899,52 +1903,54 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV32IXQCCMP-SR-NEXT: qc.cm.push {ra}, -64
; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4
-; RV32IXQCCMP-SR-NEXT: sw t0, 56(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw t1, 52(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw t2, 48(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a1, 40(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a2, 36(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a3, 32(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a4, 28(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a5, 24(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a6, 20(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a7, 16(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw t3, 12(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw t4, 8(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw t5, 4(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw t6, 0(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: .cfi_offset t0, -8
-; RV32IXQCCMP-SR-NEXT: .cfi_offset t1, -12
-; RV32IXQCCMP-SR-NEXT: .cfi_offset t2, -16
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a0, -20
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a1, -24
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a2, -28
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a3, -32
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a4, -36
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a5, -40
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a6, -44
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a7, -48
-; RV32IXQCCMP-SR-NEXT: .cfi_offset t3, -52
-; RV32IXQCCMP-SR-NEXT: .cfi_offset t4, -56
-; RV32IXQCCMP-SR-NEXT: .cfi_offset t5, -60
-; RV32IXQCCMP-SR-NEXT: .cfi_offset t6, -64
+; RV32IXQCCMP-SR-NEXT: addi sp, sp, -16
+; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 80
+; RV32IXQCCMP-SR-NEXT: sw t0, 60(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw t1, 56(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw t2, 52(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a1, 44(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a2, 40(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a3, 36(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a4, 32(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a5, 28(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a6, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a7, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw t3, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: .cfi_offset t0, -20
+; RV32IXQCCMP-SR-NEXT: .cfi_offset t1, -24
+; RV32IXQCCMP-SR-NEXT: .cfi_offset t2, -28
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a0, -32
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a1, -36
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a2, -40
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a3, -44
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a4, -48
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a5, -52
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a6, -56
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a7, -60
+; RV32IXQCCMP-SR-NEXT: .cfi_offset t3, -64
+; RV32IXQCCMP-SR-NEXT: .cfi_offset t4, -68
+; RV32IXQCCMP-SR-NEXT: .cfi_offset t5, -72
+; RV32IXQCCMP-SR-NEXT: .cfi_offset t6, -76
; RV32IXQCCMP-SR-NEXT: call foo_test_irq
-; RV32IXQCCMP-SR-NEXT: lw t0, 56(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw t1, 52(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw t2, 48(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a1, 40(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a2, 36(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a4, 28(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a5, 24(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a6, 20(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a7, 16(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw t3, 12(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw t4, 8(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw t5, 4(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw t6, 0(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw t2, 52(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a1, 44(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a2, 40(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a3, 36(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a4, 32(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a5, 28(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a6, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a7, 20(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw t3, 16(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw t4, 12(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw t5, 8(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw t6, 4(sp) # 4-byte Folded Reload
; RV32IXQCCMP-SR-NEXT: .cfi_restore t0
; RV32IXQCCMP-SR-NEXT: .cfi_restore t1
; RV32IXQCCMP-SR-NEXT: .cfi_restore t2
@@ -1960,6 +1966,8 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV32IXQCCMP-SR-NEXT: .cfi_restore t4
; RV32IXQCCMP-SR-NEXT: .cfi_restore t5
; RV32IXQCCMP-SR-NEXT: .cfi_restore t6
+; RV32IXQCCMP-SR-NEXT: addi sp, sp, 16
+; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV32IXQCCMP-SR-NEXT: qc.cm.pop {ra}, 64
; RV32IXQCCMP-SR-NEXT: .cfi_restore ra
; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 0
@@ -1970,54 +1978,54 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV64IXQCCMP-SR-NEXT: qc.cm.push {ra}, -64
; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8
-; RV64IXQCCMP-SR-NEXT: addi sp, sp, -64
-; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 128
-; RV64IXQCCMP-SR-NEXT: sd t0, 112(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd t1, 104(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd t2, 96(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a0, 88(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a1, 80(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a2, 72(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a3, 64(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a4, 56(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a5, 48(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a6, 40(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a7, 32(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd t3, 24(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd t4, 16(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd t5, 8(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd t6, 0(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: .cfi_offset t0, -16
-; RV64IXQCCMP-SR-NEXT: .cfi_offset t1, -24
-; RV64IXQCCMP-SR-NEXT: .cfi_offset t2, -32
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a0, -40
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a1, -48
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a2, -56
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a3, -64
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a4, -72
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a5, -80
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a6, -88
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a7, -96
-; RV64IXQCCMP-SR-NEXT: .cfi_offset t3, -104
-; RV64IXQCCMP-SR-NEXT: .cfi_offset t4, -112
-; RV64IXQCCMP-SR-NEXT: .cfi_offset t5, -120
-; RV64IXQCCMP-SR-NEXT: .cfi_offset t6, -128
+; RV64IXQCCMP-SR-NEXT: addi sp, sp, -80
+; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 144
+; RV64IXQCCMP-SR-NEXT: sd t0, 120(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd t1, 112(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd t2, 104(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a0, 96(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a1, 88(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a2, 80(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a3, 72(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a4, 64(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a5, 56(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a6, 48(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a7, 40(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd t3, 32(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: .cfi_offset t0, -24
+; RV64IXQCCMP-SR-NEXT: .cfi_offset t1, -32
+; RV64IXQCCMP-SR-NEXT: .cfi_offset t2, -40
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a0, -48
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a1, -56
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a2, -64
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a3, -72
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a4, -80
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a5, -88
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a6, -96
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a7, -104
+; RV64IXQCCMP-SR-NEXT: .cfi_offset t3, -112
+; RV64IXQCCMP-SR-NEXT: .cfi_offset t4, -120
+; RV64IXQCCMP-SR-NEXT: .cfi_offset t5, -128
+; RV64IXQCCMP-SR-NEXT: .cfi_offset t6, -136
; RV64IXQCCMP-SR-NEXT: call foo_test_irq
-; RV64IXQCCMP-SR-NEXT: ld t0, 112(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld t1, 104(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld t2, 96(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a1, 80(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a2, 72(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a3, 64(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a4, 56(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a5, 48(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a6, 40(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a7, 32(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld t3, 24(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld t4, 16(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld t5, 8(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld t6, 0(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld t0, 120(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld t1, 112(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld t2, 104(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a1, 88(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a2, 80(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a3, 72(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a4, 64(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a5, 56(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a6, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a7, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld t3, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld t4, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld t5, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld t6, 8(sp) # 8-byte Folded Reload
; RV64IXQCCMP-SR-NEXT: .cfi_restore t0
; RV64IXQCCMP-SR-NEXT: .cfi_restore t1
; RV64IXQCCMP-SR-NEXT: .cfi_restore t2
@@ -2033,7 +2041,7 @@ define void @foo_with_irq() "interrupt"="machine" {
; RV64IXQCCMP-SR-NEXT: .cfi_restore t4
; RV64IXQCCMP-SR-NEXT: .cfi_restore t5
; RV64IXQCCMP-SR-NEXT: .cfi_restore t6
-; RV64IXQCCMP-SR-NEXT: addi sp, sp, 64
+; RV64IXQCCMP-SR-NEXT: addi sp, sp, 80
; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV64IXQCCMP-SR-NEXT: qc.cm.pop {ra}, 64
; RV64IXQCCMP-SR-NEXT: .cfi_restore ra
@@ -2119,52 +2127,52 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IXQCCMP-NEXT: .cfi_offset s9, -44
; RV32IXQCCMP-NEXT: .cfi_offset s10, -48
; RV32IXQCCMP-NEXT: .cfi_offset s11, -52
-; RV32IXQCCMP-NEXT: addi sp, sp, -32
-; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 144
-; RV32IXQCCMP-NEXT: sw t0, 88(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw t1, 84(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw t2, 80(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a1, 72(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a2, 68(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a3, 64(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a4, 60(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a5, 56(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a6, 52(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw a7, 48(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw t3, 44(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw t4, 40(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw t5, 36(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: sw t6, 32(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-NEXT: .cfi_offset t0, -56
-; RV32IXQCCMP-NEXT: .cfi_offset t1, -60
-; RV32IXQCCMP-NEXT: .cfi_offset t2, -64
-; RV32IXQCCMP-NEXT: .cfi_offset a0, -68
-; RV32IXQCCMP-NEXT: .cfi_offset a1, -72
-; RV32IXQCCMP-NEXT: .cfi_offset a2, -76
-; RV32IXQCCMP-NEXT: .cfi_offset a3, -80
-; RV32IXQCCMP-NEXT: .cfi_offset a4, -84
-; RV32IXQCCMP-NEXT: .cfi_offset a5, -88
-; RV32IXQCCMP-NEXT: .cfi_offset a6, -92
-; RV32IXQCCMP-NEXT: .cfi_offset a7, -96
-; RV32IXQCCMP-NEXT: .cfi_offset t3, -100
-; RV32IXQCCMP-NEXT: .cfi_offset t4, -104
-; RV32IXQCCMP-NEXT: .cfi_offset t5, -108
-; RV32IXQCCMP-NEXT: .cfi_offset t6, -112
+; RV32IXQCCMP-NEXT: addi sp, sp, -48
+; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 160
+; RV32IXQCCMP-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw t1, 88(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw t2, 84(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a1, 76(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a2, 72(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a3, 68(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a4, 64(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a5, 60(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a6, 56(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a7, 52(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw t3, 48(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: .cfi_offset t0, -68
+; RV32IXQCCMP-NEXT: .cfi_offset t1, -72
+; RV32IXQCCMP-NEXT: .cfi_offset t2, -76
+; RV32IXQCCMP-NEXT: .cfi_offset a0, -80
+; RV32IXQCCMP-NEXT: .cfi_offset a1, -84
+; RV32IXQCCMP-NEXT: .cfi_offset a2, -88
+; RV32IXQCCMP-NEXT: .cfi_offset a3, -92
+; RV32IXQCCMP-NEXT: .cfi_offset a4, -96
+; RV32IXQCCMP-NEXT: .cfi_offset a5, -100
+; RV32IXQCCMP-NEXT: .cfi_offset a6, -104
+; RV32IXQCCMP-NEXT: .cfi_offset a7, -108
+; RV32IXQCCMP-NEXT: .cfi_offset t3, -112
+; RV32IXQCCMP-NEXT: .cfi_offset t4, -116
+; RV32IXQCCMP-NEXT: .cfi_offset t5, -120
+; RV32IXQCCMP-NEXT: .cfi_offset t6, -124
; RV32IXQCCMP-NEXT: lui t0, %hi(var_test_irq)
; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV32IXQCCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0)
-; RV32IXQCCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0)
-; RV32IXQCCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0)
-; RV32IXQCCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: addi a5, t0, %lo(var_test_irq)
; RV32IXQCCMP-NEXT: lw a0, 16(a5)
-; RV32IXQCCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 20(a5)
-; RV32IXQCCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw t4, 24(a5)
; RV32IXQCCMP-NEXT: lw t5, 28(a5)
; RV32IXQCCMP-NEXT: lw t6, 32(a5)
@@ -2217,33 +2225,33 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IXQCCMP-NEXT: sw t6, 32(a5)
; RV32IXQCCMP-NEXT: sw t5, 28(a5)
; RV32IXQCCMP-NEXT: sw t4, 24(a5)
-; RV32IXQCCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 20(a5)
; RV32IXQCCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 16(a5)
+; RV32IXQCCMP-NEXT: sw a0, 20(a5)
; RV32IXQCCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
+; RV32IXQCCMP-NEXT: sw a0, 16(a5)
; RV32IXQCCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
+; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
; RV32IXQCCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
; RV32IXQCCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq)(t0)
-; RV32IXQCCMP-NEXT: lw t0, 88(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw t1, 84(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw t2, 80(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a0, 76(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a1, 72(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a2, 68(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a3, 64(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a4, 60(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a5, 56(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a6, 52(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw a7, 48(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw t3, 44(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw t4, 40(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw t5, 36(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: lw t6, 32(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw t0, 92(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw t1, 88(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw t2, 84(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a1, 76(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a2, 72(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a4, 64(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a5, 60(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw a7, 52(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw t3, 48(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw t4, 44(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw t5, 40(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: lw t6, 36(sp) # 4-byte Folded Reload
; RV32IXQCCMP-NEXT: .cfi_restore t0
; RV32IXQCCMP-NEXT: .cfi_restore t1
; RV32IXQCCMP-NEXT: .cfi_restore t2
@@ -2259,7 +2267,7 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IXQCCMP-NEXT: .cfi_restore t4
; RV32IXQCCMP-NEXT: .cfi_restore t5
; RV32IXQCCMP-NEXT: .cfi_restore t6
-; RV32IXQCCMP-NEXT: addi sp, sp, 32
+; RV32IXQCCMP-NEXT: addi sp, sp, 48
; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 112
; RV32IXQCCMP-NEXT: qc.cm.pop {ra, s0-s11}, 112
; RV32IXQCCMP-NEXT: .cfi_restore ra
@@ -2295,52 +2303,52 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV64IXQCCMP-NEXT: .cfi_offset s9, -88
; RV64IXQCCMP-NEXT: .cfi_offset s10, -96
; RV64IXQCCMP-NEXT: .cfi_offset s11, -104
-; RV64IXQCCMP-NEXT: addi sp, sp, -112
-; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 272
-; RV64IXQCCMP-NEXT: sd t0, 160(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd t1, 152(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd t2, 144(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a0, 136(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a1, 128(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a2, 120(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a3, 112(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a4, 104(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a5, 96(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a6, 88(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd a7, 80(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd t3, 72(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd t4, 64(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd t5, 56(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: sd t6, 48(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: .cfi_offset t0, -112
-; RV64IXQCCMP-NEXT: .cfi_offset t1, -120
-; RV64IXQCCMP-NEXT: .cfi_offset t2, -128
-; RV64IXQCCMP-NEXT: .cfi_offset a0, -136
-; RV64IXQCCMP-NEXT: .cfi_offset a1, -144
-; RV64IXQCCMP-NEXT: .cfi_offset a2, -152
-; RV64IXQCCMP-NEXT: .cfi_offset a3, -160
-; RV64IXQCCMP-NEXT: .cfi_offset a4, -168
-; RV64IXQCCMP-NEXT: .cfi_offset a5, -176
-; RV64IXQCCMP-NEXT: .cfi_offset a6, -184
-; RV64IXQCCMP-NEXT: .cfi_offset a7, -192
-; RV64IXQCCMP-NEXT: .cfi_offset t3, -200
-; RV64IXQCCMP-NEXT: .cfi_offset t4, -208
-; RV64IXQCCMP-NEXT: .cfi_offset t5, -216
-; RV64IXQCCMP-NEXT: .cfi_offset t6, -224
+; RV64IXQCCMP-NEXT: addi sp, sp, -128
+; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 288
+; RV64IXQCCMP-NEXT: sd t0, 168(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd t1, 160(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd t2, 152(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a1, 136(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a2, 128(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a3, 120(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a4, 112(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a5, 104(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a6, 96(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a7, 88(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd t3, 80(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: .cfi_offset t0, -120
+; RV64IXQCCMP-NEXT: .cfi_offset t1, -128
+; RV64IXQCCMP-NEXT: .cfi_offset t2, -136
+; RV64IXQCCMP-NEXT: .cfi_offset a0, -144
+; RV64IXQCCMP-NEXT: .cfi_offset a1, -152
+; RV64IXQCCMP-NEXT: .cfi_offset a2, -160
+; RV64IXQCCMP-NEXT: .cfi_offset a3, -168
+; RV64IXQCCMP-NEXT: .cfi_offset a4, -176
+; RV64IXQCCMP-NEXT: .cfi_offset a5, -184
+; RV64IXQCCMP-NEXT: .cfi_offset a6, -192
+; RV64IXQCCMP-NEXT: .cfi_offset a7, -200
+; RV64IXQCCMP-NEXT: .cfi_offset t3, -208
+; RV64IXQCCMP-NEXT: .cfi_offset t4, -216
+; RV64IXQCCMP-NEXT: .cfi_offset t5, -224
+; RV64IXQCCMP-NEXT: .cfi_offset t6, -232
; RV64IXQCCMP-NEXT: lui t0, %hi(var_test_irq)
; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV64IXQCCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0)
-; RV64IXQCCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0)
-; RV64IXQCCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0)
-; RV64IXQCCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
; RV64IXQCCMP-NEXT: addi a5, t0, %lo(var_test_irq)
; RV64IXQCCMP-NEXT: lw a0, 16(a5)
-; RV64IXQCCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IXQCCMP-NEXT: lw a0, 20(a5)
-; RV64IXQCCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64IXQCCMP-NEXT: lw t4, 24(a5)
; RV64IXQCCMP-NEXT: lw t5, 28(a5)
; RV64IXQCCMP-NEXT: lw t6, 32(a5)
@@ -2393,33 +2401,33 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV64IXQCCMP-NEXT: sw t6, 32(a5)
; RV64IXQCCMP-NEXT: sw t5, 28(a5)
; RV64IXQCCMP-NEXT: sw t4, 24(a5)
-; RV64IXQCCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: sw a0, 20(a5)
; RV64IXQCCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: sw a0, 16(a5)
+; RV64IXQCCMP-NEXT: sw a0, 20(a5)
; RV64IXQCCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
+; RV64IXQCCMP-NEXT: sw a0, 16(a5)
; RV64IXQCCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
+; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
; RV64IXQCCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
; RV64IXQCCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV64IXQCCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq)(t0)
-; RV64IXQCCMP-NEXT: ld t0, 160(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld t1, 152(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld t2, 144(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a0, 136(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a1, 128(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a2, 120(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a3, 112(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a4, 104(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a5, 96(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a6, 88(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld a7, 80(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld t3, 72(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld t4, 64(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld t5, 56(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-NEXT: ld t6, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld t0, 168(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld t1, 160(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld t2, 152(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a1, 136(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a2, 128(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a3, 120(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a4, 112(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a5, 104(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a6, 96(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a7, 88(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld t3, 80(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld t4, 72(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld t5, 64(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld t6, 56(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: .cfi_restore t0
; RV64IXQCCMP-NEXT: .cfi_restore t1
; RV64IXQCCMP-NEXT: .cfi_restore t2
@@ -2435,7 +2443,7 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV64IXQCCMP-NEXT: .cfi_restore t4
; RV64IXQCCMP-NEXT: .cfi_restore t5
; RV64IXQCCMP-NEXT: .cfi_restore t6
-; RV64IXQCCMP-NEXT: addi sp, sp, 112
+; RV64IXQCCMP-NEXT: addi sp, sp, 128
; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 160
; RV64IXQCCMP-NEXT: qc.cm.pop {ra, s0-s11}, 160
; RV64IXQCCMP-NEXT: .cfi_restore ra
@@ -2471,55 +2479,55 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IXQCCMP-FP-NEXT: .cfi_offset s9, -44
; RV32IXQCCMP-FP-NEXT: .cfi_offset s10, -48
; RV32IXQCCMP-FP-NEXT: .cfi_offset s11, -52
-; RV32IXQCCMP-FP-NEXT: addi sp, sp, -32
-; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 144
-; RV32IXQCCMP-FP-NEXT: sw t0, 88(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw t1, 84(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw t2, 80(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a1, 72(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a2, 68(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a3, 64(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a4, 60(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a5, 56(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a6, 52(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw a7, 48(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw t3, 44(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw t4, 40(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw t5, 36(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: sw t6, 32(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-FP-NEXT: .cfi_offset t0, -56
-; RV32IXQCCMP-FP-NEXT: .cfi_offset t1, -60
-; RV32IXQCCMP-FP-NEXT: .cfi_offset t2, -64
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a0, -68
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a1, -72
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a2, -76
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a3, -80
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a4, -84
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a5, -88
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a6, -92
-; RV32IXQCCMP-FP-NEXT: .cfi_offset a7, -96
-; RV32IXQCCMP-FP-NEXT: .cfi_offset t3, -100
-; RV32IXQCCMP-FP-NEXT: .cfi_offset t4, -104
-; RV32IXQCCMP-FP-NEXT: .cfi_offset t5, -108
-; RV32IXQCCMP-FP-NEXT: .cfi_offset t6, -112
+; RV32IXQCCMP-FP-NEXT: addi sp, sp, -48
+; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 160
+; RV32IXQCCMP-FP-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw t1, 88(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw t2, 84(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a1, 76(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a2, 72(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a3, 68(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a4, 64(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a5, 60(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a6, 56(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a7, 52(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw t3, 48(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: .cfi_offset t0, -68
+; RV32IXQCCMP-FP-NEXT: .cfi_offset t1, -72
+; RV32IXQCCMP-FP-NEXT: .cfi_offset t2, -76
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a0, -80
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a1, -84
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a2, -88
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a3, -92
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a4, -96
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a5, -100
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a6, -104
+; RV32IXQCCMP-FP-NEXT: .cfi_offset a7, -108
+; RV32IXQCCMP-FP-NEXT: .cfi_offset t3, -112
+; RV32IXQCCMP-FP-NEXT: .cfi_offset t4, -116
+; RV32IXQCCMP-FP-NEXT: .cfi_offset t5, -120
+; RV32IXQCCMP-FP-NEXT: .cfi_offset t6, -124
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0
; RV32IXQCCMP-FP-NEXT: lui t1, %hi(var_test_irq)
; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq)(t1)
-; RV32IXQCCMP-FP-NEXT: sw a0, -116(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, -128(s0) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+4)(t1)
-; RV32IXQCCMP-FP-NEXT: sw a0, -120(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, -132(s0) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+8)(t1)
-; RV32IXQCCMP-FP-NEXT: sw a0, -124(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, -136(s0) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+12)(t1)
-; RV32IXQCCMP-FP-NEXT: sw a0, -128(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, -140(s0) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: addi a5, t1, %lo(var_test_irq)
; RV32IXQCCMP-FP-NEXT: lw a0, 16(a5)
-; RV32IXQCCMP-FP-NEXT: sw a0, -132(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, -144(s0) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: lw a0, 20(a5)
-; RV32IXQCCMP-FP-NEXT: sw a0, -136(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, -148(s0) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: lw a0, 24(a5)
-; RV32IXQCCMP-FP-NEXT: sw a0, -140(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, -152(s0) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: lw t5, 28(a5)
; RV32IXQCCMP-FP-NEXT: lw t6, 32(a5)
; RV32IXQCCMP-FP-NEXT: lw s2, 36(a5)
@@ -2570,36 +2578,36 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IXQCCMP-FP-NEXT: sw s2, 36(a5)
; RV32IXQCCMP-FP-NEXT: sw t6, 32(a5)
; RV32IXQCCMP-FP-NEXT: sw t5, 28(a5)
-; RV32IXQCCMP-FP-NEXT: lw a0, -140(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, -152(s0) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: sw a0, 24(a5)
-; RV32IXQCCMP-FP-NEXT: lw a0, -136(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, -148(s0) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: sw a0, 20(a5)
-; RV32IXQCCMP-FP-NEXT: lw a0, -132(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, -144(s0) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: sw a0, 16(a5)
-; RV32IXQCCMP-FP-NEXT: lw a0, -128(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, -140(s0) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+12)(t1)
-; RV32IXQCCMP-FP-NEXT: lw a0, -124(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, -136(s0) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+8)(t1)
-; RV32IXQCCMP-FP-NEXT: lw a0, -120(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, -132(s0) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+4)(t1)
-; RV32IXQCCMP-FP-NEXT: lw a0, -116(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, -128(s0) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq)(t1)
-; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 144
-; RV32IXQCCMP-FP-NEXT: lw t0, 88(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw t1, 84(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw t2, 80(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a0, 76(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a1, 72(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a2, 68(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a3, 64(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a4, 60(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a5, 56(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a6, 52(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw a7, 48(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw t3, 44(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw t4, 40(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw t5, 36(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-FP-NEXT: lw t6, 32(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 160
+; RV32IXQCCMP-FP-NEXT: lw t0, 92(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw t1, 88(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw t2, 84(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a1, 76(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a2, 72(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a4, 64(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a5, 60(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a7, 52(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw t3, 48(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw t4, 44(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw t5, 40(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw t6, 36(sp) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: .cfi_restore t0
; RV32IXQCCMP-FP-NEXT: .cfi_restore t1
; RV32IXQCCMP-FP-NEXT: .cfi_restore t2
@@ -2615,7 +2623,7 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IXQCCMP-FP-NEXT: .cfi_restore t4
; RV32IXQCCMP-FP-NEXT: .cfi_restore t5
; RV32IXQCCMP-FP-NEXT: .cfi_restore t6
-; RV32IXQCCMP-FP-NEXT: addi sp, sp, 32
+; RV32IXQCCMP-FP-NEXT: addi sp, sp, 48
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 112
; RV32IXQCCMP-FP-NEXT: qc.cm.pop {ra, s0-s11}, 112
; RV32IXQCCMP-FP-NEXT: .cfi_restore ra
@@ -2653,53 +2661,53 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV64IXQCCMP-FP-NEXT: .cfi_offset s11, -104
; RV64IXQCCMP-FP-NEXT: addi sp, sp, -128
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 288
-; RV64IXQCCMP-FP-NEXT: sd t0, 176(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: sd t1, 168(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: sd t2, 160(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: sd a0, 152(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: sd a1, 144(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: sd a2, 136(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: sd a3, 128(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: sd a4, 120(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: sd a5, 112(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: sd a6, 104(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: sd a7, 96(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: sd t3, 88(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: sd t4, 80(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: sd t5, 72(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: sd t6, 64(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: .cfi_offset t0, -112
-; RV64IXQCCMP-FP-NEXT: .cfi_offset t1, -120
-; RV64IXQCCMP-FP-NEXT: .cfi_offset t2, -128
-; RV64IXQCCMP-FP-NEXT: .cfi_offset a0, -136
-; RV64IXQCCMP-FP-NEXT: .cfi_offset a1, -144
-; RV64IXQCCMP-FP-NEXT: .cfi_offset a2, -152
-; RV64IXQCCMP-FP-NEXT: .cfi_offset a3, -160
-; RV64IXQCCMP-FP-NEXT: .cfi_offset a4, -168
-; RV64IXQCCMP-FP-NEXT: .cfi_offset a5, -176
-; RV64IXQCCMP-FP-NEXT: .cfi_offset a6, -184
-; RV64IXQCCMP-FP-NEXT: .cfi_offset a7, -192
-; RV64IXQCCMP-FP-NEXT: .cfi_offset t3, -200
-; RV64IXQCCMP-FP-NEXT: .cfi_offset t4, -208
-; RV64IXQCCMP-FP-NEXT: .cfi_offset t5, -216
-; RV64IXQCCMP-FP-NEXT: .cfi_offset t6, -224
+; RV64IXQCCMP-FP-NEXT: sd t0, 168(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: sd t1, 160(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: sd t2, 152(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: sd a1, 136(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: sd a2, 128(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: sd a3, 120(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: sd a4, 112(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: sd a5, 104(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: sd a6, 96(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: sd a7, 88(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: sd t3, 80(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: .cfi_offset t0, -120
+; RV64IXQCCMP-FP-NEXT: .cfi_offset t1, -128
+; RV64IXQCCMP-FP-NEXT: .cfi_offset t2, -136
+; RV64IXQCCMP-FP-NEXT: .cfi_offset a0, -144
+; RV64IXQCCMP-FP-NEXT: .cfi_offset a1, -152
+; RV64IXQCCMP-FP-NEXT: .cfi_offset a2, -160
+; RV64IXQCCMP-FP-NEXT: .cfi_offset a3, -168
+; RV64IXQCCMP-FP-NEXT: .cfi_offset a4, -176
+; RV64IXQCCMP-FP-NEXT: .cfi_offset a5, -184
+; RV64IXQCCMP-FP-NEXT: .cfi_offset a6, -192
+; RV64IXQCCMP-FP-NEXT: .cfi_offset a7, -200
+; RV64IXQCCMP-FP-NEXT: .cfi_offset t3, -208
+; RV64IXQCCMP-FP-NEXT: .cfi_offset t4, -216
+; RV64IXQCCMP-FP-NEXT: .cfi_offset t5, -224
+; RV64IXQCCMP-FP-NEXT: .cfi_offset t6, -232
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0
; RV64IXQCCMP-FP-NEXT: lui t1, %hi(var_test_irq)
; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq)(t1)
-; RV64IXQCCMP-FP-NEXT: sd a0, -232(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+4)(t1)
; RV64IXQCCMP-FP-NEXT: sd a0, -240(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+8)(t1)
+; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+4)(t1)
; RV64IXQCCMP-FP-NEXT: sd a0, -248(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+12)(t1)
+; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+8)(t1)
; RV64IXQCCMP-FP-NEXT: sd a0, -256(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+12)(t1)
+; RV64IXQCCMP-FP-NEXT: sd a0, -264(s0) # 8-byte Folded Spill
; RV64IXQCCMP-FP-NEXT: addi a5, t1, %lo(var_test_irq)
; RV64IXQCCMP-FP-NEXT: lw a0, 16(a5)
-; RV64IXQCCMP-FP-NEXT: sd a0, -264(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: lw a0, 20(a5)
; RV64IXQCCMP-FP-NEXT: sd a0, -272(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: lw a0, 24(a5)
+; RV64IXQCCMP-FP-NEXT: lw a0, 20(a5)
; RV64IXQCCMP-FP-NEXT: sd a0, -280(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: lw a0, 24(a5)
+; RV64IXQCCMP-FP-NEXT: sd a0, -288(s0) # 8-byte Folded Spill
; RV64IXQCCMP-FP-NEXT: lw t5, 28(a5)
; RV64IXQCCMP-FP-NEXT: lw t6, 32(a5)
; RV64IXQCCMP-FP-NEXT: lw s2, 36(a5)
@@ -2750,36 +2758,36 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV64IXQCCMP-FP-NEXT: sw s2, 36(a5)
; RV64IXQCCMP-FP-NEXT: sw t6, 32(a5)
; RV64IXQCCMP-FP-NEXT: sw t5, 28(a5)
-; RV64IXQCCMP-FP-NEXT: ld a0, -280(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, -288(s0) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: sw a0, 24(a5)
-; RV64IXQCCMP-FP-NEXT: ld a0, -272(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, -280(s0) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: sw a0, 20(a5)
-; RV64IXQCCMP-FP-NEXT: ld a0, -264(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, -272(s0) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: sw a0, 16(a5)
-; RV64IXQCCMP-FP-NEXT: ld a0, -256(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, -264(s0) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+12)(t1)
-; RV64IXQCCMP-FP-NEXT: ld a0, -248(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, -256(s0) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+8)(t1)
-; RV64IXQCCMP-FP-NEXT: ld a0, -240(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, -248(s0) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+4)(t1)
-; RV64IXQCCMP-FP-NEXT: ld a0, -232(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, -240(s0) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq)(t1)
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 288
-; RV64IXQCCMP-FP-NEXT: ld t0, 176(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-FP-NEXT: ld t1, 168(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-FP-NEXT: ld t2, 160(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-FP-NEXT: ld a0, 152(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-FP-NEXT: ld a1, 144(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-FP-NEXT: ld a2, 136(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-FP-NEXT: ld a3, 128(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-FP-NEXT: ld a4, 120(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-FP-NEXT: ld a5, 112(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-FP-NEXT: ld a6, 104(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-FP-NEXT: ld a7, 96(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-FP-NEXT: ld t3, 88(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-FP-NEXT: ld t4, 80(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-FP-NEXT: ld t5, 72(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-FP-NEXT: ld t6, 64(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld t0, 168(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld t1, 160(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld t2, 152(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a1, 136(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a2, 128(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a3, 120(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a4, 112(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a5, 104(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a6, 96(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a7, 88(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld t3, 80(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld t4, 72(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld t5, 64(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld t6, 56(sp) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: .cfi_restore t0
; RV64IXQCCMP-FP-NEXT: .cfi_restore t1
; RV64IXQCCMP-FP-NEXT: .cfi_restore t2
@@ -2831,52 +2839,52 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IXQCCMP-SR-NEXT: .cfi_offset s9, -44
; RV32IXQCCMP-SR-NEXT: .cfi_offset s10, -48
; RV32IXQCCMP-SR-NEXT: .cfi_offset s11, -52
-; RV32IXQCCMP-SR-NEXT: addi sp, sp, -32
-; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 144
-; RV32IXQCCMP-SR-NEXT: sw t0, 88(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw t1, 84(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw t2, 80(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a1, 72(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a2, 68(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a3, 64(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a4, 60(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a5, 56(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a6, 52(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw a7, 48(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw t3, 44(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw t4, 40(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw t5, 36(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: sw t6, 32(sp) # 4-byte Folded Spill
-; RV32IXQCCMP-SR-NEXT: .cfi_offset t0, -56
-; RV32IXQCCMP-SR-NEXT: .cfi_offset t1, -60
-; RV32IXQCCMP-SR-NEXT: .cfi_offset t2, -64
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a0, -68
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a1, -72
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a2, -76
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a3, -80
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a4, -84
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a5, -88
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a6, -92
-; RV32IXQCCMP-SR-NEXT: .cfi_offset a7, -96
-; RV32IXQCCMP-SR-NEXT: .cfi_offset t3, -100
-; RV32IXQCCMP-SR-NEXT: .cfi_offset t4, -104
-; RV32IXQCCMP-SR-NEXT: .cfi_offset t5, -108
-; RV32IXQCCMP-SR-NEXT: .cfi_offset t6, -112
+; RV32IXQCCMP-SR-NEXT: addi sp, sp, -48
+; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 160
+; RV32IXQCCMP-SR-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw t1, 88(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw t2, 84(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a1, 76(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a2, 72(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a3, 68(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a4, 64(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a5, 60(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a6, 56(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a7, 52(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw t3, 48(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: .cfi_offset t0, -68
+; RV32IXQCCMP-SR-NEXT: .cfi_offset t1, -72
+; RV32IXQCCMP-SR-NEXT: .cfi_offset t2, -76
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a0, -80
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a1, -84
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a2, -88
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a3, -92
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a4, -96
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a5, -100
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a6, -104
+; RV32IXQCCMP-SR-NEXT: .cfi_offset a7, -108
+; RV32IXQCCMP-SR-NEXT: .cfi_offset t3, -112
+; RV32IXQCCMP-SR-NEXT: .cfi_offset t4, -116
+; RV32IXQCCMP-SR-NEXT: .cfi_offset t5, -120
+; RV32IXQCCMP-SR-NEXT: .cfi_offset t6, -124
; RV32IXQCCMP-SR-NEXT: lui t0, %hi(var_test_irq)
; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV32IXQCCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0)
-; RV32IXQCCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0)
-; RV32IXQCCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0)
-; RV32IXQCCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
; RV32IXQCCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq)
; RV32IXQCCMP-SR-NEXT: lw a0, 16(a5)
-; RV32IXQCCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IXQCCMP-SR-NEXT: lw a0, 20(a5)
-; RV32IXQCCMP-SR-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IXQCCMP-SR-NEXT: lw t4, 24(a5)
; RV32IXQCCMP-SR-NEXT: lw t5, 28(a5)
; RV32IXQCCMP-SR-NEXT: lw t6, 32(a5)
@@ -2929,33 +2937,33 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IXQCCMP-SR-NEXT: sw t6, 32(a5)
; RV32IXQCCMP-SR-NEXT: sw t5, 28(a5)
; RV32IXQCCMP-SR-NEXT: sw t4, 24(a5)
-; RV32IXQCCMP-SR-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: sw a0, 20(a5)
; RV32IXQCCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: sw a0, 16(a5)
+; RV32IXQCCMP-SR-NEXT: sw a0, 20(a5)
; RV32IXQCCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
+; RV32IXQCCMP-SR-NEXT: sw a0, 16(a5)
; RV32IXQCCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
+; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
; RV32IXQCCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
; RV32IXQCCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-SR-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0)
-; RV32IXQCCMP-SR-NEXT: lw t0, 88(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw t1, 84(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw t2, 80(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a0, 76(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a1, 72(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a2, 68(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a3, 64(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a4, 60(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a5, 56(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a6, 52(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw a7, 48(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw t3, 44(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw t4, 40(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw t5, 36(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: lw t6, 32(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw t0, 92(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw t1, 88(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw t2, 84(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a1, 76(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a2, 72(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a4, 64(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a5, 60(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw a7, 52(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw t3, 48(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw t4, 44(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw t5, 40(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: lw t6, 36(sp) # 4-byte Folded Reload
; RV32IXQCCMP-SR-NEXT: .cfi_restore t0
; RV32IXQCCMP-SR-NEXT: .cfi_restore t1
; RV32IXQCCMP-SR-NEXT: .cfi_restore t2
@@ -2971,7 +2979,7 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV32IXQCCMP-SR-NEXT: .cfi_restore t4
; RV32IXQCCMP-SR-NEXT: .cfi_restore t5
; RV32IXQCCMP-SR-NEXT: .cfi_restore t6
-; RV32IXQCCMP-SR-NEXT: addi sp, sp, 32
+; RV32IXQCCMP-SR-NEXT: addi sp, sp, 48
; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 112
; RV32IXQCCMP-SR-NEXT: qc.cm.pop {ra, s0-s11}, 112
; RV32IXQCCMP-SR-NEXT: .cfi_restore ra
@@ -3007,52 +3015,52 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV64IXQCCMP-SR-NEXT: .cfi_offset s9, -88
; RV64IXQCCMP-SR-NEXT: .cfi_offset s10, -96
; RV64IXQCCMP-SR-NEXT: .cfi_offset s11, -104
-; RV64IXQCCMP-SR-NEXT: addi sp, sp, -112
-; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 272
-; RV64IXQCCMP-SR-NEXT: sd t0, 160(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd t1, 152(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd t2, 144(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a0, 136(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a1, 128(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a2, 120(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a3, 112(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a4, 104(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a5, 96(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a6, 88(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd a7, 80(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd t3, 72(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd t4, 64(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd t5, 56(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: sd t6, 48(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: .cfi_offset t0, -112
-; RV64IXQCCMP-SR-NEXT: .cfi_offset t1, -120
-; RV64IXQCCMP-SR-NEXT: .cfi_offset t2, -128
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a0, -136
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a1, -144
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a2, -152
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a3, -160
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a4, -168
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a5, -176
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a6, -184
-; RV64IXQCCMP-SR-NEXT: .cfi_offset a7, -192
-; RV64IXQCCMP-SR-NEXT: .cfi_offset t3, -200
-; RV64IXQCCMP-SR-NEXT: .cfi_offset t4, -208
-; RV64IXQCCMP-SR-NEXT: .cfi_offset t5, -216
-; RV64IXQCCMP-SR-NEXT: .cfi_offset t6, -224
+; RV64IXQCCMP-SR-NEXT: addi sp, sp, -128
+; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 288
+; RV64IXQCCMP-SR-NEXT: sd t0, 168(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd t1, 160(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd t2, 152(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a1, 136(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a2, 128(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a3, 120(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a4, 112(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a5, 104(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a6, 96(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a7, 88(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd t3, 80(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: .cfi_offset t0, -120
+; RV64IXQCCMP-SR-NEXT: .cfi_offset t1, -128
+; RV64IXQCCMP-SR-NEXT: .cfi_offset t2, -136
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a0, -144
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a1, -152
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a2, -160
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a3, -168
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a4, -176
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a5, -184
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a6, -192
+; RV64IXQCCMP-SR-NEXT: .cfi_offset a7, -200
+; RV64IXQCCMP-SR-NEXT: .cfi_offset t3, -208
+; RV64IXQCCMP-SR-NEXT: .cfi_offset t4, -216
+; RV64IXQCCMP-SR-NEXT: .cfi_offset t5, -224
+; RV64IXQCCMP-SR-NEXT: .cfi_offset t6, -232
; RV64IXQCCMP-SR-NEXT: lui t0, %hi(var_test_irq)
; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV64IXQCCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0)
-; RV64IXQCCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0)
-; RV64IXQCCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0)
-; RV64IXQCCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
; RV64IXQCCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq)
; RV64IXQCCMP-SR-NEXT: lw a0, 16(a5)
-; RV64IXQCCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IXQCCMP-SR-NEXT: lw a0, 20(a5)
-; RV64IXQCCMP-SR-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64IXQCCMP-SR-NEXT: lw t4, 24(a5)
; RV64IXQCCMP-SR-NEXT: lw t5, 28(a5)
; RV64IXQCCMP-SR-NEXT: lw t6, 32(a5)
@@ -3105,33 +3113,33 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV64IXQCCMP-SR-NEXT: sw t6, 32(a5)
; RV64IXQCCMP-SR-NEXT: sw t5, 28(a5)
; RV64IXQCCMP-SR-NEXT: sw t4, 24(a5)
-; RV64IXQCCMP-SR-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: sw a0, 20(a5)
; RV64IXQCCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: sw a0, 16(a5)
+; RV64IXQCCMP-SR-NEXT: sw a0, 20(a5)
; RV64IXQCCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
+; RV64IXQCCMP-SR-NEXT: sw a0, 16(a5)
; RV64IXQCCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
+; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
; RV64IXQCCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
; RV64IXQCCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV64IXQCCMP-SR-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0)
-; RV64IXQCCMP-SR-NEXT: ld t0, 160(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld t1, 152(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld t2, 144(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a0, 136(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a1, 128(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a2, 120(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a3, 112(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a4, 104(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a5, 96(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a6, 88(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld a7, 80(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld t3, 72(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld t4, 64(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld t5, 56(sp) # 8-byte Folded Reload
-; RV64IXQCCMP-SR-NEXT: ld t6, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld t0, 168(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld t1, 160(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld t2, 152(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a1, 136(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a2, 128(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a3, 120(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a4, 112(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a5, 104(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a6, 96(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a7, 88(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld t3, 80(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld t4, 72(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld t5, 64(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld t6, 56(sp) # 8-byte Folded Reload
; RV64IXQCCMP-SR-NEXT: .cfi_restore t0
; RV64IXQCCMP-SR-NEXT: .cfi_restore t1
; RV64IXQCCMP-SR-NEXT: .cfi_restore t2
@@ -3147,7 +3155,7 @@ define void @callee_with_irq() "interrupt"="machine" {
; RV64IXQCCMP-SR-NEXT: .cfi_restore t4
; RV64IXQCCMP-SR-NEXT: .cfi_restore t5
; RV64IXQCCMP-SR-NEXT: .cfi_restore t6
-; RV64IXQCCMP-SR-NEXT: addi sp, sp, 112
+; RV64IXQCCMP-SR-NEXT: addi sp, sp, 128
; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 160
; RV64IXQCCMP-SR-NEXT: qc.cm.pop {ra, s0-s11}, 160
; RV64IXQCCMP-SR-NEXT: .cfi_restore ra
@@ -3173,8 +3181,8 @@ define void @callee_with_irq() "interrupt"="machine" {
define void @callee_no_irq() {
; RV32IXQCCMP-LABEL: callee_no_irq:
; RV32IXQCCMP: # %bb.0:
-; RV32IXQCCMP-NEXT: qc.cm.push {ra, s0-s11}, -80
-; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 80
+; RV32IXQCCMP-NEXT: qc.cm.push {ra, s0-s11}, -96
+; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 96
; RV32IXQCCMP-NEXT: .cfi_offset ra, -4
; RV32IXQCCMP-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-NEXT: .cfi_offset s1, -12
@@ -3190,18 +3198,18 @@ define void @callee_no_irq() {
; RV32IXQCCMP-NEXT: .cfi_offset s11, -52
; RV32IXQCCMP-NEXT: lui t0, %hi(var_test_irq)
; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV32IXQCCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0)
-; RV32IXQCCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0)
-; RV32IXQCCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0)
-; RV32IXQCCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: addi a5, t0, %lo(var_test_irq)
; RV32IXQCCMP-NEXT: lw a0, 16(a5)
-; RV32IXQCCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw a0, 20(a5)
-; RV32IXQCCMP-NEXT: sw a0, 4(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32IXQCCMP-NEXT: lw t4, 24(a5)
; RV32IXQCCMP-NEXT: lw t5, 28(a5)
; RV32IXQCCMP-NEXT: lw t6, 32(a5)
@@ -3254,19 +3262,19 @@ define void @callee_no_irq() {
; RV32IXQCCMP-NEXT: sw t6, 32(a5)
; RV32IXQCCMP-NEXT: sw t5, 28(a5)
; RV32IXQCCMP-NEXT: sw t4, 24(a5)
-; RV32IXQCCMP-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 20(a5)
; RV32IXQCCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, 16(a5)
+; RV32IXQCCMP-NEXT: sw a0, 20(a5)
; RV32IXQCCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
+; RV32IXQCCMP-NEXT: sw a0, 16(a5)
; RV32IXQCCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
+; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
; RV32IXQCCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
; RV32IXQCCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq)(t0)
-; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0-s11}, 80
+; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0-s11}, 96
;
; RV64IXQCCMP-LABEL: callee_no_irq:
; RV64IXQCCMP: # %bb.0:
@@ -3287,18 +3295,18 @@ define void @callee_no_irq() {
; RV64IXQCCMP-NEXT: .cfi_offset s11, -104
; RV64IXQCCMP-NEXT: lui t0, %hi(var_test_irq)
; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV64IXQCCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0)
; RV64IXQCCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0)
+; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0)
; RV64IXQCCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0)
+; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0)
; RV64IXQCCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0)
+; RV64IXQCCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IXQCCMP-NEXT: addi a5, t0, %lo(var_test_irq)
; RV64IXQCCMP-NEXT: lw a0, 16(a5)
-; RV64IXQCCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-NEXT: lw a0, 20(a5)
; RV64IXQCCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-NEXT: lw a0, 20(a5)
+; RV64IXQCCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
; RV64IXQCCMP-NEXT: lw t4, 24(a5)
; RV64IXQCCMP-NEXT: lw t5, 28(a5)
; RV64IXQCCMP-NEXT: lw t6, 32(a5)
@@ -3351,24 +3359,24 @@ define void @callee_no_irq() {
; RV64IXQCCMP-NEXT: sw t6, 32(a5)
; RV64IXQCCMP-NEXT: sw t5, 28(a5)
; RV64IXQCCMP-NEXT: sw t4, 24(a5)
-; RV64IXQCCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 20(a5)
-; RV64IXQCCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, 16(a5)
-; RV64IXQCCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
-; RV64IXQCCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
-; RV64IXQCCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
-; RV64IXQCCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq)(t0)
; RV64IXQCCMP-NEXT: qc.cm.popret {ra, s0-s11}, 160
;
; RV32IXQCCMP-FP-LABEL: callee_no_irq:
; RV32IXQCCMP-FP: # %bb.0:
-; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s11}, -80
-; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 80
+; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s11}, -96
+; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 96
; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4
; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-FP-NEXT: .cfi_offset s1, -12
@@ -3385,20 +3393,20 @@ define void @callee_no_irq() {
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0
; RV32IXQCCMP-FP-NEXT: lui t1, %hi(var_test_irq)
; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq)(t1)
-; RV32IXQCCMP-FP-NEXT: sw a0, -56(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, -68(s0) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+4)(t1)
-; RV32IXQCCMP-FP-NEXT: sw a0, -60(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, -72(s0) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+8)(t1)
-; RV32IXQCCMP-FP-NEXT: sw a0, -64(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, -76(s0) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+12)(t1)
-; RV32IXQCCMP-FP-NEXT: sw a0, -68(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, -80(s0) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: addi a5, t1, %lo(var_test_irq)
; RV32IXQCCMP-FP-NEXT: lw a0, 16(a5)
-; RV32IXQCCMP-FP-NEXT: sw a0, -72(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, -84(s0) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: lw a0, 20(a5)
-; RV32IXQCCMP-FP-NEXT: sw a0, -76(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, -88(s0) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: lw a0, 24(a5)
-; RV32IXQCCMP-FP-NEXT: sw a0, -80(s0) # 4-byte Folded Spill
+; RV32IXQCCMP-FP-NEXT: sw a0, -92(s0) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: lw t5, 28(a5)
; RV32IXQCCMP-FP-NEXT: lw t6, 32(a5)
; RV32IXQCCMP-FP-NEXT: lw s2, 36(a5)
@@ -3449,22 +3457,22 @@ define void @callee_no_irq() {
; RV32IXQCCMP-FP-NEXT: sw s2, 36(a5)
; RV32IXQCCMP-FP-NEXT: sw t6, 32(a5)
; RV32IXQCCMP-FP-NEXT: sw t5, 28(a5)
-; RV32IXQCCMP-FP-NEXT: lw a0, -80(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, -92(s0) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: sw a0, 24(a5)
-; RV32IXQCCMP-FP-NEXT: lw a0, -76(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, -88(s0) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: sw a0, 20(a5)
-; RV32IXQCCMP-FP-NEXT: lw a0, -72(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, -84(s0) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: sw a0, 16(a5)
-; RV32IXQCCMP-FP-NEXT: lw a0, -68(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, -80(s0) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+12)(t1)
-; RV32IXQCCMP-FP-NEXT: lw a0, -64(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, -76(s0) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+8)(t1)
-; RV32IXQCCMP-FP-NEXT: lw a0, -60(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, -72(s0) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+4)(t1)
-; RV32IXQCCMP-FP-NEXT: lw a0, -56(s0) # 4-byte Folded Reload
+; RV32IXQCCMP-FP-NEXT: lw a0, -68(s0) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq)(t1)
-; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 80
-; RV32IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s11}, 80
+; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 96
+; RV32IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s11}, 96
;
; RV64IXQCCMP-FP-LABEL: callee_no_irq:
; RV64IXQCCMP-FP: # %bb.0:
@@ -3483,23 +3491,25 @@ define void @callee_no_irq() {
; RV64IXQCCMP-FP-NEXT: .cfi_offset s9, -88
; RV64IXQCCMP-FP-NEXT: .cfi_offset s10, -96
; RV64IXQCCMP-FP-NEXT: .cfi_offset s11, -104
+; RV64IXQCCMP-FP-NEXT: addi sp, sp, -16
+; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 176
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0
; RV64IXQCCMP-FP-NEXT: lui t1, %hi(var_test_irq)
; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq)(t1)
-; RV64IXQCCMP-FP-NEXT: sd a0, -112(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+4)(t1)
; RV64IXQCCMP-FP-NEXT: sd a0, -120(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+8)(t1)
+; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+4)(t1)
; RV64IXQCCMP-FP-NEXT: sd a0, -128(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+12)(t1)
+; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+8)(t1)
; RV64IXQCCMP-FP-NEXT: sd a0, -136(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+12)(t1)
+; RV64IXQCCMP-FP-NEXT: sd a0, -144(s0) # 8-byte Folded Spill
; RV64IXQCCMP-FP-NEXT: addi a5, t1, %lo(var_test_irq)
; RV64IXQCCMP-FP-NEXT: lw a0, 16(a5)
-; RV64IXQCCMP-FP-NEXT: sd a0, -144(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: lw a0, 20(a5)
; RV64IXQCCMP-FP-NEXT: sd a0, -152(s0) # 8-byte Folded Spill
-; RV64IXQCCMP-FP-NEXT: lw a0, 24(a5)
+; RV64IXQCCMP-FP-NEXT: lw a0, 20(a5)
; RV64IXQCCMP-FP-NEXT: sd a0, -160(s0) # 8-byte Folded Spill
+; RV64IXQCCMP-FP-NEXT: lw a0, 24(a5)
+; RV64IXQCCMP-FP-NEXT: sd a0, -168(s0) # 8-byte Folded Spill
; RV64IXQCCMP-FP-NEXT: lw t5, 28(a5)
; RV64IXQCCMP-FP-NEXT: lw t6, 32(a5)
; RV64IXQCCMP-FP-NEXT: lw s2, 36(a5)
@@ -3550,27 +3560,29 @@ define void @callee_no_irq() {
; RV64IXQCCMP-FP-NEXT: sw s2, 36(a5)
; RV64IXQCCMP-FP-NEXT: sw t6, 32(a5)
; RV64IXQCCMP-FP-NEXT: sw t5, 28(a5)
-; RV64IXQCCMP-FP-NEXT: ld a0, -160(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, -168(s0) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: sw a0, 24(a5)
-; RV64IXQCCMP-FP-NEXT: ld a0, -152(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, -160(s0) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: sw a0, 20(a5)
-; RV64IXQCCMP-FP-NEXT: ld a0, -144(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, -152(s0) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: sw a0, 16(a5)
-; RV64IXQCCMP-FP-NEXT: ld a0, -136(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, -144(s0) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+12)(t1)
-; RV64IXQCCMP-FP-NEXT: ld a0, -128(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, -136(s0) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+8)(t1)
-; RV64IXQCCMP-FP-NEXT: ld a0, -120(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, -128(s0) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+4)(t1)
-; RV64IXQCCMP-FP-NEXT: ld a0, -112(s0) # 8-byte Folded Reload
+; RV64IXQCCMP-FP-NEXT: ld a0, -120(s0) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq)(t1)
-; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 160
+; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 176
+; RV64IXQCCMP-FP-NEXT: addi sp, sp, 16
+; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 160
; RV64IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s11}, 160
;
; RV32IXQCCMP-SR-LABEL: callee_no_irq:
; RV32IXQCCMP-SR: # %bb.0:
-; RV32IXQCCMP-SR-NEXT: qc.cm.push {ra, s0-s11}, -80
-; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 80
+; RV32IXQCCMP-SR-NEXT: qc.cm.push {ra, s0-s11}, -96
+; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 96
; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4
; RV32IXQCCMP-SR-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-SR-NEXT: .cfi_offset s1, -12
@@ -3586,18 +3598,18 @@ define void @callee_no_irq() {
; RV32IXQCCMP-SR-NEXT: .cfi_offset s11, -52
; RV32IXQCCMP-SR-NEXT: lui t0, %hi(var_test_irq)
; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV32IXQCCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0)
-; RV32IXQCCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0)
-; RV32IXQCCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0)
-; RV32IXQCCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IXQCCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq)
; RV32IXQCCMP-SR-NEXT: lw a0, 16(a5)
-; RV32IXQCCMP-SR-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IXQCCMP-SR-NEXT: lw a0, 20(a5)
-; RV32IXQCCMP-SR-NEXT: sw a0, 4(sp) # 4-byte Folded Spill
+; RV32IXQCCMP-SR-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32IXQCCMP-SR-NEXT: lw t4, 24(a5)
; RV32IXQCCMP-SR-NEXT: lw t5, 28(a5)
; RV32IXQCCMP-SR-NEXT: lw t6, 32(a5)
@@ -3650,19 +3662,19 @@ define void @callee_no_irq() {
; RV32IXQCCMP-SR-NEXT: sw t6, 32(a5)
; RV32IXQCCMP-SR-NEXT: sw t5, 28(a5)
; RV32IXQCCMP-SR-NEXT: sw t4, 24(a5)
-; RV32IXQCCMP-SR-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: sw a0, 20(a5)
; RV32IXQCCMP-SR-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: sw a0, 16(a5)
+; RV32IXQCCMP-SR-NEXT: sw a0, 20(a5)
; RV32IXQCCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
+; RV32IXQCCMP-SR-NEXT: sw a0, 16(a5)
; RV32IXQCCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
+; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
; RV32IXQCCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
; RV32IXQCCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
+; RV32IXQCCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0)
-; RV32IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s11}, 80
+; RV32IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s11}, 96
;
; RV64IXQCCMP-SR-LABEL: callee_no_irq:
; RV64IXQCCMP-SR: # %bb.0:
@@ -3683,18 +3695,18 @@ define void @callee_no_irq() {
; RV64IXQCCMP-SR-NEXT: .cfi_offset s11, -104
; RV64IXQCCMP-SR-NEXT: lui t0, %hi(var_test_irq)
; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
-; RV64IXQCCMP-SR-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0)
; RV64IXQCCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0)
+; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0)
; RV64IXQCCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0)
+; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0)
; RV64IXQCCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0)
+; RV64IXQCCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IXQCCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq)
; RV64IXQCCMP-SR-NEXT: lw a0, 16(a5)
-; RV64IXQCCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
-; RV64IXQCCMP-SR-NEXT: lw a0, 20(a5)
; RV64IXQCCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
+; RV64IXQCCMP-SR-NEXT: lw a0, 20(a5)
+; RV64IXQCCMP-SR-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
; RV64IXQCCMP-SR-NEXT: lw t4, 24(a5)
; RV64IXQCCMP-SR-NEXT: lw t5, 28(a5)
; RV64IXQCCMP-SR-NEXT: lw t6, 32(a5)
@@ -3747,17 +3759,17 @@ define void @callee_no_irq() {
; RV64IXQCCMP-SR-NEXT: sw t6, 32(a5)
; RV64IXQCCMP-SR-NEXT: sw t5, 28(a5)
; RV64IXQCCMP-SR-NEXT: sw t4, 24(a5)
-; RV64IXQCCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
; RV64IXQCCMP-SR-NEXT: sw a0, 20(a5)
-; RV64IXQCCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64IXQCCMP-SR-NEXT: sw a0, 16(a5)
-; RV64IXQCCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
-; RV64IXQCCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
-; RV64IXQCCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
-; RV64IXQCCMP-SR-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
+; RV64IXQCCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0)
; RV64IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s11}, 160
%val = load [32 x i32], ptr @var_test_irq
@@ -3771,99 +3783,99 @@ declare ptr @llvm.frameaddress.p0(i32 immarg)
define i32 @use_fp(i32 %x) {
; RV32IXQCCMP-LABEL: use_fp:
; RV32IXQCCMP: # %bb.0: # %entry
-; RV32IXQCCMP-NEXT: qc.cm.pushfp {ra, s0-s1}, -16
-; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 16
+; RV32IXQCCMP-NEXT: qc.cm.pushfp {ra, s0-s1}, -32
+; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 32
; RV32IXQCCMP-NEXT: .cfi_offset ra, -4
; RV32IXQCCMP-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-NEXT: .cfi_offset s1, -12
; RV32IXQCCMP-NEXT: .cfi_def_cfa s0, 0
; RV32IXQCCMP-NEXT: mv s1, a0
-; RV32IXQCCMP-NEXT: addi a1, s0, -16
+; RV32IXQCCMP-NEXT: addi a1, s0, -20
; RV32IXQCCMP-NEXT: mv a0, s0
; RV32IXQCCMP-NEXT: call bar
; RV32IXQCCMP-NEXT: mv a0, s1
-; RV32IXQCCMP-NEXT: .cfi_def_cfa sp, 16
-; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0-s1}, 16
+; RV32IXQCCMP-NEXT: .cfi_def_cfa sp, 32
+; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0-s1}, 32
;
; RV64IXQCCMP-LABEL: use_fp:
; RV64IXQCCMP: # %bb.0: # %entry
-; RV64IXQCCMP-NEXT: qc.cm.pushfp {ra, s0-s1}, -32
-; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 32
+; RV64IXQCCMP-NEXT: qc.cm.pushfp {ra, s0-s1}, -48
+; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 48
; RV64IXQCCMP-NEXT: .cfi_offset ra, -8
; RV64IXQCCMP-NEXT: .cfi_offset s0, -16
; RV64IXQCCMP-NEXT: .cfi_offset s1, -24
; RV64IXQCCMP-NEXT: .cfi_def_cfa s0, 0
; RV64IXQCCMP-NEXT: mv s1, a0
-; RV64IXQCCMP-NEXT: addi a1, s0, -28
+; RV64IXQCCMP-NEXT: addi a1, s0, -36
; RV64IXQCCMP-NEXT: mv a0, s0
; RV64IXQCCMP-NEXT: call bar
; RV64IXQCCMP-NEXT: mv a0, s1
-; RV64IXQCCMP-NEXT: .cfi_def_cfa sp, 32
-; RV64IXQCCMP-NEXT: qc.cm.popret {ra, s0-s1}, 32
+; RV64IXQCCMP-NEXT: .cfi_def_cfa sp, 48
+; RV64IXQCCMP-NEXT: qc.cm.popret {ra, s0-s1}, 48
;
; RV32IXQCCMP-FP-LABEL: use_fp:
; RV32IXQCCMP-FP: # %bb.0: # %entry
-; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s1}, -16
-; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 16
+; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s1}, -32
+; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 32
; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4
; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-FP-NEXT: .cfi_offset s1, -12
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0
; RV32IXQCCMP-FP-NEXT: mv s1, a0
-; RV32IXQCCMP-FP-NEXT: addi a1, s0, -16
+; RV32IXQCCMP-FP-NEXT: addi a1, s0, -20
; RV32IXQCCMP-FP-NEXT: mv a0, s0
; RV32IXQCCMP-FP-NEXT: call bar
; RV32IXQCCMP-FP-NEXT: mv a0, s1
-; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 16
-; RV32IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s1}, 16
+; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 32
+; RV32IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s1}, 32
;
; RV64IXQCCMP-FP-LABEL: use_fp:
; RV64IXQCCMP-FP: # %bb.0: # %entry
-; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s1}, -32
-; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 32
+; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s1}, -48
+; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 48
; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8
; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16
; RV64IXQCCMP-FP-NEXT: .cfi_offset s1, -24
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0
; RV64IXQCCMP-FP-NEXT: mv s1, a0
-; RV64IXQCCMP-FP-NEXT: addi a1, s0, -28
+; RV64IXQCCMP-FP-NEXT: addi a1, s0, -36
; RV64IXQCCMP-FP-NEXT: mv a0, s0
; RV64IXQCCMP-FP-NEXT: call bar
; RV64IXQCCMP-FP-NEXT: mv a0, s1
-; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 32
-; RV64IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s1}, 32
+; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 48
+; RV64IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s1}, 48
;
; RV32IXQCCMP-SR-LABEL: use_fp:
; RV32IXQCCMP-SR: # %bb.0: # %entry
-; RV32IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0-s1}, -16
-; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 16
+; RV32IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0-s1}, -32
+; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 32
; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4
; RV32IXQCCMP-SR-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-SR-NEXT: .cfi_offset s1, -12
; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV32IXQCCMP-SR-NEXT: mv s1, a0
-; RV32IXQCCMP-SR-NEXT: addi a1, s0, -16
+; RV32IXQCCMP-SR-NEXT: addi a1, s0, -20
; RV32IXQCCMP-SR-NEXT: mv a0, s0
; RV32IXQCCMP-SR-NEXT: call bar
; RV32IXQCCMP-SR-NEXT: mv a0, s1
-; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 16
-; RV32IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s1}, 16
+; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 32
+; RV32IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s1}, 32
;
; RV64IXQCCMP-SR-LABEL: use_fp:
; RV64IXQCCMP-SR: # %bb.0: # %entry
-; RV64IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0-s1}, -32
-; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 32
+; RV64IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0-s1}, -48
+; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 48
; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8
; RV64IXQCCMP-SR-NEXT: .cfi_offset s0, -16
; RV64IXQCCMP-SR-NEXT: .cfi_offset s1, -24
; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV64IXQCCMP-SR-NEXT: mv s1, a0
-; RV64IXQCCMP-SR-NEXT: addi a1, s0, -28
+; RV64IXQCCMP-SR-NEXT: addi a1, s0, -36
; RV64IXQCCMP-SR-NEXT: mv a0, s0
; RV64IXQCCMP-SR-NEXT: call bar
; RV64IXQCCMP-SR-NEXT: mv a0, s1
-; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 32
-; RV64IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s1}, 32
+; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 48
+; RV64IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s1}, 48
entry:
%var = alloca i32, align 4
%0 = tail call ptr @llvm.frameaddress.p0(i32 0)
diff --git a/llvm/test/CodeGen/RISCV/xqccmp-with-float.ll b/llvm/test/CodeGen/RISCV/xqccmp-with-float.ll
index f4f3c31..12d30cb 100644
--- a/llvm/test/CodeGen/RISCV/xqccmp-with-float.ll
+++ b/llvm/test/CodeGen/RISCV/xqccmp-with-float.ll
@@ -8,31 +8,31 @@ declare void @callee()
define float @foo(float %arg) {
; XQCCMP32-LABEL: foo:
; XQCCMP32: # %bb.0: # %entry
-; XQCCMP32-NEXT: qc.cm.push {ra}, -16
-; XQCCMP32-NEXT: .cfi_def_cfa_offset 16
+; XQCCMP32-NEXT: qc.cm.push {ra}, -32
+; XQCCMP32-NEXT: .cfi_def_cfa_offset 32
; XQCCMP32-NEXT: .cfi_offset ra, -4
-; XQCCMP32-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
-; XQCCMP32-NEXT: .cfi_offset fs0, -8
+; XQCCMP32-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill
+; XQCCMP32-NEXT: .cfi_offset fs0, -20
; XQCCMP32-NEXT: fmv.s fs0, fa0
; XQCCMP32-NEXT: call callee
; XQCCMP32-NEXT: fmv.s fa0, fs0
-; XQCCMP32-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload
+; XQCCMP32-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
; XQCCMP32-NEXT: .cfi_restore fs0
-; XQCCMP32-NEXT: qc.cm.popret {ra}, 16
+; XQCCMP32-NEXT: qc.cm.popret {ra}, 32
;
; XQCCMP64-LABEL: foo:
; XQCCMP64: # %bb.0: # %entry
-; XQCCMP64-NEXT: qc.cm.push {ra}, -16
-; XQCCMP64-NEXT: .cfi_def_cfa_offset 16
+; XQCCMP64-NEXT: qc.cm.push {ra}, -32
+; XQCCMP64-NEXT: .cfi_def_cfa_offset 32
; XQCCMP64-NEXT: .cfi_offset ra, -8
-; XQCCMP64-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
-; XQCCMP64-NEXT: .cfi_offset fs0, -12
+; XQCCMP64-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill
+; XQCCMP64-NEXT: .cfi_offset fs0, -20
; XQCCMP64-NEXT: fmv.s fs0, fa0
; XQCCMP64-NEXT: call callee
; XQCCMP64-NEXT: fmv.s fa0, fs0
-; XQCCMP64-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
+; XQCCMP64-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
; XQCCMP64-NEXT: .cfi_restore fs0
-; XQCCMP64-NEXT: qc.cm.popret {ra}, 16
+; XQCCMP64-NEXT: qc.cm.popret {ra}, 32
entry:
call void @callee()
ret float %arg
@@ -41,20 +41,20 @@ entry:
define void @foo2(i32 %x, float %y) {
; XQCCMP32-LABEL: foo2:
; XQCCMP32: # %bb.0: # %entry
-; XQCCMP32-NEXT: qc.cm.push {ra, s0}, -16
-; XQCCMP32-NEXT: .cfi_def_cfa_offset 16
+; XQCCMP32-NEXT: qc.cm.push {ra, s0}, -32
+; XQCCMP32-NEXT: .cfi_def_cfa_offset 32
; XQCCMP32-NEXT: .cfi_offset ra, -4
; XQCCMP32-NEXT: .cfi_offset s0, -8
-; XQCCMP32-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
-; XQCCMP32-NEXT: .cfi_offset fs0, -12
+; XQCCMP32-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill
+; XQCCMP32-NEXT: .cfi_offset fs0, -20
; XQCCMP32-NEXT: fmv.s fs0, fa0
; XQCCMP32-NEXT: mv s0, a0
; XQCCMP32-NEXT: call bar
; XQCCMP32-NEXT: mv a0, s0
; XQCCMP32-NEXT: fmv.s fa0, fs0
-; XQCCMP32-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
+; XQCCMP32-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
; XQCCMP32-NEXT: .cfi_restore fs0
-; XQCCMP32-NEXT: qc.cm.pop {ra, s0}, 16
+; XQCCMP32-NEXT: qc.cm.pop {ra, s0}, 32
; XQCCMP32-NEXT: .cfi_restore ra
; XQCCMP32-NEXT: .cfi_restore s0
; XQCCMP32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll b/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
index 61c1de5..c98b9b8 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
+++ b/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
@@ -8,8 +8,8 @@ define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 {
; RV32-NEXT: .cfi_offset ra, -12
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: .cfi_offset s1, -4
-; RV32-NEXT: addi sp, sp, -4
-; RV32-NEXT: .cfi_def_cfa_offset 20
+; RV32-NEXT: addi sp, sp, -8
+; RV32-NEXT: .cfi_def_cfa_offset 24
; RV32-NEXT: sw a4, 4(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a2, 0(sp) # 4-byte Folded Spill
; RV32-NEXT: mv a2, a1
@@ -33,7 +33,7 @@ define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 {
; RV32-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
; RV32-NEXT: sb a0, 0(s0)
; RV32-NEXT: mv a0, s1
-; RV32-NEXT: addi sp, sp, 4
+; RV32-NEXT: addi sp, sp, 8
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: cm.popret {ra, s0-s1}, 16
entry:
diff --git a/llvm/test/CodeGen/RISCV/zcmp-with-float.ll b/llvm/test/CodeGen/RISCV/zcmp-with-float.ll
index 638a3af..d2ecba2 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-with-float.ll
+++ b/llvm/test/CodeGen/RISCV/zcmp-with-float.ll
@@ -8,31 +8,31 @@ declare void @callee()
define float @foo(float %arg) {
; RV32-LABEL: foo:
; RV32: # %bb.0: # %entry
-; RV32-NEXT: cm.push {ra}, -16
-; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: cm.push {ra}, -32
+; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset fs0, -8
+; RV32-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset fs0, -20
; RV32-NEXT: fmv.s fs0, fa0
; RV32-NEXT: call callee
; RV32-NEXT: fmv.s fa0, fs0
-; RV32-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore fs0
-; RV32-NEXT: cm.popret {ra}, 16
+; RV32-NEXT: cm.popret {ra}, 32
;
; RV64-LABEL: foo:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: cm.push {ra}, -16
-; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: cm.push {ra}, -32
+; RV64-NEXT: .cfi_def_cfa_offset 32
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
-; RV64-NEXT: .cfi_offset fs0, -12
+; RV64-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill
+; RV64-NEXT: .cfi_offset fs0, -20
; RV64-NEXT: fmv.s fs0, fa0
; RV64-NEXT: call callee
; RV64-NEXT: fmv.s fa0, fs0
-; RV64-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
+; RV64-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
; RV64-NEXT: .cfi_restore fs0
-; RV64-NEXT: cm.popret {ra}, 16
+; RV64-NEXT: cm.popret {ra}, 32
entry:
call void @callee()
ret float %arg
@@ -41,20 +41,20 @@ entry:
define void @foo2(i32 %x, float %y) {
; RV32-LABEL: foo2:
; RV32: # %bb.0: # %entry
-; RV32-NEXT: cm.push {ra, s0}, -16
-; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: cm.push {ra, s0}, -32
+; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: .cfi_offset ra, -8
; RV32-NEXT: .cfi_offset s0, -4
-; RV32-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset fs0, -12
+; RV32-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset fs0, -20
; RV32-NEXT: fmv.s fs0, fa0
; RV32-NEXT: mv s0, a0
; RV32-NEXT: call bar
; RV32-NEXT: mv a0, s0
; RV32-NEXT: fmv.s fa0, fs0
-; RV32-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
+; RV32-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore fs0
-; RV32-NEXT: cm.pop {ra, s0}, 16
+; RV32-NEXT: cm.pop {ra, s0}, 32
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/X86/extractelement-load.ll b/llvm/test/CodeGen/X86/extractelement-load.ll
index 022b25a..c251f2a 100644
--- a/llvm/test/CodeGen/X86/extractelement-load.ll
+++ b/llvm/test/CodeGen/X86/extractelement-load.ll
@@ -528,3 +528,70 @@ define i32 @main() nounwind {
%r = add i32 %e1, %e2
ret i32 %r
}
+
+; A test for an incorrect combine for single-value extraction from VBROADCAST_LOAD.
+; The wrong combine makes the second call (%t8) use the result stored by the
+; preceding instructions instead of %t4.
+declare <2 x float> @ccosf(<2 x float>)
+define dso_local <2 x float> @multiuse_of_single_value_from_vbroadcast_load(ptr %p, ptr %arr) nounwind {
+; X86-SSE2-LABEL: multiuse_of_single_value_from_vbroadcast_load:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %esi
+; X86-SSE2-NEXT: subl $16, %esp
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-SSE2-NEXT: movups 24(%esi), %xmm0
+; X86-SSE2-NEXT: movups %xmm0, (%esp) # 16-byte Spill
+; X86-SSE2-NEXT: movhps %xmm0, (%eax)
+; X86-SSE2-NEXT: movaps 32(%esi), %xmm0
+; X86-SSE2-NEXT: calll ccosf@PLT
+; X86-SSE2-NEXT: movlps %xmm0, 32(%esi)
+; X86-SSE2-NEXT: movups (%esp), %xmm0 # 16-byte Reload
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE2-NEXT: calll ccosf@PLT
+; X86-SSE2-NEXT: addl $16, %esp
+; X86-SSE2-NEXT: popl %esi
+; X86-SSE2-NEXT: retl
+;
+; X64-SSSE3-LABEL: multiuse_of_single_value_from_vbroadcast_load:
+; X64-SSSE3: # %bb.0:
+; X64-SSSE3-NEXT: pushq %rbx
+; X64-SSSE3-NEXT: subq $16, %rsp
+; X64-SSSE3-NEXT: movq %rsi, %rbx
+; X64-SSSE3-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
+; X64-SSSE3-NEXT: movapd %xmm0, (%rsp) # 16-byte Spill
+; X64-SSSE3-NEXT: movlpd %xmm0, (%rdi)
+; X64-SSSE3-NEXT: movaps 32(%rsi), %xmm0
+; X64-SSSE3-NEXT: callq ccosf@PLT
+; X64-SSSE3-NEXT: movlps %xmm0, 32(%rbx)
+; X64-SSSE3-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; X64-SSSE3-NEXT: callq ccosf@PLT
+; X64-SSSE3-NEXT: addq $16, %rsp
+; X64-SSSE3-NEXT: popq %rbx
+; X64-SSSE3-NEXT: retq
+;
+; X64-AVX-LABEL: multiuse_of_single_value_from_vbroadcast_load:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: pushq %rbx
+; X64-AVX-NEXT: movq %rsi, %rbx
+; X64-AVX-NEXT: vmovsd 32(%rsi), %xmm0 # xmm0 = mem[0],zero
+; X64-AVX-NEXT: vmovsd %xmm0, (%rdi)
+; X64-AVX-NEXT: vmovaps 32(%rsi), %xmm0
+; X64-AVX-NEXT: callq ccosf@PLT
+; X64-AVX-NEXT: vmovlps %xmm0, 32(%rbx)
+; X64-AVX-NEXT: vmovddup 32(%rbx), %xmm0 # xmm0 = mem[0,0]
+; X64-AVX-NEXT: callq ccosf@PLT
+; X64-AVX-NEXT: popq %rbx
+; X64-AVX-NEXT: retq
+ %p1 = getelementptr [5 x <2 x float>], ptr %arr, i64 0, i64 3
+ %p2 = getelementptr inbounds [5 x <2 x float>], ptr %arr, i64 0, i64 4, i32 0
+ %t3 = load <4 x float>, ptr %p1, align 8
+ %t4 = shufflevector <4 x float> %t3, <4 x float> poison, <2 x i32> <i32 2, i32 3>
+ store <2 x float> %t4, ptr %p, align 16
+ %t5 = load <4 x float>, ptr %p2, align 32
+ %t6 = shufflevector <4 x float> %t5, <4 x float> poison, <2 x i32> <i32 0, i32 1>
+ %t7 = call <2 x float> @ccosf(<2 x float> %t6)
+ store <2 x float> %t7, ptr %p2, align 32
+ %t8 = call <2 x float> @ccosf(<2 x float> %t4)
+ ret <2 x float> %t8
+}
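
The hazard this new test pins down is easier to see in scalar form: %t4 is read from memory before the first ccosf result is stored back to the same location, so the second call must consume the originally loaded value, not anything re-read after that store. A rough scalar analog in plain C++, purely illustrative and not part of the patch (std::cos stands in for ccosf, `before` for %t4):

```cpp
#include <cmath>

// Illustrative only: 'before' plays the role of %t4, arr[4] the role of the
// memory behind %p2, and std::cos stands in for the ccosf calls.
double second_call(double *arr, double *out) {
  double before = arr[4];     // %t4: loaded before the clobbering store
  *out = before;              // store %t4 to %p
  arr[4] = std::cos(arr[4]);  // first call's result overwrites arr[4] (%t7)
  return std::cos(before);    // must use 'before', not the re-read arr[4]
}
```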
diff --git a/llvm/test/DebugInfo/NVPTX/debug-info.ll b/llvm/test/DebugInfo/NVPTX/debug-info.ll
index fa2925a..1fc945b 100644
--- a/llvm/test/DebugInfo/NVPTX/debug-info.ll
+++ b/llvm/test/DebugInfo/NVPTX/debug-info.ll
@@ -20,7 +20,7 @@
; CHECK: )
; CHECK: {
; CHECK-DAG: .reg .pred %p<2>;
-; CHECK-DAG: .reg .f32 %f<5>;
+; CHECK-DAG: .reg .b32 %f<5>;
; CHECK-DAG: .reg .b32 %r<6>;
; CHECK-DAG: .reg .b64 %rd<8>;
; CHECK: .loc [[DEBUG_INFO_CU:[0-9]+]] 5 0
diff --git a/llvm/test/Transforms/FunctionAttrs/initializes.ll b/llvm/test/Transforms/FunctionAttrs/initializes.ll
index 861c61d..937595b 100644
--- a/llvm/test/Transforms/FunctionAttrs/initializes.ll
+++ b/llvm/test/Transforms/FunctionAttrs/initializes.ll
@@ -635,3 +635,17 @@ define void @memset_offset_1_size_0(ptr %dst, ptr %src) {
call void @llvm.memmove.p0.p0.i64(ptr %dst.1, ptr %src, i64 0, i1 false)
ret void
}
+
+; We should bail if the range overflows a signed 64-bit int.
+define void @range_overflows_signed_64_bit_int(ptr %arg) {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
+; CHECK-LABEL: define void @range_overflows_signed_64_bit_int(
+; CHECK-SAME: ptr writeonly captures(none) [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[GETELEMENTPTR:%.*]] = getelementptr i8, ptr [[ARG]], i64 9223372036854775804
+; CHECK-NEXT: store i32 0, ptr [[GETELEMENTPTR]], align 4
+; CHECK-NEXT: ret void
+;
+ %getelementptr = getelementptr i8, ptr %arg, i64 9223372036854775804
+ store i32 0, ptr %getelementptr
+ ret void
+}
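
The guard this test exercises boils down to a signed 64-bit range check on the access offsets. A minimal sketch of that kind of check in plain C++, under the assumption that the analysis computes an end offset as start + size; the helper name and types are illustrative, not taken from the FunctionAttrs implementation:

```cpp
#include <cstdint>
#include <optional>

// Hypothetical helper: returns Start + Size, or nullopt if the addition
// overflows a signed 64-bit integer, which is the situation the test above
// creates with a GEP offset of 9223372036854775804 followed by a 4-byte store.
std::optional<int64_t> computeEndOffset(int64_t Start, int64_t Size) {
  int64_t End;
  if (__builtin_add_overflow(Start, Size, &End))
    return std::nullopt; // bail out instead of recording a bogus range
  return End;
}
```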
diff --git a/llvm/utils/gn/secondary/llvm/lib/ProfileData/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/ProfileData/BUILD.gn
index c6fa142..244688d 100644
--- a/llvm/utils/gn/secondary/llvm/lib/ProfileData/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/ProfileData/BUILD.gn
@@ -10,6 +10,7 @@ static_library("ProfileData") {
]
sources = [
"GCOV.cpp",
+ "IndexedMemProfData.cpp",
"InstrProf.cpp",
"InstrProfCorrelator.cpp",
"InstrProfReader.cpp",
diff --git a/llvm/utils/vim/syntax/llvm.vim b/llvm/utils/vim/syntax/llvm.vim
index fac509c..e3b8ff8 100644
--- a/llvm/utils/vim/syntax/llvm.vim
+++ b/llvm/utils/vim/syntax/llvm.vim
@@ -24,17 +24,17 @@ syn match llvmType /\<i\d\+\>/
" much more common for these tokens to be used for boolean constants.
syn keyword llvmStatement add addrspacecast alloca and arcp ashr atomicrmw
syn keyword llvmStatement bitcast br catchpad catchswitch catchret call callbr
-syn keyword llvmStatement cleanuppad cleanupret cmpxchg eq exact extractelement
-syn keyword llvmStatement extractvalue fadd fast fcmp fdiv fence fmul fneg fpext
-syn keyword llvmStatement fptosi fptoui fptrunc free freeze frem fsub
-syn keyword llvmStatement getelementptr icmp inbounds indirectbr insertelement
-syn keyword llvmStatement insertvalue inttoptr invoke landingpad load lshr
-syn keyword llvmStatement malloc max min mul nand ne ninf nnan nsw nsz nuw oeq
-syn keyword llvmStatement oge ogt ole olt one or ord phi ptrtoint resume ret
-syn keyword llvmStatement sdiv select sext sge sgt shl shufflevector sitofp
-syn keyword llvmStatement sle slt srem store sub switch trunc udiv ueq uge ugt
-syn keyword llvmStatement uitofp ule ult umax umin une uno unreachable unwind
-syn keyword llvmStatement urem va_arg xchg xor zext
+syn keyword llvmStatement cleanuppad cleanupret cmpxchg disjoint eq exact
+syn keyword llvmStatement extractelement extractvalue fadd fast fcmp fdiv fence
+syn keyword llvmStatement fmul fneg fpext fptosi fptoui fptrunc free freeze
+syn keyword llvmStatement frem fsub getelementptr icmp inbounds indirectbr
+syn keyword llvmStatement insertelement insertvalue inttoptr invoke landingpad
+syn keyword llvmStatement load lshr malloc max min mul nand ne ninf nnan nsw
+syn keyword llvmStatement nsz nuw oeq oge ogt ole olt one or ord phi ptrtoint
+syn keyword llvmStatement resume ret sdiv select sext sge sgt shl shufflevector
+syn keyword llvmStatement sitofp sle slt srem store sub switch trunc udiv ueq
+syn keyword llvmStatement uge ugt uitofp ule ult umax umin une uno unreachable
+syn keyword llvmStatement unwind urem va_arg xchg xor zext
" Debug records.
syn match llvmStatement /\v#dbg_(assign|declare|label|value)/
diff --git a/mlir/include/mlir/Dialect/GPU/Transforms/Passes.h b/mlir/include/mlir/Dialect/GPU/Transforms/Passes.h
index 5cc6508..a13ad33 100644
--- a/mlir/include/mlir/Dialect/GPU/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/GPU/Transforms/Passes.h
@@ -13,6 +13,7 @@
#ifndef MLIR_DIALECT_GPU_TRANSFORMS_PASSES_H_
#define MLIR_DIALECT_GPU_TRANSFORMS_PASSES_H_
+#include "mlir/Dialect/AMDGPU/Utils/Chipset.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Utils/GPUUtils.h"
#include "mlir/IR/PatternMatch.h"
@@ -68,6 +69,20 @@ void populateGpuLowerClusteredSubgroupReduceToShufflePatterns(
RewritePatternSet &patterns, unsigned subgroupSize,
unsigned shuffleBitwidth = 32, PatternBenefit benefit = 1);
+/// Collect a set of patterns to lower `gpu.subgroup_reduce` into `amdgpu.dpp`
+/// ops over scalar types. Assumes that the subgroup has
+/// `subgroupSize` lanes. Applicable only to AMD GPUs.
+void populateGpuLowerSubgroupReduceToDPPPatterns(RewritePatternSet &patterns,
+ unsigned subgroupSize,
+ amdgpu::Chipset chipset,
+ PatternBenefit benefit = 1);
+
+/// Disjoint counterpart of `populateGpuLowerSubgroupReduceToDPPPatterns`
+/// that only matches `gpu.subgroup_reduce` ops with a `cluster_size`.
+void populateGpuLowerClusteredSubgroupReduceToDPPPatterns(
+ RewritePatternSet &patterns, unsigned subgroupSize, amdgpu::Chipset chipset,
+ PatternBenefit benefit = 1);
+
/// Collect all patterns to rewrite ops within the GPU dialect.
inline void populateGpuRewritePatterns(RewritePatternSet &patterns) {
populateGpuAllReducePatterns(patterns);
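
For context, the two declarations added above are meant to be used like the existing populate* helpers. A minimal caller might look like the sketch below; the gfx942 chipset string and the subgroup size of 64 are illustrative values mirroring the test pass changed later in this patch, not requirements of the API:

```cpp
#include "mlir/Dialect/AMDGPU/Utils/Chipset.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Sketch only: register the new DPP lowerings into a rewrite pattern set.
static LogicalResult addDppLowerings(RewritePatternSet &patterns) {
  FailureOr<amdgpu::Chipset> chipset = amdgpu::Chipset::parse("gfx942");
  if (failed(chipset))
    return failure();
  // Clustered and non-clustered reductions are matched by disjoint patterns,
  // so both populate functions are normally registered together.
  populateGpuLowerSubgroupReduceToDPPPatterns(patterns, /*subgroupSize=*/64,
                                              *chipset);
  populateGpuLowerClusteredSubgroupReduceToDPPPatterns(
      patterns, /*subgroupSize=*/64, *chipset);
  return success();
}
```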
diff --git a/mlir/lib/Dialect/GPU/Transforms/SubgroupReduceLowering.cpp b/mlir/lib/Dialect/GPU/Transforms/SubgroupReduceLowering.cpp
index 43eff3e..74face4 100644
--- a/mlir/lib/Dialect/GPU/Transforms/SubgroupReduceLowering.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/SubgroupReduceLowering.cpp
@@ -10,15 +10,19 @@
//
//===----------------------------------------------------------------------===//
+#include "mlir/Dialect/AMDGPU/IR/AMDGPUDialect.h"
+#include "mlir/Dialect/AMDGPU/Utils/Chipset.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/GPU/Utils/GPUUtils.h"
+#include "mlir/Dialect/LLVMIR/ROCDLDialect.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
@@ -362,6 +366,163 @@ private:
unsigned shuffleBitwidth = 0;
bool matchClustered = false;
};
+
+static FailureOr<Value>
+createSubgroupDPPReduction(PatternRewriter &rewriter, gpu::SubgroupReduceOp op,
+ Value input, gpu::AllReduceOperation mode,
+ const ClusterInfo &ci, amdgpu::Chipset chipset) {
+ Location loc = op.getLoc();
+ Value dpp;
+ Value res = input;
+ constexpr int allRows = 0xf;
+ constexpr int allBanks = 0xf;
+ const bool boundCtrl = true;
+ if (ci.clusterSize >= 2) {
+ // Perform reduction between all lanes N <-> N+1.
+ dpp = rewriter.create<amdgpu::DPPOp>(
+ loc, res.getType(), res, res, amdgpu::DPPPerm::quad_perm,
+ rewriter.getI32ArrayAttr({1, 0, 3, 2}), allRows, allBanks, boundCtrl);
+ res = vector::makeArithReduction(rewriter, loc,
+ gpu::convertReductionKind(mode), res, dpp);
+ }
+
+ if (ci.clusterSize >= 4) {
+ // Perform reduction between all lanes N <-> N+2.
+ dpp = rewriter.create<amdgpu::DPPOp>(
+ loc, res.getType(), res, res, amdgpu::DPPPerm::quad_perm,
+ rewriter.getI32ArrayAttr({2, 3, 0, 1}), allRows, allBanks, boundCtrl);
+ res = vector::makeArithReduction(rewriter, loc,
+ gpu::convertReductionKind(mode), res, dpp);
+ }
+ if (ci.clusterSize >= 8) {
+ // Perform reduction between all lanes N <-> 7-N,
+    // e.g. lane[0] <-> lane[7], lane[1] <-> lane[6]..., lane[3] <-> lane[4].
+ dpp = rewriter.create<amdgpu::DPPOp>(
+ loc, res.getType(), res, res, amdgpu::DPPPerm::row_half_mirror,
+ rewriter.getUnitAttr(), allRows, allBanks, boundCtrl);
+ res = vector::makeArithReduction(rewriter, loc,
+ gpu::convertReductionKind(mode), res, dpp);
+ }
+ if (ci.clusterSize >= 16) {
+ // Perform reduction between all lanes N <-> 15-N,
+    // e.g. lane[0] <-> lane[15], lane[1] <-> lane[14]..., lane[7] <-> lane[8].
+ dpp = rewriter.create<amdgpu::DPPOp>(
+ loc, res.getType(), res, res, amdgpu::DPPPerm::row_mirror,
+ rewriter.getUnitAttr(), allRows, allBanks, boundCtrl);
+ res = vector::makeArithReduction(rewriter, loc,
+ gpu::convertReductionKind(mode), res, dpp);
+ }
+ if (ci.clusterSize >= 32) {
+ if (chipset.majorVersion <= 9) {
+ // Broadcast last value from each row to next row.
+ // Use row mask to avoid polluting rows 1 and 3.
+ dpp = rewriter.create<amdgpu::DPPOp>(
+ loc, res.getType(), res, res, amdgpu::DPPPerm::row_bcast_15,
+ rewriter.getUnitAttr(), 0xa, allBanks,
+ /*bound_ctrl*/ false);
+ res = vector::makeArithReduction(
+ rewriter, loc, gpu::convertReductionKind(mode), res, dpp);
+ } else if (chipset.majorVersion <= 12) {
+ // Use a permute lane to cross rows (row 1 <-> row 0, row 3 <-> row 2).
+ Value uint32Max = rewriter.create<arith::ConstantOp>(
+ loc, rewriter.getI32Type(), rewriter.getI32IntegerAttr(-1));
+ dpp = rewriter.create<ROCDL::PermlaneX16Op>(loc, res.getType(), res, res,
+ uint32Max, uint32Max,
+ /*fi=*/true,
+ /*bound_ctrl=*/false);
+ res = vector::makeArithReduction(
+ rewriter, loc, gpu::convertReductionKind(mode), res, dpp);
+ if (ci.subgroupSize == 32) {
+ Value lane0 = rewriter.create<arith::ConstantOp>(
+ loc, rewriter.getI32Type(), rewriter.getI32IntegerAttr(0));
+ res =
+ rewriter.create<ROCDL::ReadlaneOp>(loc, res.getType(), res, lane0);
+ }
+ } else {
+ return rewriter.notifyMatchFailure(
+ op, "Subgroup reduce lowering to DPP not currently supported for "
+ "this device.");
+ }
+ }
+ if (ci.clusterSize >= 64) {
+ if (chipset.majorVersion <= 9) {
+ // Broadcast 31st lane value to rows 2 and 3.
+ // Use row mask to avoid polluting rows 0 and 1.
+ dpp = rewriter.create<amdgpu::DPPOp>(
+ loc, res.getType(), res, res, amdgpu::DPPPerm::row_bcast_31,
+ rewriter.getUnitAttr(), 0xc, allBanks,
+ /*bound_ctrl*/ false);
+
+ } else if (chipset.majorVersion <= 12) {
+      // Assume the reduction across the first 32 lanes has already been done.
+      // Perform the final reduction by combining the values read from lane 0
+      // and lane 32.
+ Value lane0 = rewriter.create<arith::ConstantOp>(
+ loc, rewriter.getI32Type(), rewriter.getI32IntegerAttr(0));
+ Value lane32 = rewriter.create<arith::ConstantOp>(
+ loc, rewriter.getI32Type(), rewriter.getI32IntegerAttr(32));
+ dpp = rewriter.create<ROCDL::ReadlaneOp>(loc, res.getType(), res, lane32);
+ res = rewriter.create<ROCDL::ReadlaneOp>(loc, res.getType(), res, lane0);
+ } else {
+ return rewriter.notifyMatchFailure(
+ op, "Subgroup reduce lowering to DPP not currently supported for "
+ "this device.");
+ }
+ res = vector::makeArithReduction(rewriter, loc,
+ gpu::convertReductionKind(mode), res, dpp);
+ }
+ assert(res.getType() == input.getType());
+ return res;
+}
+
+/// Lowers `gpu.subgroup_reduce` into `amdgpu.dpp` ops over scalar types.
+/// Assumes that the subgroup has `subgroupSize` lanes. Applicable only to
+/// AMD GPUs.
+struct ScalarSubgroupReduceToDPP final
+ : OpRewritePattern<gpu::SubgroupReduceOp> {
+ ScalarSubgroupReduceToDPP(MLIRContext *ctx, unsigned subgroupSize,
+ bool matchClustered, amdgpu::Chipset chipset,
+ PatternBenefit benefit)
+ : OpRewritePattern(ctx, benefit), subgroupSize(subgroupSize),
+ matchClustered(matchClustered), chipset(chipset) {}
+
+ LogicalResult matchAndRewrite(gpu::SubgroupReduceOp op,
+ PatternRewriter &rewriter) const override {
+ if (op.getClusterSize().has_value() != matchClustered) {
+ return rewriter.notifyMatchFailure(
+ op, llvm::formatv("op is {0}clustered but pattern is configured to "
+ "only match {1}clustered ops",
+ matchClustered ? "non-" : "",
+ matchClustered ? "" : "non-"));
+ }
+ auto ci = getAndValidateClusterInfo(op, subgroupSize);
+ if (failed(ci))
+ return failure();
+
+ if (ci->clusterStride != 1)
+ return rewriter.notifyMatchFailure(
+ op, "Subgroup reductions using DPP are currently only available for "
+ "clusters of contiguous lanes.");
+
+ Type valueTy = op.getType();
+ if (!valueTy.isIntOrFloat())
+ return rewriter.notifyMatchFailure(
+ op, "Value type is not a compatible scalar.");
+
+ FailureOr<Value> dpp = createSubgroupDPPReduction(
+ rewriter, op, op.getValue(), op.getOp(), *ci, chipset);
+ if (failed(dpp))
+ return failure();
+
+ rewriter.replaceOp(op, dpp.value());
+ return success();
+ }
+
+private:
+ unsigned subgroupSize = 0;
+ bool matchClustered = false;
+ amdgpu::Chipset chipset;
+};
} // namespace
void mlir::populateGpuBreakDownSubgroupReducePatterns(
@@ -372,6 +533,22 @@ void mlir::populateGpuBreakDownSubgroupReducePatterns(
patterns.add<ScalarizeSingleElementReduce>(patterns.getContext(), benefit);
}
+void mlir::populateGpuLowerSubgroupReduceToDPPPatterns(
+ RewritePatternSet &patterns, unsigned subgroupSize, amdgpu::Chipset chipset,
+ PatternBenefit benefit) {
+ patterns.add<ScalarSubgroupReduceToDPP>(patterns.getContext(), subgroupSize,
+ /*matchClustered=*/false, chipset,
+ benefit);
+}
+
+void mlir::populateGpuLowerClusteredSubgroupReduceToDPPPatterns(
+ RewritePatternSet &patterns, unsigned subgroupSize, amdgpu::Chipset chipset,
+ PatternBenefit benefit) {
+ patterns.add<ScalarSubgroupReduceToDPP>(patterns.getContext(), subgroupSize,
+ /*matchClustered=*/true, chipset,
+ benefit);
+}
+
void mlir::populateGpuLowerSubgroupReduceToShufflePatterns(
RewritePatternSet &patterns, unsigned subgroupSize,
unsigned shuffleBitwidth, PatternBenefit benefit) {
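
To make the lane pairings in createSubgroupDPPReduction easier to follow, here is a small host-side C++ model of the four intra-row stages (the two quad_perm exchanges, row_half_mirror, and row_mirror) for a 16-lane cluster. It is purely illustrative and not part of the patch; the cross-row stages (row_bcast_15/31 and permlanex16/readlane) depend on the chipset and are omitted:

```cpp
#include <array>
#include <cstdio>

int main() {
  // Arbitrary per-lane inputs for a 16-lane cluster.
  std::array<int, 16> lanes;
  for (int i = 0; i < 16; ++i)
    lanes[i] = i + 1;

  // One reduction stage: every lane adds the value held by its partner lane.
  auto step = [&](auto partnerOf) {
    std::array<int, 16> next = lanes;
    for (int i = 0; i < 16; ++i)
      next[i] = lanes[i] + lanes[partnerOf(i)];
    lanes = next;
  };

  step([](int i) { return i ^ 1; });                     // quad_perm(1,0,3,2): N <-> N+1
  step([](int i) { return i ^ 2; });                     // quad_perm(2,3,0,1): N <-> N+2
  step([](int i) { return (i / 8) * 8 + (7 - i % 8); }); // row_half_mirror:    N <-> 7-N
  step([](int i) { return 15 - i; });                    // row_mirror:         N <-> 15-N

  // Every lane now holds the cluster-wide sum: 1 + 2 + ... + 16 = 136.
  std::printf("lane 0 holds %d\n", lanes[0]);
  return 0;
}
```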
diff --git a/mlir/test/Dialect/GPU/subgroup-reduce-lowering.mlir b/mlir/test/Dialect/GPU/subgroup-reduce-lowering.mlir
index 9f2aa1b..098145a 100644
--- a/mlir/test/Dialect/GPU/subgroup-reduce-lowering.mlir
+++ b/mlir/test/Dialect/GPU/subgroup-reduce-lowering.mlir
@@ -6,14 +6,26 @@
// RUN: --test-gpu-subgroup-reduce-lowering="expand-to-shuffles" %s \
// RUN: | FileCheck %s --check-prefix=CHECK-SHFL
+// RUN: mlir-opt --allow-unregistered-dialect \
+// RUN: --test-gpu-subgroup-reduce-lowering="expand-to-shuffles target=gfx942" %s \
+// RUN: | FileCheck %s --check-prefix=CHECK-GFX9
+
+// RUN: mlir-opt --allow-unregistered-dialect \
+// RUN: --test-gpu-subgroup-reduce-lowering="expand-to-shuffles target=gfx1030" %s \
+// RUN: | FileCheck %s --check-prefix=CHECK-GFX10
+
// CHECK-SUB: gpu.module @kernels {
// CHECK-SHFL: gpu.module @kernels {
+// CHECK-GFX9: gpu.module @kernels {
+// CHECK-GFX10: gpu.module @kernels {
gpu.module @kernels {
// CHECK-SUB-LABEL: gpu.func @kernel0(
// CHECK-SUB-SAME: %[[ARG0:.+]]: vector<5xf16>)
//
// CHECK-SHFL-LABEL: gpu.func @kernel0(
+ // CHECK-GFX9-LABEL: gpu.func @kernel0(
+ // CHECK-GFX10-LABEL: gpu.func @kernel0(
gpu.func @kernel0(%arg0: vector<5xf16>) kernel {
// CHECK-SUB: %[[VZ:.+]] = arith.constant dense<0.0{{.*}}> : vector<5xf16>
// CHECK-SUB: %[[E0:.+]] = vector.extract_strided_slice %[[ARG0]] {offsets = [0], sizes = [2], strides = [1]} : vector<5xf16> to vector<2xf16>
@@ -26,16 +38,26 @@ gpu.module @kernels {
// CHECK-SUB: %[[R2:.+]] = gpu.subgroup_reduce add %[[E2]] : (f16) -> f16
// CHECK-SUB: %[[V2:.+]] = vector.insert %[[R2]], %[[V1]] [4] : f16 into vector<5xf16>
// CHECK-SUB: "test.consume"(%[[V2]]) : (vector<5xf16>) -> ()
+ // CHECK-GFX9-COUNT-6: amdgpu.dpp
+ // CHECK-GFX10-COUNT-4: amdgpu.dpp
+ // CHECK-GFX10: rocdl.permlanex16
+ // CHECK-GFX10-COUNT-2: rocdl.readlane
%sum0 = gpu.subgroup_reduce add %arg0 : (vector<5xf16>) -> (vector<5xf16>)
"test.consume"(%sum0) : (vector<5xf16>) -> ()
// CHECK-SUB-COUNT-3: gpu.subgroup_reduce mul {{.+}} uniform
// CHECK-SUB: "test.consume"
+ // CHECK-GFX9-COUNT-6: amdgpu.dpp
+ // CHECK-GFX10-COUNT-4: amdgpu.dpp
+ // CHECK-GFX10: rocdl.permlanex16
+ // CHECK-GFX10-COUNT-2: rocdl.readlane
%sum1 = gpu.subgroup_reduce mul %arg0 uniform : (vector<5xf16>) -> (vector<5xf16>)
"test.consume"(%sum1) : (vector<5xf16>) -> ()
// CHECK-SUB-COUNT-3: gpu.subgroup_reduce mul {{.+}} cluster(size = 4)
// CHECK-SUB: "test.consume"
+ // CHECK-GFX9-COUNT-2: amdgpu.dpp {{.+}}
+ // CHECK-GFX10-COUNT-2: amdgpu.dpp {{.+}}
%sum2 = gpu.subgroup_reduce mul %arg0 cluster(size = 4) : (vector<5xf16>) -> (vector<5xf16>)
"test.consume"(%sum2) : (vector<5xf16>) -> ()
@@ -52,27 +74,44 @@ gpu.module @kernels {
// CHECK-SUB-SAME: %[[ARG0:.+]]: vector<1xf32>)
//
// CHECK-SHFL-LABEL: gpu.func @kernel1(
+ // CHECK-GFX9-LABEL: gpu.func @kernel1(
+ // CHECK-GFX10-LABEL: gpu.func @kernel1(
gpu.func @kernel1(%arg0: vector<1xf32>) kernel {
// CHECK-SUB: %[[E0:.+]] = vector.extract %[[ARG0]][0] : f32 from vector<1xf32>
// CHECK-SUB: %[[R0:.+]] = gpu.subgroup_reduce add %[[E0]] : (f32) -> f32
// CHECK-SUB: %[[V0:.+]] = vector.broadcast %[[R0]] : f32 to vector<1xf32>
// CHECK-SUB: "test.consume"(%[[V0]]) : (vector<1xf32>) -> ()
+ // CHECK-GFX9-COUNT-6: amdgpu.dpp
+ // CHECK-GFX10-COUNT-4: amdgpu.dpp
+ // CHECK-GFX10: rocdl.permlanex16
+ // CHECK-GFX10-COUNT-2: rocdl.readlane
%sum0 = gpu.subgroup_reduce add %arg0 : (vector<1xf32>) -> (vector<1xf32>)
"test.consume"(%sum0) : (vector<1xf32>) -> ()
// CHECK-SUB: gpu.subgroup_reduce add {{.+}} uniform : (f32) -> f32
// CHECK-SUB: "test.consume"
+ // CHECK-GFX9-COUNT-6: amdgpu.dpp
+ // CHECK-GFX10-COUNT-4: amdgpu.dpp
+ // CHECK-GFX10: rocdl.permlanex16
+ // CHECK-GFX10-COUNT-2: rocdl.readlane
%sum1 = gpu.subgroup_reduce add %arg0 uniform : (vector<1xf32>) -> (vector<1xf32>)
"test.consume"(%sum1) : (vector<1xf32>) -> ()
// Note stride is dropped because it is == 1.
// CHECK-SUB: gpu.subgroup_reduce add {{.+}} cluster(size = 8) : (f32) -> f32
// CHECK-SUB: "test.consume"
+ // CHECK-GFX9-COUNT-2: amdgpu.dpp {{.+}} quad_perm
+ // CHECK-GFX9: amdgpu.dpp {{.+}} row_half_mirror
+ // CHECK-GFX10-COUNT-2: amdgpu.dpp {{.+}} quad_perm
+ // CHECK-GFX10: amdgpu.dpp {{.+}} row_half_mirror
%sum2 = gpu.subgroup_reduce add %arg0 cluster(size = 8, stride = 1) : (vector<1xf32>) -> (vector<1xf32>)
"test.consume"(%sum2) : (vector<1xf32>) -> ()
// CHECK-SUB: gpu.subgroup_reduce add {{.+}} uniform cluster(size = 8, stride = 4) : (f32) -> f32
// CHECK-SUB: "test.consume"
+ // CHECK-GFX9-NOT: amdgpu.dpp
+ // CHECK-GFX10-NOT: amdgpu.dpp
+ // CHECK-GFX10-NOT: rocdl.permlanex16
%sum3 = gpu.subgroup_reduce add %arg0 uniform cluster(size = 8, stride = 4) : (vector<1xf32>) -> (vector<1xf32>)
"test.consume"(%sum3) : (vector<1xf32>) -> ()
@@ -86,6 +125,12 @@ gpu.module @kernels {
// CHECK-SUB-SAME: %[[ARG0:.+]]: vector<3xi8>, %[[ARG1:.+]]: vector<4xi8>)
//
// CHECK-SHFL-LABEL: gpu.func @kernel2(
+ //
+ // CHECK-GFX9-LABEL: gpu.func @kernel2(
+ // CHECK-GFX9-NOT: amdgpu.dpp
+ //
+ // CHECK-GFX10-LABEL: gpu.func @kernel2(
+ // CHECK-GFX10-NOT: amdgpu.dpp
gpu.func @kernel2(%arg0: vector<3xi8>, %arg1: vector<4xi8>) kernel {
// CHECK-SUB: %[[R0:.+]] = gpu.subgroup_reduce add %[[ARG0]] : (vector<3xi8>) -> vector<3xi8>
// CHECK-SUB: "test.consume"(%[[R0]]) : (vector<3xi8>) -> ()
@@ -103,6 +148,8 @@ gpu.module @kernels {
// CHECK-SHFL-LABEL: gpu.func @kernel3(
// CHECK-SHFL-SAME: %[[ARG0:.+]]: i32)
+ // CHECK-GFX9-LABEL: gpu.func @kernel3(
+ // CHECK-GFX10-LABEL: gpu.func @kernel3(
gpu.func @kernel3(%arg0: i32) kernel {
// CHECK-SHFL-DAG: %[[C1:.+]] = arith.constant 1 : i32
// CHECK-SHFL-DAG: %[[C2:.+]] = arith.constant 2 : i32
@@ -122,6 +169,12 @@ gpu.module @kernels {
// CHECK-SHFL: %[[S4:.+]], %{{.+}} = gpu.shuffle xor %[[A3]], %[[C16]], %[[C32]] : i32
// CHECK-SHFL: %[[A4:.+]] = arith.addi %[[A3]], %[[S4]] : i32
// CHECK-SHFL: "test.consume"(%[[A4]]) : (i32) -> ()
+
+ // CHECK-GFX9-COUNT-6: amdgpu.dpp
+
+ // CHECK-GFX10-COUNT-4: amdgpu.dpp
+ // CHECK-GFX10: rocdl.permlanex16
+ // CHECK-GFX10-COUNT-2: rocdl.readlane
%sum0 = gpu.subgroup_reduce add %arg0 : (i32) -> i32
"test.consume"(%sum0) : (i32) -> ()
@@ -131,6 +184,12 @@ gpu.module @kernels {
// CHECK-SHFL-LABEL: gpu.func @kernel3_clustered(
// CHECK-SHFL-SAME: %[[ARG0:.+]]: i32)
+ //
+ // CHECK-GFX9-LABEL: gpu.func @kernel3_clustered(
+ // CHECK-GFX9-SAME: %[[ARG0:.+]]: i32)
+ //
+ // CHECK-GFX10-LABEL: gpu.func @kernel3_clustered(
+ // CHECK-GFX10-SAME: %[[ARG0:.+]]: i32)
gpu.func @kernel3_clustered(%arg0: i32) kernel {
// CHECK-SHFL-DAG: %[[C1:.+]] = arith.constant 1 : i32
// CHECK-SHFL-DAG: %[[C2:.+]] = arith.constant 2 : i32
@@ -144,6 +203,21 @@ gpu.module @kernels {
// CHECK-SHFL: %[[S2:.+]], %{{.+}} = gpu.shuffle xor %[[A1]], %[[C4]], %[[C32]] : i32
// CHECK-SHFL: %[[A2:.+]] = arith.addi %[[A1]], %[[S2]] : i32
// CHECK-SHFL: "test.consume"(%[[A2]]) : (i32) -> ()
+
+ // CHECK-GFX9: %[[D0:.+]] = amdgpu.dpp %[[ARG0]] %[[ARG0]] quad_perm([1 : i32, 0 : i32, 3 : i32, 2 : i32]) {bound_ctrl = true} : i32
+ // CHECK-GFX9: %[[A0:.+]] = arith.addi %[[ARG0]], %[[D0]] : i32
+ // CHECK-GFX9: %[[D1:.+]] = amdgpu.dpp %[[A0]] %[[A0]] quad_perm([2 : i32, 3 : i32, 0 : i32, 1 : i32]) {bound_ctrl = true} : i32
+ // CHECK-GFX9: %[[A1:.+]] = arith.addi %[[A0]], %[[D1]] : i32
+ // CHECK-GFX9: %[[D2:.+]] = amdgpu.dpp %[[A1]] %[[A1]] row_half_mirror(unit) {bound_ctrl = true} : i32
+ // CHECK-GFX9: %[[A2:.+]] = arith.addi %[[A1]], %[[D2]] : i32
+
+ // CHECK-GFX10: %[[D0:.+]] = amdgpu.dpp %[[ARG0]] %[[ARG0]] quad_perm([1 : i32, 0 : i32, 3 : i32, 2 : i32]) {bound_ctrl = true} : i32
+ // CHECK-GFX10: %[[A0:.+]] = arith.addi %[[ARG0]], %[[D0]] : i32
+ // CHECK-GFX10: %[[D1:.+]] = amdgpu.dpp %[[A0]] %[[A0]] quad_perm([2 : i32, 3 : i32, 0 : i32, 1 : i32]) {bound_ctrl = true} : i32
+ // CHECK-GFX10: %[[A1:.+]] = arith.addi %[[A0]], %[[D1]] : i32
+ // CHECK-GFX10: %[[D2:.+]] = amdgpu.dpp %[[A1]] %[[A1]] row_half_mirror(unit) {bound_ctrl = true} : i32
+ // CHECK-GFX10: %[[A2:.+]] = arith.addi %[[A1]], %[[D2]] : i32
+ // CHECK-GFX10: "test.consume"(%[[A2]]) : (i32) -> ()
%sum0 = gpu.subgroup_reduce add %arg0 cluster(size = 8) : (i32) -> i32
"test.consume"(%sum0) : (i32) -> ()
@@ -153,6 +227,12 @@ gpu.module @kernels {
// CHECK-SHFL-LABEL: gpu.func @kernel3_clustered_strided(
// CHECK-SHFL-SAME: %[[ARG0:.+]]: i32)
+ //
+ // CHECK-GFX9-LABEL: gpu.func @kernel3_clustered_strided(
+ // CHECK-GFX9-NOT: amdgpu.dpp
+ //
+ // CHECK-GFX10-LABEL: gpu.func @kernel3_clustered_strided(
+ // CHECK-GFX10-NOT: amdgpu.dpp
gpu.func @kernel3_clustered_strided(%arg0: i32) kernel {
// CHECK-SHFL-DAG: %[[C1:.+]] = arith.constant 4 : i32
// CHECK-SHFL-DAG: %[[C2:.+]] = arith.constant 8 : i32
@@ -175,6 +255,12 @@ gpu.module @kernels {
// CHECK-SHFL-LABEL: gpu.func @kernel4(
// CHECK-SHFL-SAME: %[[ARG0:.+]]: vector<2xf16>)
+ //
+ // CHECK-GFX9-LABEL: gpu.func @kernel4(
+ // CHECK-GFX9-NOT: amdgpu.dpp
+ //
+ // CHECK-GFX10-LABEL: gpu.func @kernel4(
+ // CHECK-GFX10-NOT: amdgpu.dpp
gpu.func @kernel4(%arg0: vector<2xf16>) kernel {
// CHECK-SHFL-DAG: %[[C1:.+]] = arith.constant 1 : i32
// CHECK-SHFL-DAG: %[[C2:.+]] = arith.constant 2 : i32
@@ -211,6 +297,12 @@ gpu.module @kernels {
// CHECK-SHFL-LABEL: gpu.func @kernel4_clustered(
// CHECK-SHFL-SAME: %[[ARG0:.+]]: vector<2xf16>)
+ //
+ // CHECK-GFX9-LABEL: gpu.func @kernel4_clustered(
+ // CHECK-GFX9-NOT: amdgpu.dpp
+ //
+ // CHECK-GFX10-LABEL: gpu.func @kernel4_clustered(
+ // CHECK-GFX10-NOT: amdgpu.dpp
gpu.func @kernel4_clustered(%arg0: vector<2xf16>) kernel {
// CHECK-SHFL-DAG: %[[C1:.+]] = arith.constant 1 : i32
// CHECK-SHFL-DAG: %[[C2:.+]] = arith.constant 2 : i32
@@ -226,6 +318,11 @@ gpu.module @kernels {
// CHECK-SHFL-LABEL: gpu.func @kernel5(
// CHECK-SHFL-SAME: %[[ARG0:.+]]: i16)
+ //
+ // CHECK-GFX9-LABEL: gpu.func @kernel5(
+ //
+ // CHECK-GFX10-LABEL: gpu.func @kernel5(
+ // CHECK-GFX10-SAME: %[[ARG0:.+]]: i16)
gpu.func @kernel5(%arg0: i16) kernel {
// CHECK-SHFL: %[[E0:.+]] = arith.extui %[[ARG0]] : i16 to i32
// CHECK-SHFL: %[[S0:.+]], %{{.+}} = gpu.shuffle xor %[[E0]], {{.+}} : i32
@@ -237,6 +334,23 @@ gpu.module @kernels {
// CHECK-SHFL: arith.trunci {{.+}} : i32 to i16
// CHECK-SHFL: %[[AL:.+]] = arith.addi {{.+}} : i16
// CHECK-SHFL: "test.consume"(%[[AL]]) : (i16) -> ()
+
+ // CHECK-GFX9-COUNT-6: amdgpu.dpp
+
+ // CHECK-GFX10: %[[D0:.+]] = amdgpu.dpp %[[ARG0]] %[[ARG0]] quad_perm([1 : i32, 0 : i32, 3 : i32, 2 : i32]) {bound_ctrl = true} : i16
+ // CHECK-GFX10: %[[A0:.+]] = arith.addi %[[ARG0]], %[[D0]] : i16
+ // CHECK-GFX10: %[[D1:.+]] = amdgpu.dpp %[[A0]] %[[A0]] quad_perm([2 : i32, 3 : i32, 0 : i32, 1 : i32]) {bound_ctrl = true} : i16
+ // CHECK-GFX10: %[[A1:.+]] = arith.addi %[[A0]], %[[D1]] : i16
+ // CHECK-GFX10: %[[D2:.+]] = amdgpu.dpp %[[A1]] %[[A1]] row_half_mirror(unit) {bound_ctrl = true} : i16
+ // CHECK-GFX10: %[[A2:.+]] = arith.addi %[[A1]], %[[D2]] : i16
+ // CHECK-GFX10: %[[D3:.+]] = amdgpu.dpp %[[A2]] %[[A2]] row_mirror(unit) {bound_ctrl = true} : i16
+ // CHECK-GFX10: %[[A3:.+]] = arith.addi %[[A2]], %[[D3]] : i16
+ // CHECK-GFX10: %[[P0:.+]] = rocdl.permlanex16 %[[A3]], %[[A3]], %c-1_i32, %c-1_i32, true, false : i16, i32
+ // CHECK-GFX10: %[[A4:.+]] = arith.addi %[[A3]], %[[P0]] : i16
+ // CHECK-GFX10: %[[R0:.+]] = rocdl.readlane %[[A4]], %{{.+}} : (i16, i32) -> i16
+ // CHECK-GFX10: %[[R1:.+]] = rocdl.readlane %[[A4]], %{{.+}} : (i16, i32) -> i16
+ // CHECK-GFX10: %[[A5:.+]] = arith.addi %[[R1]], %[[R0]] : i16
+ // CHECK-GFX10: "test.consume"(%[[A5]]) : (i16) -> ()
%sum0 = gpu.subgroup_reduce add %arg0 : (i16) -> i16
"test.consume"(%sum0) : (i16) -> ()
@@ -246,6 +360,12 @@ gpu.module @kernels {
// CHECK-SHFL-LABEL: gpu.func @kernel5_clustered(
// CHECK-SHFL-SAME: %[[ARG0:.+]]: i16)
+ //
+ // CHECK-GFX9-LABEL: gpu.func @kernel5_clustered
+ // CHECK-GFX9-SAME: %[[ARG0:.+]]: i16)
+ //
+ // CHECK-GFX10-LABEL: gpu.func @kernel5_clustered
+ // CHECK-GFX10-SAME: %[[ARG0:.+]]: i16)
gpu.func @kernel5_clustered(%arg0: i16) kernel {
// CHECK-SHFL: %[[E0:.+]] = arith.extui %[[ARG0]] : i16 to i32
// CHECK-SHFL: %[[S0:.+]], %{{.+}} = gpu.shuffle xor %[[E0]], {{.+}} : i32
@@ -257,6 +377,26 @@ gpu.module @kernels {
// CHECK-SHFL: arith.trunci {{.+}} : i32 to i16
// CHECK-SHFL: %[[AL:.+]] = arith.addi {{.+}} : i16
// CHECK-SHFL: "test.consume"(%[[AL]]) : (i16) -> ()
+
+ // CHECK-GFX9: %[[VAR0:.+]] = amdgpu.dpp %[[ARG0]] %[[ARG0]] quad_perm([1 : i32, 0 : i32, 3 : i32, 2 : i32]) {bound_ctrl = true} : i16
+ // CHECK-GFX9: %[[VAR1:.+]] = arith.addi %[[ARG0]], %[[VAR0]] : i16
+ // CHECK-GFX9: %[[VAR2:.+]] = amdgpu.dpp %[[VAR1]] %[[VAR1]] quad_perm([2 : i32, 3 : i32, 0 : i32, 1 : i32]) {bound_ctrl = true} : i16
+ // CHECK-GFX9: %[[VAR3:.+]] = arith.addi %[[VAR1]], %[[VAR2]] : i16
+ // CHECK-GFX9: %[[VAR4:.+]] = amdgpu.dpp %[[VAR3]] %[[VAR3]] row_half_mirror(unit) {bound_ctrl = true} : i16
+ // CHECK-GFX9: %[[VAR5:.+]] = arith.addi %[[VAR3]], %[[VAR4]] : i16
+ // CHECK-GFX9: %[[VAR6:.+]] = amdgpu.dpp %[[VAR5]] %[[VAR5]] row_mirror(unit) {bound_ctrl = true} : i16
+ // CHECK-GFX9: %[[VAR7:.+]] = arith.addi %[[VAR5]], %[[VAR6]] : i16
+ // CHECK-GFX9: "test.consume"(%[[VAR7]]) : (i16) -> ()
+
+ // CHECK-GFX10: %[[VAR0:.+]] = amdgpu.dpp %[[ARG0]] %[[ARG0]] quad_perm([1 : i32, 0 : i32, 3 : i32, 2 : i32]) {bound_ctrl = true} : i16
+ // CHECK-GFX10: %[[VAR1:.+]] = arith.addi %[[ARG0]], %[[VAR0]] : i16
+ // CHECK-GFX10: %[[VAR2:.+]] = amdgpu.dpp %[[VAR1]] %[[VAR1]] quad_perm([2 : i32, 3 : i32, 0 : i32, 1 : i32]) {bound_ctrl = true} : i16
+ // CHECK-GFX10: %[[VAR3:.+]] = arith.addi %[[VAR1]], %[[VAR2]] : i16
+ // CHECK-GFX10: %[[VAR4:.+]] = amdgpu.dpp %[[VAR3]] %[[VAR3]] row_half_mirror(unit) {bound_ctrl = true} : i16
+ // CHECK-GFX10: %[[VAR5:.+]] = arith.addi %[[VAR3]], %[[VAR4]] : i16
+ // CHECK-GFX10: %[[VAR6:.+]] = amdgpu.dpp %[[VAR5]] %[[VAR5]] row_mirror(unit) {bound_ctrl = true} : i16
+ // CHECK-GFX10: %[[VAR7:.+]] = arith.addi %[[VAR5]], %[[VAR6]] : i16
+ // CHECK-GFX10: "test.consume"(%[[VAR7]]) : (i16) -> ()
%sum0 = gpu.subgroup_reduce add %arg0 cluster(size = 16) : (i16) -> i16
"test.consume"(%sum0) : (i16) -> ()
@@ -266,6 +406,12 @@ gpu.module @kernels {
// CHECK-SHFL-LABEL: gpu.func @kernel6(
// CHECK-SHFL-SAME: %[[ARG0:.+]]: vector<3xi8>)
+ //
+ // CHECK-GFX9-LABEL: gpu.func @kernel6(
+ // CHECK-GFX9-NOT: amdgpu.dpp
+ //
+ // CHECK-GFX10-LABEL: gpu.func @kernel6(
+ // CHECK-GFX10-NOT: amdgpu.dpp
gpu.func @kernel6(%arg0: vector<3xi8>) kernel {
// CHECK-SHFL: %[[CZ:.+]] = arith.constant dense<0> : vector<4xi8>
// CHECK-SHFL: %[[V0:.+]] = vector.insert_strided_slice %[[ARG0]], %[[CZ]] {offsets = [0], strides = [1]} : vector<3xi8> into vector<4xi8>
@@ -289,6 +435,12 @@ gpu.module @kernels {
// CHECK-SHFL-LABEL: gpu.func @kernel_cluster_size_is_subgroup_size(
// CHECK-SHFL-SAME: %[[ARG0:.+]]: vector<3xi8>)
+ //
+ // CHECK-GFX9-LABEL: gpu.func @kernel_cluster_size_is_subgroup_size(
+ // CHECK-GFX9-NOT: amdgpu.dpp
+ //
+ // CHECK-GFX10-LABEL: gpu.func @kernel_cluster_size_is_subgroup_size(
+ // CHECK-GFX10-NOT: amdgpu.dpp
gpu.func @kernel_cluster_size_is_subgroup_size(%arg0: vector<3xi8>) kernel {
// CHECK-SHFL-COUNT-5: gpu.shuffle xor
%sum0 = gpu.subgroup_reduce add %arg0 cluster(size = 32) : (vector<3xi8>) -> (vector<3xi8>)
diff --git a/mlir/test/lib/Dialect/GPU/TestGpuRewrite.cpp b/mlir/test/lib/Dialect/GPU/TestGpuRewrite.cpp
index a49d304..fe402da 100644
--- a/mlir/test/lib/Dialect/GPU/TestGpuRewrite.cpp
+++ b/mlir/test/lib/Dialect/GPU/TestGpuRewrite.cpp
@@ -10,10 +10,13 @@
//
//===----------------------------------------------------------------------===//
+#include "mlir/Dialect/AMDGPU/IR/AMDGPUDialect.h"
+#include "mlir/Dialect/AMDGPU/Utils/Chipset.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/Index/IR/IndexDialect.h"
+#include "mlir/Dialect/LLVMIR/ROCDLDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/PatternMatch.h"
@@ -54,7 +57,9 @@ struct TestGpuSubgroupReduceLoweringPass
: PassWrapper(pass) {}
void getDependentDialects(DialectRegistry &registry) const override {
- registry.insert<arith::ArithDialect, vector::VectorDialect>();
+ registry
+ .insert<amdgpu::AMDGPUDialect, arith::ArithDialect, LLVM::LLVMDialect,
+ ROCDL::ROCDLDialect, vector::VectorDialect>();
}
StringRef getArgument() const final {
@@ -70,6 +75,12 @@ struct TestGpuSubgroupReduceLoweringPass
llvm::cl::desc("Expand subgroup_reduce ops to shuffle ops."),
llvm::cl::init(false)};
+ Option<std::string> target{
+ *this, "target",
+ llvm::cl::desc("Target backend name which will be used to provide "
+ "compatible lowerings of subgroup reduce."),
+ llvm::cl::init("")};
+
void runOnOperation() override {
RewritePatternSet patterns(&getContext());
@@ -77,8 +88,15 @@ struct TestGpuSubgroupReduceLoweringPass
// perform fewer failing matches.
populateGpuBreakDownSubgroupReducePatterns(patterns,
/*maxShuffleBitwidth=*/32,
- PatternBenefit(2));
+ PatternBenefit(3));
if (expandToShuffles) {
+ auto maybeChipset = amdgpu::Chipset::parse(target);
+ if (succeeded(maybeChipset)) {
+ populateGpuLowerSubgroupReduceToDPPPatterns(
+ patterns, /*subgroupSize=*/64, *maybeChipset, PatternBenefit(2));
+ populateGpuLowerClusteredSubgroupReduceToDPPPatterns(
+ patterns, /*subgroupSize=*/64, *maybeChipset, PatternBenefit(2));
+ }
populateGpuLowerSubgroupReduceToShufflePatterns(
patterns, /*subgroupSize=*/32, /*shuffleBitwidth=*/32);
populateGpuLowerClusteredSubgroupReduceToShufflePatterns(
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index 40264fb..b289eaa 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -5207,6 +5207,8 @@ cc_library(
hdrs = glob(["include/mlir/Dialect/GPU/Transforms/*.h"]),
includes = ["include"],
deps = [
+ ":AMDGPUDialect",
+ ":AMDGPUUtils",
":AffineDialect",
":AffineUtils",
":ArithDialect",