author    Alexey Bataev <a.bataev@outlook.com>    2024-02-29 15:49:05 +0000
committer Alexey Bataev <a.bataev@outlook.com>    2024-02-29 15:49:05 +0000
commit    a0d744b9cf4a5bd7219c5c1007d8db317b70cf1f (patch)
tree      40d806f565e8f0ebdafb78f01db92fe4499096f9
parent    46478028cda679b76757cae689a53b11fadad5f5 (diff)
parent    4f132dca711f4b425f9d370f5d59efb766b8bffa (diff)
Created using spr 1.3.5
-rw-r--r--bolt/include/bolt/Core/BinaryFunction.h8
-rw-r--r--bolt/lib/Core/BinaryFunction.cpp24
-rw-r--r--clang-tools-extra/include-cleaner/lib/WalkAST.cpp5
-rw-r--r--clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp10
-rw-r--r--clang/docs/LanguageExtensions.rst8
-rw-r--r--clang/docs/ReleaseNotes.rst3
-rw-r--r--clang/include/clang/Basic/Builtins.td2
-rw-r--r--clang/include/clang/Basic/DiagnosticSemaKinds.td10
-rw-r--r--clang/include/clang/Basic/TargetInfo.h36
-rw-r--r--clang/include/clang/Driver/Driver.h3
-rw-r--r--clang/lib/AST/Interp/ByteCodeExprGen.cpp15
-rw-r--r--clang/lib/AST/Interp/Interp.cpp30
-rw-r--r--clang/lib/AST/Interp/Interp.h31
-rw-r--r--clang/lib/CodeGen/BackendUtil.cpp32
-rw-r--r--clang/lib/CodeGen/CGStmtOpenMP.cpp30
-rw-r--r--clang/lib/CodeGen/Targets/AArch64.cpp3
-rw-r--r--clang/lib/CodeGen/Targets/ARM.cpp8
-rw-r--r--clang/lib/Driver/ToolChains/CommonArgs.cpp10
-rw-r--r--clang/lib/Driver/ToolChains/CommonArgs.h3
-rw-r--r--clang/lib/Driver/ToolChains/Cuda.cpp2
-rw-r--r--clang/lib/InstallAPI/Visitor.cpp2
-rw-r--r--clang/lib/Sema/JumpDiagnostics.cpp19
-rw-r--r--clang/lib/Sema/SemaChecking.cpp25
-rw-r--r--clang/lib/Sema/SemaOpenMP.cpp3
-rw-r--r--clang/lib/Sema/SemaStmt.cpp16
-rw-r--r--clang/test/AST/Interp/arrays.cpp5
-rw-r--r--clang/test/AST/Interp/c.c8
-rw-r--r--clang/test/AST/Interp/cxx98.cpp3
-rw-r--r--clang/test/CXX/drs/dr18xx.cpp1
-rw-r--r--clang/test/CXX/expr/expr.prim/expr.prim.lambda/blocks.mm9
-rw-r--r--clang/test/CodeGen/builtins.c28
-rw-r--r--clang/test/CodeGen/fat-lto-objects.c21
-rw-r--r--clang/test/Driver/darwin-header-search-libcxx.cpp2
-rw-r--r--clang/test/Driver/mingw-sysroot.cpp12
-rw-r--r--clang/test/Driver/no-canonical-prefixes.c2
-rw-r--r--clang/test/Driver/program-path-priority.c13
-rw-r--r--clang/test/Driver/rocm-detect.hip4
-rw-r--r--clang/test/OpenMP/interop_codegen.cpp35
-rw-r--r--clang/test/OpenMP/scan_ast_print.cpp18
-rw-r--r--clang/test/Preprocessor/riscv-target-features.c18
-rw-r--r--clang/test/Sema/builtin-popcountg.c17
-rw-r--r--clang/test/SemaCXX/warn-bool-conversion.cpp12
-rw-r--r--clang/test/SemaOpenACC/no-branch-in-out.c197
-rw-r--r--clang/tools/clang-installapi/ClangInstallAPI.cpp1
-rw-r--r--clang/tools/driver/driver.cpp23
-rw-r--r--compiler-rt/lib/scudo/standalone/allocator_common.h7
-rw-r--r--compiler-rt/lib/scudo/standalone/primary32.h106
-rw-r--r--compiler-rt/lib/scudo/standalone/primary64.h153
-rw-r--r--compiler-rt/lib/scudo/standalone/tests/primary_test.cpp34
-rw-r--r--flang/include/flang/Lower/LoweringOptions.def4
-rw-r--r--flang/include/flang/Runtime/reduction.h5
-rw-r--r--flang/lib/Optimizer/Builder/Runtime/Reduction.cpp25
-rw-r--r--flang/runtime/Float128Math/CMakeLists.txt1
-rw-r--r--flang/runtime/Float128Math/math-entries.h16
-rw-r--r--flang/runtime/Float128Math/norm2.cpp59
-rw-r--r--flang/runtime/extrema.cpp107
-rw-r--r--flang/runtime/reduction-templates.h115
-rw-r--r--flang/runtime/tools.h11
-rw-r--r--flang/test/Driver/flang-experimental-polymorphism-flag.f904
-rw-r--r--flang/test/Lower/Intrinsics/norm2.f9016
-rw-r--r--libc/.clang-tidy2
-rw-r--r--libc/cmake/modules/compiler_features/check_float128.cpp2
-rw-r--r--libc/config/baremetal/api.td36
-rw-r--r--libc/config/baremetal/arm/entrypoints.txt3
-rw-r--r--libc/config/baremetal/arm/headers.txt1
-rw-r--r--libc/config/baremetal/riscv/entrypoints.txt3
-rw-r--r--libc/config/baremetal/riscv/headers.txt1
-rw-r--r--libc/docs/dev/code_style.rst2
-rw-r--r--libc/include/__llvm-libc-common.h6
-rw-r--r--libc/include/llvm-libc-macros/containerof-macro.h6
-rw-r--r--libc/include/llvm-libc-macros/fcntl-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/features-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/fenv-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/file-seek-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/float-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/generic-error-number-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/gpu/time-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/inttypes-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/limits-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/fcntl-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/sched-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/signal-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/sys-ioctl-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/sys-random-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/sys-resource-macros.h5
-rw-r--r--libc/include/llvm-libc-macros/linux/sys-socket-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/sys-stat-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/sys-time-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/sys-wait-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/termios-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/time-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/linux/unistd-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/math-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/null-macro.h6
-rw-r--r--libc/include/llvm-libc-macros/offsetof-macro.h6
-rw-r--r--libc/include/llvm-libc-macros/sched-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/signal-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/stdckdint-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/stdfix-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/stdio-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/stdlib-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-auxv-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-ioctl-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-mman-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-queue-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-random-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-resource-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-select-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-socket-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-stat-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-time-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/sys-wait-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/termios-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/time-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/unistd-macros.h6
-rw-r--r--libc/include/llvm-libc-macros/wchar-macros.h6
-rw-r--r--libc/include/llvm-libc-types/ACTION.h6
-rw-r--r--libc/include/llvm-libc-types/DIR.h6
-rw-r--r--libc/include/llvm-libc-types/ENTRY.h6
-rw-r--r--libc/include/llvm-libc-types/FILE.h6
-rw-r--r--libc/include/llvm-libc-types/__atexithandler_t.h6
-rw-r--r--libc/include/llvm-libc-types/__atfork_callback_t.h6
-rw-r--r--libc/include/llvm-libc-types/__bsearchcompare_t.h6
-rw-r--r--libc/include/llvm-libc-types/__call_once_func_t.h6
-rw-r--r--libc/include/llvm-libc-types/__exec_argv_t.h6
-rw-r--r--libc/include/llvm-libc-types/__exec_envp_t.h6
-rw-r--r--libc/include/llvm-libc-types/__futex_word.h6
-rw-r--r--libc/include/llvm-libc-types/__getoptargv_t.h6
-rw-r--r--libc/include/llvm-libc-types/__mutex_type.h6
-rw-r--r--libc/include/llvm-libc-types/__pthread_once_func_t.h6
-rw-r--r--libc/include/llvm-libc-types/__pthread_start_t.h6
-rw-r--r--libc/include/llvm-libc-types/__pthread_tss_dtor_t.h6
-rw-r--r--libc/include/llvm-libc-types/__qsortcompare_t.h6
-rw-r--r--libc/include/llvm-libc-types/__qsortrcompare_t.h6
-rw-r--r--libc/include/llvm-libc-types/__sighandler_t.h6
-rw-r--r--libc/include/llvm-libc-types/__thread_type.h6
-rw-r--r--libc/include/llvm-libc-types/blkcnt_t.h6
-rw-r--r--libc/include/llvm-libc-types/blksize_t.h6
-rw-r--r--libc/include/llvm-libc-types/cc_t.h6
-rw-r--r--libc/include/llvm-libc-types/clock_t.h6
-rw-r--r--libc/include/llvm-libc-types/clockid_t.h6
-rw-r--r--libc/include/llvm-libc-types/cnd_t.h6
-rw-r--r--libc/include/llvm-libc-types/cookie_io_functions_t.h6
-rw-r--r--libc/include/llvm-libc-types/cpu_set_t.h6
-rw-r--r--libc/include/llvm-libc-types/dev_t.h6
-rw-r--r--libc/include/llvm-libc-types/div_t.h6
-rw-r--r--libc/include/llvm-libc-types/double_t.h6
-rw-r--r--libc/include/llvm-libc-types/fd_set.h6
-rw-r--r--libc/include/llvm-libc-types/fenv_t.h6
-rw-r--r--libc/include/llvm-libc-types/fexcept_t.h6
-rw-r--r--libc/include/llvm-libc-types/float128.h6
-rw-r--r--libc/include/llvm-libc-types/float_t.h6
-rw-r--r--libc/include/llvm-libc-types/gid_t.h6
-rw-r--r--libc/include/llvm-libc-types/ino_t.h6
-rw-r--r--libc/include/llvm-libc-types/jmp_buf.h6
-rw-r--r--libc/include/llvm-libc-types/ldiv_t.h6
-rw-r--r--libc/include/llvm-libc-types/lldiv_t.h6
-rw-r--r--libc/include/llvm-libc-types/mode_t.h6
-rw-r--r--libc/include/llvm-libc-types/mtx_t.h6
-rw-r--r--libc/include/llvm-libc-types/nlink_t.h6
-rw-r--r--libc/include/llvm-libc-types/off64_t.h6
-rw-r--r--libc/include/llvm-libc-types/off_t.h6
-rw-r--r--libc/include/llvm-libc-types/once_flag.h6
-rw-r--r--libc/include/llvm-libc-types/pid_t.h6
-rw-r--r--libc/include/llvm-libc-types/posix_spawn_file_actions_t.h6
-rw-r--r--libc/include/llvm-libc-types/posix_spawnattr_t.h6
-rw-r--r--libc/include/llvm-libc-types/pthread_attr_t.h6
-rw-r--r--libc/include/llvm-libc-types/pthread_key_t.h6
-rw-r--r--libc/include/llvm-libc-types/pthread_mutex_t.h6
-rw-r--r--libc/include/llvm-libc-types/pthread_mutexattr_t.h6
-rw-r--r--libc/include/llvm-libc-types/pthread_once_t.h6
-rw-r--r--libc/include/llvm-libc-types/pthread_t.h6
-rw-r--r--libc/include/llvm-libc-types/rlim_t.h6
-rw-r--r--libc/include/llvm-libc-types/rpc_opcodes_t.h6
-rw-r--r--libc/include/llvm-libc-types/sa_family_t.h6
-rw-r--r--libc/include/llvm-libc-types/sig_atomic_t.h6
-rw-r--r--libc/include/llvm-libc-types/siginfo_t.h6
-rw-r--r--libc/include/llvm-libc-types/sigset_t.h6
-rw-r--r--libc/include/llvm-libc-types/size_t.h6
-rw-r--r--libc/include/llvm-libc-types/socklen_t.h6
-rw-r--r--libc/include/llvm-libc-types/speed_t.h6
-rw-r--r--libc/include/llvm-libc-types/ssize_t.h6
-rw-r--r--libc/include/llvm-libc-types/stack_t.h6
-rw-r--r--libc/include/llvm-libc-types/struct_dirent.h6
-rw-r--r--libc/include/llvm-libc-types/struct_epoll_data.h6
-rw-r--r--libc/include/llvm-libc-types/struct_epoll_event.h6
-rw-r--r--libc/include/llvm-libc-types/struct_hsearch_data.h6
-rw-r--r--libc/include/llvm-libc-types/struct_rlimit.h6
-rw-r--r--libc/include/llvm-libc-types/struct_rusage.h6
-rw-r--r--libc/include/llvm-libc-types/struct_sched_param.h6
-rw-r--r--libc/include/llvm-libc-types/struct_sigaction.h6
-rw-r--r--libc/include/llvm-libc-types/struct_sockaddr.h6
-rw-r--r--libc/include/llvm-libc-types/struct_sockaddr_un.h6
-rw-r--r--libc/include/llvm-libc-types/struct_stat.h6
-rw-r--r--libc/include/llvm-libc-types/struct_timespec.h6
-rw-r--r--libc/include/llvm-libc-types/struct_timeval.h6
-rw-r--r--libc/include/llvm-libc-types/struct_tm.h6
-rw-r--r--libc/include/llvm-libc-types/struct_utsname.h6
-rw-r--r--libc/include/llvm-libc-types/suseconds_t.h6
-rw-r--r--libc/include/llvm-libc-types/tcflag_t.h6
-rw-r--r--libc/include/llvm-libc-types/test_rpc_opcodes_t.h6
-rw-r--r--libc/include/llvm-libc-types/thrd_start_t.h6
-rw-r--r--libc/include/llvm-libc-types/thrd_t.h6
-rw-r--r--libc/include/llvm-libc-types/time_t.h6
-rw-r--r--libc/include/llvm-libc-types/tss_dtor_t.h6
-rw-r--r--libc/include/llvm-libc-types/tss_t.h6
-rw-r--r--libc/include/llvm-libc-types/uid_t.h6
-rw-r--r--libc/include/llvm-libc-types/union_sigval.h6
-rw-r--r--libc/include/llvm-libc-types/wchar_t.h6
-rw-r--r--libc/include/llvm-libc-types/wint_t.h6
-rw-r--r--libc/include/sys/queue.h6
-rw-r--r--libc/src/__support/CPP/CMakeLists.txt4
-rw-r--r--libc/src/__support/CPP/type_traits/is_fixed_point.h2
-rw-r--r--libc/src/__support/CPP/type_traits/is_floating_point.h2
-rw-r--r--libc/src/__support/FPUtil/CMakeLists.txt2
-rw-r--r--libc/src/__support/FPUtil/FPBits.h114
-rw-r--r--libc/src/__support/FPUtil/fpbits_str.h6
-rw-r--r--libc/src/__support/GPU/generic/utils.h6
-rw-r--r--libc/src/__support/GPU/utils.h6
-rw-r--r--libc/src/__support/HashTable/table.h6
-rw-r--r--libc/src/__support/OSUtil/gpu/io.h2
-rw-r--r--libc/src/__support/RPC/rpc_util.h6
-rw-r--r--libc/src/__support/StringUtil/message_mapper.h6
-rw-r--r--libc/src/__support/StringUtil/platform_errors.h6
-rw-r--r--libc/src/__support/StringUtil/platform_signals.h6
-rw-r--r--libc/src/__support/StringUtil/tables/linux_extension_errors.h6
-rw-r--r--libc/src/__support/StringUtil/tables/linux_extension_signals.h6
-rw-r--r--libc/src/__support/StringUtil/tables/linux_platform_errors.h6
-rw-r--r--libc/src/__support/StringUtil/tables/linux_platform_signals.h6
-rw-r--r--libc/src/__support/StringUtil/tables/minimal_platform_errors.h6
-rw-r--r--libc/src/__support/StringUtil/tables/minimal_platform_signals.h6
-rw-r--r--libc/src/__support/StringUtil/tables/posix_errors.h6
-rw-r--r--libc/src/__support/StringUtil/tables/posix_signals.h6
-rw-r--r--libc/src/__support/StringUtil/tables/signal_table.h6
-rw-r--r--libc/src/__support/StringUtil/tables/stdc_errors.h6
-rw-r--r--libc/src/__support/StringUtil/tables/stdc_signals.h6
-rw-r--r--libc/src/__support/fixed_point/fx_bits.h6
-rw-r--r--libc/src/__support/fixed_point/fx_rep.h6
-rw-r--r--libc/src/__support/macros/properties/CMakeLists.txt4
-rw-r--r--libc/src/__support/macros/properties/types.h (renamed from libc/src/__support/macros/properties/float.h)11
-rw-r--r--libc/src/__support/memory_size.h5
-rw-r--r--libc/src/__support/threads/gpu/mutex.h6
-rw-r--r--libc/src/assert/assert.h3
-rw-r--r--libc/src/gpu/rpc_host_call.h2
-rw-r--r--libc/src/math/amdgpu/declarations.h6
-rw-r--r--libc/src/math/amdgpu/fmax.cpp4
-rw-r--r--libc/src/math/amdgpu/fmaxf.cpp6
-rw-r--r--libc/src/math/amdgpu/fmin.cpp6
-rw-r--r--libc/src/math/amdgpu/fminf.cpp6
-rw-r--r--libc/src/math/amdgpu/platform.h6
-rw-r--r--libc/src/math/ceilf128.h2
-rw-r--r--libc/src/math/copysignf128.h4
-rw-r--r--libc/src/math/fabsf128.h2
-rw-r--r--libc/src/math/fdimf128.h2
-rw-r--r--libc/src/math/floorf128.h2
-rw-r--r--libc/src/math/fmaxf128.h2
-rw-r--r--libc/src/math/fminf128.h2
-rw-r--r--libc/src/math/frexpf128.h2
-rw-r--r--libc/src/math/generic/CMakeLists.txt28
-rw-r--r--libc/src/math/generic/exp_utils.h6
-rw-r--r--libc/src/math/ilogbf128.h2
-rw-r--r--libc/src/math/ldexpf128.h2
-rw-r--r--libc/src/math/llogb.h2
-rw-r--r--libc/src/math/llogbf.h2
-rw-r--r--libc/src/math/llogbf128.h2
-rw-r--r--libc/src/math/llogbl.h2
-rw-r--r--libc/src/math/logbf128.h2
-rw-r--r--libc/src/math/nvptx/declarations.h6
-rw-r--r--libc/src/math/nvptx/fmax.cpp6
-rw-r--r--libc/src/math/nvptx/fmaxf.cpp4
-rw-r--r--libc/src/math/nvptx/fmin.cpp6
-rw-r--r--libc/src/math/nvptx/fminf.cpp6
-rw-r--r--libc/src/math/nvptx/nvptx.h6
-rw-r--r--libc/src/math/roundf128.h2
-rw-r--r--libc/src/math/sqrtf128.h2
-rw-r--r--libc/src/math/truncf128.h2
-rw-r--r--libc/src/search/hsearch/global.h5
-rw-r--r--libc/src/string/memory_utils/aarch64/inline_bcmp.h2
-rw-r--r--libc/src/string/memory_utils/aarch64/inline_memcmp.h2
-rw-r--r--libc/src/string/memory_utils/aarch64/inline_memcpy.h6
-rw-r--r--libc/src/string/memory_utils/generic/aligned_access.h4
-rw-r--r--libc/src/string/memory_utils/generic/byte_per_byte.h6
-rw-r--r--libc/src/string/memory_utils/op_aarch64.h10
-rw-r--r--libc/src/string/memory_utils/op_builtin.h16
-rw-r--r--libc/src/string/memory_utils/op_generic.h10
-rw-r--r--libc/src/string/memory_utils/riscv/inline_memmove.h6
-rw-r--r--libc/src/string/memory_utils/utils.h4
-rw-r--r--libc/src/string/memory_utils/x86_64/inline_bcmp.h2
-rw-r--r--libc/src/string/memory_utils/x86_64/inline_memcmp.h2
-rw-r--r--libc/test/UnitTest/ExecuteFunction.h6
-rw-r--r--libc/test/UnitTest/FPExceptMatcher.h6
-rw-r--r--libc/test/UnitTest/FPMatcher.h6
-rw-r--r--libc/test/UnitTest/LibcTest.h6
-rw-r--r--libc/test/UnitTest/MemoryMatcher.h6
-rw-r--r--libc/test/UnitTest/PlatformDefs.h6
-rw-r--r--libc/test/UnitTest/RoundingModeUtils.h6
-rw-r--r--libc/test/UnitTest/StringUtils.h6
-rw-r--r--libc/test/UnitTest/Test.h6
-rw-r--r--libc/test/integration/src/spawn/test_binary_properties.h6
-rw-r--r--libc/test/src/math/FAbsTest.h5
-rw-r--r--libc/test/src/math/FMaxTest.h5
-rw-r--r--libc/test/src/math/FMinTest.h5
-rw-r--r--libc/test/src/math/FloorTest.h5
-rw-r--r--libc/test/src/math/RandUtils.h5
-rw-r--r--libc/test/src/math/RoundTest.h5
-rw-r--r--libc/test/src/math/TruncTest.h5
-rw-r--r--libc/test/src/math/differential_testing/Timer.h6
-rw-r--r--libc/test/src/math/in_float_range_test_helper.h6
-rw-r--r--libc/test/src/math/smoke/CeilTest.h5
-rw-r--r--libc/test/src/math/smoke/CopySignTest.h5
-rw-r--r--libc/test/src/math/smoke/FAbsTest.h5
-rw-r--r--libc/test/src/math/smoke/FMaxTest.h5
-rw-r--r--libc/test/src/math/smoke/FMinTest.h5
-rw-r--r--libc/test/src/math/smoke/FloorTest.h5
-rw-r--r--libc/test/src/math/smoke/RIntTest.h6
-rw-r--r--libc/test/src/math/smoke/RoundTest.h5
-rw-r--r--libc/test/src/math/smoke/RoundToIntegerTest.h6
-rw-r--r--libc/test/src/math/smoke/TruncTest.h5
-rw-r--r--libc/test/src/time/TmHelper.h6
-rw-r--r--libc/utils/MPFRWrapper/MPFRUtils.h6
-rw-r--r--libcxx/cmake/config-ix.cmake7
-rw-r--r--libcxx/include/__atomic/aliases.h1
-rw-r--r--libcxx/include/__config5
-rw-r--r--libcxx/include/__thread/support/pthread.h5
-rw-r--r--libcxx/include/__utility/integer_sequence.h87
-rw-r--r--libcxx/include/atomic1
-rw-r--r--libcxx/test/libcxx/transitive_includes/cxx23.csv1
-rw-r--r--libcxx/test/libcxx/transitive_includes/cxx26.csv1
-rw-r--r--libcxx/test/std/time/time.clock/time.clock.file/to_from_sys.pass.cpp3
-rw-r--r--libcxx/test/std/time/time.clock/time.clock.hires/now.pass.cpp3
-rw-r--r--libcxx/test/std/time/time.clock/time.clock.system/from_time_t.pass.cpp3
-rw-r--r--libcxx/test/std/time/time.clock/time.clock.system/now.pass.cpp3
-rw-r--r--libcxx/test/std/time/time.clock/time.clock.system/to_time_t.pass.cpp3
-rw-r--r--libcxx/test/std/time/time.point/time.point.nonmember/op_-duration.pass.cpp3
-rw-r--r--libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.pass.cpp19
-rw-r--r--libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.verify.cpp24
-rw-r--r--lldb/bindings/CMakeLists.txt6
-rw-r--r--lldb/cmake/modules/LLDBConfig.cmake2
-rw-r--r--lldb/source/Commands/CommandObjectTarget.cpp8
-rw-r--r--lldb/source/Commands/Options.td4
-rw-r--r--lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp2
-rw-r--r--lldb/source/Symbol/Variable.cpp8
-rw-r--r--lldb/test/API/commands/command/script/TestCommandScript.py14
-rw-r--r--lldb/test/API/commands/command/script/cmd_file.lldb4
-rw-r--r--lldb/test/API/functionalities/completion/TestCompletion.py6
-rw-r--r--lldb/test/API/functionalities/completion/main.cpp13
-rw-r--r--lldb/test/API/lit.cfg.py3
-rw-r--r--lldb/test/API/lit.site.cfg.py.in1
-rw-r--r--lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py45
-rw-r--r--lldb/test/CMakeLists.txt18
-rw-r--r--lldb/test/Shell/Driver/TestHelp.test2
-rw-r--r--lldb/tools/driver/Driver.cpp15
-rw-r--r--lldb/tools/driver/Driver.h1
-rw-r--r--lldb/tools/lldb-dap/Watchpoint.cpp23
-rw-r--r--lldb/tools/lldb-dap/Watchpoint.h5
-rw-r--r--lldb/tools/lldb-dap/lldb-dap.cpp16
-rw-r--r--lldb/use_lldb_suite_root.py4
-rw-r--r--lldb/utils/lldb-dotest/CMakeLists.txt1
-rwxr-xr-xlldb/utils/lldb-dotest/lldb-dotest.in5
-rw-r--r--llvm/CMakeLists.txt2
-rw-r--r--llvm/cmake/modules/AddLLVM.cmake17
-rw-r--r--llvm/docs/CMake.rst6
-rw-r--r--llvm/docs/GlobalISel/GenericOpcode.rst8
-rw-r--r--llvm/docs/LangRef.rst8
-rw-r--r--llvm/docs/RISCVUsage.rst9
-rw-r--r--llvm/docs/ReleaseNotes.rst1
-rw-r--r--llvm/include/llvm/ADT/APFloat.h18
-rw-r--r--llvm/include/llvm/CodeGen/ISDOpcodes.h2
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAG.h6
-rw-r--r--llvm/include/llvm/DebugInfo/DIContext.h1
-rw-r--r--llvm/include/llvm/IR/InstrTypes.h13
-rw-r--r--llvm/include/llvm/Target/GenericOpcodes.td2
-rw-r--r--llvm/include/llvm/Target/GlobalISel/Combine.td33
-rw-r--r--llvm/include/llvm/TextAPI/RecordsSlice.h7
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp100
-rw-r--r--llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp29
-rw-r--r--llvm/lib/IR/DebugInfo.cpp7
-rw-r--r--llvm/lib/IR/Instruction.cpp9
-rw-r--r--llvm/lib/IR/Instructions.cpp137
-rw-r--r--llvm/lib/IR/Verifier.cpp4
-rw-r--r--llvm/lib/MC/MCParser/AsmParser.cpp4
-rw-r--r--llvm/lib/Passes/PassBuilderPipelines.cpp2
-rw-r--r--llvm/lib/Support/RISCVISAInfo.cpp2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp11
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp20
-rw-r--r--llvm/lib/Target/AMDGPU/BUFInstructions.td5
-rw-r--r--llvm/lib/Target/AMDGPU/DSInstructions.td3
-rw-r--r--llvm/lib/Target/AMDGPU/FLATInstructions.td1
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h8
-rw-r--r--llvm/lib/Target/AMDGPU/VOPInstructions.td1
-rw-r--r--llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp57
-rw-r--r--llvm/lib/Target/ARM/ARMInstrFormats.td31
-rw-r--r--llvm/lib/Target/ARM/ARMInstrVFP.td64
-rw-r--r--llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp76
-rw-r--r--llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp23
-rw-r--r--llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp32
-rw-r--r--llvm/lib/Target/DirectX/DXIL.td347
-rw-r--r--llvm/lib/Target/DirectX/DXILOpBuilder.cpp25
-rw-r--r--llvm/lib/Target/DirectX/DXILOpBuilder.h3
-rw-r--r--llvm/lib/Target/DirectX/DXILOpLowering.cpp3
-rw-r--r--llvm/lib/Target/RISCV/RISCVFeatures.td2
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp16
-rw-r--r--llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp7
-rw-r--r--llvm/lib/Target/RISCV/RISCVSchedSiFive7.td1
-rw-r--r--llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp48
-rw-r--r--llvm/lib/Target/X86/X86ExpandPseudo.cpp49
-rw-r--r--llvm/lib/Target/X86/X86ISelDAGToDAG.cpp10
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp11
-rw-r--r--llvm/lib/Target/X86/X86InstrSystem.td2
-rw-r--r--llvm/lib/Target/X86/X86InstrUtils.td1
-rw-r--r--llvm/lib/Target/X86/X86InstrVMX.td4
-rw-r--r--llvm/lib/Transforms/IPO/FunctionImport.cpp19
-rw-r--r--llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp14
-rw-r--r--llvm/lib/Transforms/Scalar/Reassociate.cpp8
-rw-r--r--llvm/lib/Transforms/Vectorize/LoopVectorize.cpp14
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll28
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/rvv-vectorinsert.ll139
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll12
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/shuffle-insert_subvector.ll1
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/shuffle-reverse.ll12
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/combine-2-icmps-of-0-and-or.mir1244
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll22
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/aes.ll43
-rw-r--r--llvm/test/CodeGen/AArch64/callbr-asm-outputs-indirect-isel.ll26
-rw-r--r--llvm/test/CodeGen/AArch64/emit_fneg_with_non_register_operand.mir4
-rw-r--r--llvm/test/CodeGen/AArch64/peephole-insvigpr.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/setcc_knownbits.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll24
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.mir66
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll36
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.mir177
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll13
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.mir161
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir56
-rw-r--r--llvm/test/CodeGen/AMDGPU/div_i128.ll3003
-rw-r--r--llvm/test/CodeGen/AMDGPU/fmaxnum.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/fminnum.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/rem_i128.ll3014
-rw-r--r--llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir4
-rw-r--r--llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir9
-rw-r--r--llvm/test/CodeGen/PowerPC/crsave.ll324
-rw-r--r--llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/atomic-rmw.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/atomic-signext.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/attributes.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/machine-combiner.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll1150
-rw-r--r--llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll64
-rw-r--r--llvm/test/CodeGen/X86/avx512-insert-extract.ll18
-rw-r--r--llvm/test/CodeGen/X86/avx512-vec-cmp.ll27
-rw-r--r--llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll12
-rw-r--r--llvm/test/CodeGen/X86/cmov-fp.ll548
-rw-r--r--llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll47
-rw-r--r--llvm/test/CodeGen/X86/cvt16.ll1
-rw-r--r--llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll7
-rw-r--r--llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll4
-rw-r--r--llvm/test/CodeGen/X86/fp-roundeven.ll1
-rw-r--r--llvm/test/CodeGen/X86/fpclamptosat_vec.ll124
-rw-r--r--llvm/test/CodeGen/X86/half.ll109
-rw-r--r--llvm/test/CodeGen/X86/inline-asm-memop.ll27
-rw-r--r--llvm/test/CodeGen/X86/pr31088.ll20
-rw-r--r--llvm/test/CodeGen/X86/pr34605.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr38803.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr43509.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr57340.ll188
-rw-r--r--llvm/test/CodeGen/X86/pr78897.ll4
-rw-r--r--llvm/test/CodeGen/X86/prefer-fpext-splat.ll2
-rw-r--r--llvm/test/CodeGen/X86/select-of-fp-constants.ll2
-rw-r--r--llvm/test/CodeGen/X86/select-of-half-constants.ll8
-rw-r--r--llvm/test/CodeGen/X86/vector-half-conversions.ll44
-rw-r--r--llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll20
-rw-r--r--llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll20
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll6
-rw-r--r--llvm/test/MC/AArch64/cfi-bad-nesting-darwin.s6
-rw-r--r--llvm/test/MC/AMDGPU/gfx11_unsupported.s12
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_ds_alias.s6
-rw-r--r--llvm/test/MC/ARM/thumbv8m.s8
-rw-r--r--llvm/test/MC/ARM/vlstm-vlldm-8.1m.s11
-rw-r--r--llvm/test/MC/ARM/vlstm-vlldm-8m.s17
-rw-r--r--llvm/test/MC/ARM/vlstm-vlldm-diag.s61
-rw-r--r--llvm/test/MC/Disassembler/ARM/armv8.1m-vlldm_vlstm-8.1.main.txt11
-rw-r--r--llvm/test/MC/Disassembler/ARM/armv8.1m-vlldm_vlstm-8.main.txt17
-rw-r--r--llvm/test/MC/Disassembler/X86/apx/IgnoreW.txt118
-rw-r--r--llvm/test/MC/RISCV/rv32zacas-invalid.s2
-rw-r--r--llvm/test/MC/RISCV/rv32zacas-valid.s12
-rw-r--r--llvm/test/MC/RISCV/rv64zacas-valid.s6
-rw-r--r--llvm/test/MC/RISCV/rvzabha-zacas-valid.s12
-rw-r--r--llvm/test/ThinLTO/X86/visibility-elf.ll6
-rw-r--r--llvm/test/ThinLTO/X86/visibility-macho.ll4
-rw-r--r--llvm/test/Transforms/Coroutines/coro-debug-dbg.values.ll7
-rw-r--r--llvm/test/Transforms/FunctionImport/funcimport.ll23
-rw-r--r--llvm/test/Transforms/Inline/inline_stats.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/maxnum.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/minnum.ll4
-rw-r--r--llvm/test/Transforms/InstSimplify/ConstProp/min-max.ll68
-rw-r--r--llvm/test/Transforms/LoopVectorize/PowerPC/interleave_IC.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll21
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll15
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll.expected13
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/global_remove_same.test4
-rw-r--r--llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp14
-rw-r--r--llvm/unittests/ADT/APFloatTest.cpp10
-rw-r--r--llvm/unittests/CodeGen/AArch64SelectionDAGTest.cpp68
-rw-r--r--llvm/unittests/IR/VerifierTest.cpp28
-rw-r--r--llvm/unittests/Support/RISCVISAInfoTest.cpp2
-rw-r--r--llvm/unittests/Target/ARM/MachineInstrTest.cpp2
-rw-r--r--llvm/utils/TableGen/DXILEmitter.cpp408
-rw-r--r--llvm/utils/TableGen/X86DisassemblerTables.cpp2
-rw-r--r--llvm/utils/UpdateTestChecks/common.py15
-rw-r--r--llvm/utils/gn/secondary/lldb/test/BUILD.gn3
-rwxr-xr-xllvm/utils/update_test_checks.py10
-rw-r--r--llvm/utils/vim/syntax/mir.vim2
-rw-r--r--mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h20
-rw-r--r--mlir/include/mlir/Dialect/EmitC/IR/EmitC.td64
-rw-r--r--mlir/include/mlir/Dialect/GPU/IR/GPUOps.td15
-rw-r--r--mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td42
-rw-r--r--mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp3
-rw-r--r--mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp54
-rw-r--r--mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp98
-rw-r--r--mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp65
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp18
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp12
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp100
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp35
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h27
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp2
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp12
-rw-r--r--mlir/lib/Dialect/Tensor/IR/TensorOps.cpp16
-rw-r--r--mlir/lib/Target/Cpp/TranslateToCpp.cpp30
-rw-r--r--mlir/test/Dialect/Affine/access-analysis.mlir67
-rw-r--r--mlir/test/Dialect/Arith/expand-ops.mlir45
-rw-r--r--mlir/test/Dialect/EmitC/invalid_ops.mlir24
-rw-r--r--mlir/test/Dialect/EmitC/ops.mlir7
-rw-r--r--mlir/test/Dialect/GPU/ops.mlir15
-rw-r--r--mlir/test/Dialect/SparseTensor/invalid.mlir10
-rw-r--r--mlir/test/Dialect/SparseTensor/roundtrip.mlir17
-rw-r--r--mlir/test/Dialect/Tensor/canonicalize.mlir35
-rw-r--r--mlir/test/Integration/Dialect/Arith/CPU/test-wide-int-emulation-compare-results-i16.mlir2
-rw-r--r--mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir2
-rw-r--r--mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir10
-rw-r--r--mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir8
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir45
-rwxr-xr-xmlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir85
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir18
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir24
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir24
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir171
-rwxr-xr-xmlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir269
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir46
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/Emulated/test-setArmSVLBits.mlir8
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir8
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir8
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir8
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir16
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir16
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir12
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir2
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir8
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir4
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir8
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/Emulated/test-setArmVLBits.mlir17
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/arrays-of-scalable-vectors.mlir12
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/test-print-str.mlir4
-rw-r--r--mlir/test/Target/Cpp/logical_operators.mlir14
-rw-r--r--mlir/test/lib/Dialect/Affine/CMakeLists.txt2
-rw-r--r--mlir/test/lib/Dialect/Affine/TestAccessAnalysis.cpp83
-rw-r--r--mlir/test/mlir-cpu-runner/expand-arith-ops.mlir47
-rw-r--r--mlir/test/python/dialects/gpu/dialect.py2
-rw-r--r--mlir/tools/mlir-opt/mlir-opt.cpp2
-rw-r--r--openmp/CMakeLists.txt12
-rw-r--r--utils/bazel/llvm-project-overlay/clang/BUILD.bazel3
-rw-r--r--utils/bazel/llvm-project-overlay/libc/BUILD.bazel8
577 files changed, 11179 insertions, 7369 deletions
diff --git a/bolt/include/bolt/Core/BinaryFunction.h b/bolt/include/bolt/Core/BinaryFunction.h
index a17717876..c170fa6 100644
--- a/bolt/include/bolt/Core/BinaryFunction.h
+++ b/bolt/include/bolt/Core/BinaryFunction.h
@@ -2056,6 +2056,14 @@ public:
/// Returns false if disassembly failed.
Error disassemble();
+ /// An external interface to register a branch while the function is in
+ /// disassembled state. Allows making custom modifications to the
+ /// disassembler. E.g., a pre-CFG pass can add an instruction and register
+ /// a branch that will later be used during the CFG construction.
+ ///
+ /// Returns a label at the branch destination.
+ MCSymbol *registerBranch(uint64_t Src, uint64_t Dst);
+
Error handlePCRelOperand(MCInst &Instruction, uint64_t Address,
uint64_t Size);
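
A minimal sketch of how a pre-CFG pass might use this new interface; only registerBranch() and getAddress() come from the patch above, while the helper name and parameters are hypothetical:

  // Record a synthetic taken branch so that buildCFG() later creates an edge
  // for it. The function must still be in the Disassembled state.
  void recordSyntheticBranch(BinaryFunction &BF, uint64_t SrcOffset,
                             uint64_t DstOffset) {
    const uint64_t Src = BF.getAddress() + SrcOffset;
    const uint64_t Dst = BF.getAddress() + DstOffset;
    // Both addresses must lie inside BF, per the asserts in registerBranch().
    MCSymbol *Target = BF.registerBranch(Src, Dst);
    (void)Target; // Use this label when materializing the new branch MCInst.
  }
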
diff --git a/bolt/lib/Core/BinaryFunction.cpp b/bolt/lib/Core/BinaryFunction.cpp
index 00df42c1..ce4dd29 100644
--- a/bolt/lib/Core/BinaryFunction.cpp
+++ b/bolt/lib/Core/BinaryFunction.cpp
@@ -1445,6 +1445,16 @@ add_instruction:
return Error::success();
}
+MCSymbol *BinaryFunction::registerBranch(uint64_t Src, uint64_t Dst) {
+ assert(CurrentState == State::Disassembled &&
+ "Cannot register branch unless function is in disassembled state.");
+ assert(containsAddress(Src) && containsAddress(Dst) &&
+ "Cannot register external branch.");
+ MCSymbol *Target = getOrCreateLocalLabel(Dst);
+ TakenBranches.emplace_back(Src - getAddress(), Dst - getAddress());
+ return Target;
+}
+
bool BinaryFunction::scanExternalRefs() {
bool Success = true;
bool DisassemblyFailed = false;
@@ -1759,13 +1769,6 @@ void BinaryFunction::postProcessJumpTables() {
}
}
}
-
- // Remove duplicates branches. We can get a bunch of them from jump tables.
- // Without doing jump table value profiling we don't have use for extra
- // (duplicate) branches.
- llvm::sort(TakenBranches);
- auto NewEnd = std::unique(TakenBranches.begin(), TakenBranches.end());
- TakenBranches.erase(NewEnd, TakenBranches.end());
}
bool BinaryFunction::validateExternallyReferencedOffsets() {
@@ -2128,6 +2131,13 @@ Error BinaryFunction::buildCFG(MCPlusBuilder::AllocatorIdTy AllocatorId) {
// e.g. exit(3), etc. Otherwise we'll see a false fall-through
// blocks.
+ // Remove duplicate branches. We can get a bunch of them from jump tables.
+ // Without doing jump table value profiling we don't have a use for extra
+ // (duplicate) branches.
+ llvm::sort(TakenBranches);
+ auto NewEnd = std::unique(TakenBranches.begin(), TakenBranches.end());
+ TakenBranches.erase(NewEnd, TakenBranches.end());
+
for (std::pair<uint32_t, uint32_t> &Branch : TakenBranches) {
LLVM_DEBUG(dbgs() << "registering branch [0x"
<< Twine::utohexstr(Branch.first) << "] -> [0x"
diff --git a/clang-tools-extra/include-cleaner/lib/WalkAST.cpp b/clang-tools-extra/include-cleaner/lib/WalkAST.cpp
index 277e6ec5..878067a 100644
--- a/clang-tools-extra/include-cleaner/lib/WalkAST.cpp
+++ b/clang-tools-extra/include-cleaner/lib/WalkAST.cpp
@@ -228,6 +228,11 @@ public:
// Mark declaration from definition as it needs type-checking.
if (FD->isThisDeclarationADefinition())
report(FD->getLocation(), FD);
+ // Explicit specializations/instantiations of a function template require
+ // the primary template.
+ if (clang::isTemplateExplicitInstantiationOrSpecialization(
+ FD->getTemplateSpecializationKind()))
+ report(FD->getLocation(), FD->getPrimaryTemplate());
return true;
}
bool VisitVarDecl(VarDecl *VD) {
diff --git a/clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp b/clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp
index e238dc3..5dc8815 100644
--- a/clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp
+++ b/clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp
@@ -229,13 +229,9 @@ TEST(WalkAST, FunctionTemplates) {
EXPECT_THAT(testWalk("template<typename T> void foo(T) {}",
"template void ^foo<int>(int);"),
ElementsAre());
- // FIXME: Report specialized template as used from explicit specializations.
- EXPECT_THAT(testWalk("template<typename T> void foo(T);",
+ EXPECT_THAT(testWalk("template<typename T> void $explicit^foo(T);",
"template<> void ^foo<int>(int);"),
- ElementsAre());
- EXPECT_THAT(testWalk("template<typename T> void foo(T) {}",
- "template<typename T> void ^foo(T*) {}"),
- ElementsAre());
+ ElementsAre(Decl::FunctionTemplate));
// Implicit instantiations references most relevant template.
EXPECT_THAT(testWalk(R"cpp(
@@ -510,6 +506,8 @@ TEST(WalkAST, Functions) {
// Definition uses declaration, not the other way around.
testWalk("void $explicit^foo();", "void ^foo() {}");
testWalk("void foo() {}", "void ^foo();");
+ testWalk("template <typename> void $explicit^foo();",
+ "template <typename> void ^foo() {}");
// Unresolved calls marks all the overloads.
testWalk("void $ambiguous^foo(int); void $ambiguous^foo(char);",
diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
index 2a17781..bcd6919 100644
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
@@ -3477,7 +3477,7 @@ builtin, the mangler emits their usual pattern without any special treatment.
-----------------------
``__builtin_popcountg`` returns the number of 1 bits in the argument. The
-argument can be of any integer type.
+argument can be of any unsigned integer type.
**Syntax**:
@@ -3489,20 +3489,20 @@ argument can be of any integer type.
.. code-block:: c++
- int x = 1;
+ unsigned int x = 1;
int x_pop = __builtin_popcountg(x);
unsigned long y = 3;
int y_pop = __builtin_popcountg(y);
- _BitInt(128) z = 7;
+ unsigned _BitInt(128) z = 7;
int z_pop = __builtin_popcountg(z);
**Description**:
``__builtin_popcountg`` is meant to be a type-generic alternative to the
``__builtin_popcount{,l,ll}`` builtins, with support for other integer types,
-such as ``__int128`` and C23 ``_BitInt(N)``.
+such as ``unsigned __int128`` and C23 ``unsigned _BitInt(N)``.
Multiprecision Arithmetic Builtins
----------------------------------
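
A short illustrative example of the tightened type requirement described above (a sketch only; the exact diagnostic wording is defined by err_builtin_invalid_arg_type further down in this patch):

  unsigned int u = 11u;
  int pop_u = __builtin_popcountg(u);    // OK: unsigned operand

  int s = 11;
  // int pop_s = __builtin_popcountg(s); // now rejected: the operand must
                                         // have an unsigned integer type
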
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 7e16b9f..a5c6b80 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -192,6 +192,9 @@ Improvements to Clang's diagnostics
- Clang now diagnoses declarative nested name specifiers that name alias templates.
+- Clang now diagnoses lambda function expressions being implicitly cast to boolean values, under ``-Wpointer-bool-conversion``.
+ Fixes `#82512 <https://github.com/llvm/llvm-project/issues/82512>`_.
+
Improvements to Clang's time-trace
----------------------------------
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index 3bc35c5..2fbc56d 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -690,7 +690,7 @@ def Popcount : Builtin, BitInt_Long_LongLongTemplate {
def Popcountg : Builtin {
let Spellings = ["__builtin_popcountg"];
- let Attributes = [NoThrow, Const];
+ let Attributes = [NoThrow, Const, CustomTypeChecking];
let Prototype = "int(...)";
}
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index c8141fe..ff88c4f 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -4127,8 +4127,8 @@ def ext_ms_impcast_fn_obj : ExtWarn<
"Microsoft extension">, InGroup<MicrosoftCast>;
def warn_impcast_pointer_to_bool : Warning<
- "address of%select{| function| array}0 '%1' will always evaluate to "
- "'true'">,
+ "address of %select{'%1'|function '%1'|array '%1'|lambda function pointer "
+ "conversion operator}0 will always evaluate to 'true'">,
InGroup<PointerBoolConversion>;
def warn_cast_nonnull_to_bool : Warning<
"nonnull %select{function call|parameter}0 '%1' will evaluate to "
@@ -11984,7 +11984,7 @@ def err_builtin_invalid_arg_type: Error <
"signed integer or floating point type|vector type|"
"floating point type|"
"vector of integers|"
- "type of integer}1 (was %2)">;
+ "type of unsigned integer}1 (was %2)">;
def err_builtin_matrix_disabled: Error<
"matrix types extension is disabled. Pass -fenable-matrix to enable it">;
@@ -12214,4 +12214,8 @@ def err_acc_construct_appertainment
def err_acc_branch_in_out_compute_construct
: Error<"invalid %select{branch|return}0 %select{out of|into}1 OpenACC "
"Compute Construct">;
+def note_acc_branch_into_compute_construct
+ : Note<"invalid branch into OpenACC Compute Construct">;
+def note_acc_branch_out_of_compute_construct
+ : Note<"invalid branch out of OpenACC Compute Construct">;
} // end of sema component.
diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index 48e9cec..b94d136 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -1369,13 +1369,35 @@ public:
}
struct BranchProtectionInfo {
- LangOptions::SignReturnAddressScopeKind SignReturnAddr =
- LangOptions::SignReturnAddressScopeKind::None;
- LangOptions::SignReturnAddressKeyKind SignKey =
- LangOptions::SignReturnAddressKeyKind::AKey;
- bool BranchTargetEnforcement = false;
- bool BranchProtectionPAuthLR = false;
- bool GuardedControlStack = false;
+ LangOptions::SignReturnAddressScopeKind SignReturnAddr;
+ LangOptions::SignReturnAddressKeyKind SignKey;
+ bool BranchTargetEnforcement;
+ bool BranchProtectionPAuthLR;
+ bool GuardedControlStack;
+
+ BranchProtectionInfo() = default;
+
+ const char *getSignReturnAddrStr() const {
+ switch (SignReturnAddr) {
+ case LangOptions::SignReturnAddressScopeKind::None:
+ return "none";
+ case LangOptions::SignReturnAddressScopeKind::NonLeaf:
+ return "non-leaf";
+ case LangOptions::SignReturnAddressScopeKind::All:
+ return "all";
+ }
+ assert(false && "Unexpected SignReturnAddressScopeKind");
+ }
+
+ const char *getSignKeyStr() const {
+ switch (SignKey) {
+ case LangOptions::SignReturnAddressKeyKind::AKey:
+ return "a_key";
+ case LangOptions::SignReturnAddressKeyKind::BKey:
+ return "b_key";
+ }
+ assert(false && "Unexpected SignReturnAddressKeyKind");
+ }
};
/// Determine if the Architecture in this TargetInfo supports branch
diff --git a/clang/include/clang/Driver/Driver.h b/clang/include/clang/Driver/Driver.h
index a5ca637..73cf326 100644
--- a/clang/include/clang/Driver/Driver.h
+++ b/clang/include/clang/Driver/Driver.h
@@ -160,7 +160,7 @@ public:
/// Target and driver mode components extracted from clang executable name.
ParsedClangName ClangNameParts;
- /// The path to the installed clang directory, if any.
+ /// TODO: Remove this in favor of Dir.
std::string InstalledDir;
/// The path to the compiler resource directory.
@@ -433,7 +433,6 @@ public:
return InstalledDir.c_str();
return Dir.c_str();
}
- void setInstalledDir(StringRef Value) { InstalledDir = std::string(Value); }
bool isSaveTempsEnabled() const { return SaveTemps != SaveTempsNone; }
bool isSaveTempsObj() const { return SaveTemps == SaveTempsObj; }
diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.cpp b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
index b151f8d..122b904 100644
--- a/clang/lib/AST/Interp/ByteCodeExprGen.cpp
+++ b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
@@ -3213,12 +3213,6 @@ bool ByteCodeExprGen<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
// we haven't seen yet.
if (Ctx.getLangOpts().CPlusPlus) {
if (const auto *VD = dyn_cast<VarDecl>(D)) {
- // Dummy for static locals
- if (VD->isStaticLocal()) {
- if (std::optional<unsigned> I = P.getOrCreateDummy(D))
- return this->emitGetPtrGlobal(*I, E);
- return false;
- }
// Visit local const variables like normal.
if (VD->isLocalVarDecl() && VD->getType().isConstQualified()) {
if (!this->visitVarDecl(VD))
@@ -3226,6 +3220,9 @@ bool ByteCodeExprGen<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
// Retry.
return this->VisitDeclRefExpr(E);
}
+
+ if (VD->hasExternalStorage())
+ return this->emitInvalidDeclRef(E, E);
}
} else {
if (const auto *VD = dyn_cast<VarDecl>(D);
@@ -3235,11 +3232,11 @@ bool ByteCodeExprGen<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
// Retry.
return this->VisitDeclRefExpr(E);
}
-
- if (std::optional<unsigned> I = P.getOrCreateDummy(D))
- return this->emitGetPtrGlobal(*I, E);
}
+ if (std::optional<unsigned> I = P.getOrCreateDummy(D))
+ return this->emitGetPtrGlobal(*I, E);
+
return this->emitInvalidDeclRef(E, E);
}
diff --git a/clang/lib/AST/Interp/Interp.cpp b/clang/lib/AST/Interp/Interp.cpp
index 5670888..4f3cd6c 100644
--- a/clang/lib/AST/Interp/Interp.cpp
+++ b/clang/lib/AST/Interp/Interp.cpp
@@ -285,10 +285,6 @@ static bool CheckConstant(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
return CheckConstant(S, OpPC, Ptr.getDeclDesc());
}
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- return !Ptr.isDummy();
-}
-
bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK) {
if (!Ptr.isZero())
@@ -595,10 +591,8 @@ bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result,
return true;
}
-/// We aleady know the given DeclRefExpr is invalid for some reason,
-/// now figure out why and print appropriate diagnostics.
-bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) {
- const ValueDecl *D = DR->getDecl();
+static bool diagnoseUnknownDecl(InterpState &S, CodePtr OpPC,
+ const ValueDecl *D) {
const SourceInfo &E = S.Current->getSource(OpPC);
if (isa<ParmVarDecl>(D)) {
@@ -621,10 +615,28 @@ bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) {
return false;
}
}
-
return false;
}
+/// We already know the given DeclRefExpr is invalid for some reason,
+/// now figure out why and print appropriate diagnostics.
+bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) {
+ const ValueDecl *D = DR->getDecl();
+ return diagnoseUnknownDecl(S, OpPC, D);
+}
+
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
+ if (!Ptr.isDummy())
+ return true;
+
+ const Descriptor *Desc = Ptr.getDeclDesc();
+ const ValueDecl *D = Desc->asValueDecl();
+ if (!D)
+ return false;
+
+ return diagnoseUnknownDecl(S, OpPC, D);
+}
+
bool CheckNonNullArgs(InterpState &S, CodePtr OpPC, const Function *F,
const CallExpr *CE, unsigned ArgSize) {
auto Args = llvm::ArrayRef(CE->getArgs(), CE->getNumArgs());
diff --git a/clang/lib/AST/Interp/Interp.h b/clang/lib/AST/Interp/Interp.h
index 13e0043..f379c98 100644
--- a/clang/lib/AST/Interp/Interp.h
+++ b/clang/lib/AST/Interp/Interp.h
@@ -572,7 +572,8 @@ bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Inc(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
+ if (Ptr.isDummy())
+ return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
return false;
@@ -585,7 +586,8 @@ bool Inc(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool IncPop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
+ if (Ptr.isDummy())
+ return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
return false;
@@ -599,7 +601,8 @@ bool IncPop(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Dec(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
+ if (Ptr.isDummy())
+ return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
return false;
@@ -612,7 +615,8 @@ bool Dec(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool DecPop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
+ if (Ptr.isDummy())
+ return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
return false;
@@ -641,7 +645,8 @@ bool IncDecFloatHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
inline bool Incf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
+ if (Ptr.isDummy())
+ return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
return false;
@@ -650,7 +655,8 @@ inline bool Incf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
inline bool IncfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
+ if (Ptr.isDummy())
+ return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
return false;
@@ -660,6 +666,9 @@ inline bool IncfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
inline bool Decf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (Ptr.isDummy())
+ return false;
+
if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
return false;
@@ -669,6 +678,8 @@ inline bool Decf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
inline bool DecfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (Ptr.isDummy())
+ return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
return false;
@@ -774,9 +785,9 @@ inline bool CmpHelperEQ<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
// element in the same array are NOT equal. They have the same Base value,
// but a different Offset. This is a pretty rare case, so we fix this here
// by comparing pointers to the first elements.
- if (LHS.isArrayRoot())
+ if (!LHS.isDummy() && LHS.isArrayRoot())
VL = LHS.atIndex(0).getByteOffset();
- if (RHS.isArrayRoot())
+ if (!RHS.isDummy() && RHS.isArrayRoot())
VR = RHS.atIndex(0).getByteOffset();
S.Stk.push<BoolT>(BoolT::from(Fn(Compare(VL, VR))));
@@ -1895,7 +1906,7 @@ inline bool ArrayElemPtr(InterpState &S, CodePtr OpPC) {
const T &Offset = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.peek<Pointer>();
- if (!CheckDummy(S, OpPC, Ptr))
+ if (Ptr.isDummy())
return true;
if (!OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr))
@@ -1909,7 +1920,7 @@ inline bool ArrayElemPtrPop(InterpState &S, CodePtr OpPC) {
const T &Offset = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!CheckDummy(S, OpPC, Ptr)) {
+ if (Ptr.isDummy()) {
S.Stk.push<Pointer>(Ptr);
return true;
}
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index a310825..056f790 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -186,6 +186,14 @@ class EmitAssemblyHelper {
TargetTriple.getVendor() != llvm::Triple::Apple;
}
+ /// Check whether we should emit a flag for UnifiedLTO.
+ /// The UnifiedLTO module flag should be set when UnifiedLTO is enabled for
+ /// ThinLTO or Full LTO with module summaries.
+ bool shouldEmitUnifiedLTOModueFlag() const {
+ return CodeGenOpts.UnifiedLTO &&
+ (CodeGenOpts.PrepareForThinLTO || shouldEmitRegularLTOSummary());
+ }
+
public:
EmitAssemblyHelper(DiagnosticsEngine &_Diags,
const HeaderSearchOptions &HeaderSearchOpts,
@@ -1036,7 +1044,8 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (!actionRequiresCodeGen(Action) && CodeGenOpts.VerifyModule)
MPM.addPass(VerifierPass());
- if (Action == Backend_EmitBC || Action == Backend_EmitLL) {
+ if (Action == Backend_EmitBC || Action == Backend_EmitLL ||
+ CodeGenOpts.FatLTO) {
if (CodeGenOpts.PrepareForThinLTO && !CodeGenOpts.DisableLLVMPasses) {
if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
TheModule->addModuleFlag(llvm::Module::Error, "EnableSplitLTOUnit",
@@ -1047,11 +1056,9 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (!ThinLinkOS)
return;
}
- if (CodeGenOpts.UnifiedLTO)
- TheModule->addModuleFlag(llvm::Module::Error, "UnifiedLTO", uint32_t(1));
MPM.addPass(ThinLTOBitcodeWriterPass(
*OS, ThinLinkOS ? &ThinLinkOS->os() : nullptr));
- } else {
+ } else if (Action == Backend_EmitLL) {
MPM.addPass(PrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists,
/*EmitLTOSummary=*/true));
}
@@ -1065,24 +1072,17 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
TheModule->addModuleFlag(llvm::Module::Error, "EnableSplitLTOUnit",
uint32_t(1));
- if (CodeGenOpts.UnifiedLTO)
- TheModule->addModuleFlag(llvm::Module::Error, "UnifiedLTO", uint32_t(1));
}
- if (Action == Backend_EmitBC)
+ if (Action == Backend_EmitBC) {
MPM.addPass(BitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists,
EmitLTOSummary));
- else
+ } else if (Action == Backend_EmitLL) {
MPM.addPass(PrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists,
EmitLTOSummary));
+ }
}
- }
- if (CodeGenOpts.FatLTO) {
- // Set the EnableSplitLTOUnit and UnifiedLTO module flags, since FatLTO
- // uses a different action than Backend_EmitBC or Backend_EmitLL.
- if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
- TheModule->addModuleFlag(llvm::Module::Error, "EnableSplitLTOUnit",
- uint32_t(CodeGenOpts.EnableSplitLTOUnit));
- if (CodeGenOpts.UnifiedLTO && !TheModule->getModuleFlag("UnifiedLTO"))
+
+ if (shouldEmitUnifiedLTOModueFlag())
TheModule->addModuleFlag(llvm::Module::Error, "UnifiedLTO", uint32_t(1));
}
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 8fd7469..ffcd3ae 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -7023,19 +7023,25 @@ void CodeGenFunction::EmitOMPInteropDirective(const OMPInteropDirective &S) {
S.getSingleClause<OMPUseClause>())) &&
"OMPNowaitClause clause is used separately in OMPInteropDirective.");
- if (const auto *C = S.getSingleClause<OMPInitClause>()) {
- llvm::Value *InteropvarPtr =
- EmitLValue(C->getInteropVar()).getPointer(*this);
- llvm::omp::OMPInteropType InteropType = llvm::omp::OMPInteropType::Unknown;
- if (C->getIsTarget()) {
- InteropType = llvm::omp::OMPInteropType::Target;
- } else {
- assert(C->getIsTargetSync() && "Expected interop-type target/targetsync");
- InteropType = llvm::omp::OMPInteropType::TargetSync;
+ auto ItOMPInitClause = S.getClausesOfKind<OMPInitClause>();
+ if (!ItOMPInitClause.empty()) {
+ // Look at the multiple init clauses
+ for (const OMPInitClause *C : ItOMPInitClause) {
+ llvm::Value *InteropvarPtr =
+ EmitLValue(C->getInteropVar()).getPointer(*this);
+ llvm::omp::OMPInteropType InteropType =
+ llvm::omp::OMPInteropType::Unknown;
+ if (C->getIsTarget()) {
+ InteropType = llvm::omp::OMPInteropType::Target;
+ } else {
+ assert(C->getIsTargetSync() &&
+ "Expected interop-type target/targetsync");
+ InteropType = llvm::omp::OMPInteropType::TargetSync;
+ }
+ OMPBuilder.createOMPInteropInit(Builder, InteropvarPtr, InteropType,
+ Device, NumDependences, DependenceList,
+ Data.HasNowaitClause);
}
- OMPBuilder.createOMPInteropInit(Builder, InteropvarPtr, InteropType, Device,
- NumDependences, DependenceList,
- Data.HasNowaitClause);
} else if (const auto *C = S.getSingleClause<OMPDestroyClause>()) {
llvm::Value *InteropvarPtr =
EmitLValue(C->getInteropVar()).getPointer(*this);
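
For reference, a minimal source-level sketch of the case the loop above now handles, a single interop directive carrying several init clauses (the variable and function names are illustrative, not taken from this patch):

  #include <omp.h>

  void init_two_interop_objects() {
    omp_interop_t obj1 = omp_interop_none;
    omp_interop_t obj2 = omp_interop_none;
    // One createOMPInteropInit call is now emitted per init clause.
    #pragma omp interop init(target : obj1) init(targetsync : obj2)
  }
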
diff --git a/clang/lib/CodeGen/Targets/AArch64.cpp b/clang/lib/CodeGen/Targets/AArch64.cpp
index adfdd51..2b8e2ae 100644
--- a/clang/lib/CodeGen/Targets/AArch64.cpp
+++ b/clang/lib/CodeGen/Targets/AArch64.cpp
@@ -132,8 +132,7 @@ public:
assert(Error.empty());
auto *Fn = cast<llvm::Function>(GV);
- static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
- Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
+ Fn->addFnAttr("sign-return-address", BPI.getSignReturnAddrStr());
if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
Fn->addFnAttr("sign-return-address-key",
diff --git a/clang/lib/CodeGen/Targets/ARM.cpp b/clang/lib/CodeGen/Targets/ARM.cpp
index d7d175f..5d42e62 100644
--- a/clang/lib/CodeGen/Targets/ARM.cpp
+++ b/clang/lib/CodeGen/Targets/ARM.cpp
@@ -152,13 +152,7 @@ public:
diag::warn_target_unsupported_branch_protection_attribute)
<< Arch;
} else {
- static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
- assert(static_cast<unsigned>(BPI.SignReturnAddr) <= 2 &&
- "Unexpected SignReturnAddressScopeKind");
- Fn->addFnAttr(
- "sign-return-address",
- SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
-
+ Fn->addFnAttr("sign-return-address", BPI.getSignReturnAddrStr());
Fn->addFnAttr("branch-target-enforcement",
BPI.BranchTargetEnforcement ? "true" : "false");
}
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index faceee8..382c8b3 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -2763,13 +2763,13 @@ void tools::addOpenMPDeviceRTL(const Driver &D,
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
StringRef BitcodeSuffix,
- const llvm::Triple &Triple) {
+ const llvm::Triple &Triple,
+ const ToolChain &HostTC) {
SmallVector<StringRef, 8> LibraryPaths;
- // Add path to clang lib / lib64 folder.
- SmallString<256> DefaultLibPath = llvm::sys::path::parent_path(D.Dir);
- llvm::sys::path::append(DefaultLibPath, CLANG_INSTALL_LIBDIR_BASENAME);
- LibraryPaths.emplace_back(DefaultLibPath.c_str());
+ // Check all of the standard library search paths used by the compiler.
+ for (const auto &LibPath : HostTC.getFilePaths())
+ LibraryPaths.emplace_back(LibPath);
// Add user defined library paths from LIBRARY_PATH.
std::optional<std::string> LibPath =
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.h b/clang/lib/Driver/ToolChains/CommonArgs.h
index 2db0f88..b8f649a 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.h
+++ b/clang/lib/Driver/ToolChains/CommonArgs.h
@@ -214,7 +214,8 @@ void addMachineOutlinerArgs(const Driver &D, const llvm::opt::ArgList &Args,
void addOpenMPDeviceRTL(const Driver &D, const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
- StringRef BitcodeSuffix, const llvm::Triple &Triple);
+ StringRef BitcodeSuffix, const llvm::Triple &Triple,
+ const ToolChain &HostTC);
void addOutlineAtomicsArgs(const Driver &D, const ToolChain &TC,
const llvm::opt::ArgList &Args,
diff --git a/clang/lib/Driver/ToolChains/Cuda.cpp b/clang/lib/Driver/ToolChains/Cuda.cpp
index ff3687c..177fd63 100644
--- a/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -903,7 +903,7 @@ void CudaToolChain::addClangTargetOptions(
return;
addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, GpuArch.str(),
- getTriple());
+ getTriple(), HostTC);
}
}
diff --git a/clang/lib/InstallAPI/Visitor.cpp b/clang/lib/InstallAPI/Visitor.cpp
index 9b333a6..3806a69 100644
--- a/clang/lib/InstallAPI/Visitor.cpp
+++ b/clang/lib/InstallAPI/Visitor.cpp
@@ -19,7 +19,7 @@ using namespace llvm::MachO;
namespace clang::installapi {
-// Exported NamedDecl needs to have externally visibiliy linkage and
+// Exported NamedDecl needs to have external linkage and
// default visibility from LinkageComputer.
static bool isExported(const NamedDecl *D) {
auto LV = D->getLinkageAndVisibility();
diff --git a/clang/lib/Sema/JumpDiagnostics.cpp b/clang/lib/Sema/JumpDiagnostics.cpp
index ec3892e..6722878 100644
--- a/clang/lib/Sema/JumpDiagnostics.cpp
+++ b/clang/lib/Sema/JumpDiagnostics.cpp
@@ -604,6 +604,16 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
break;
}
+ case Stmt::OpenACCComputeConstructClass: {
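+    // Treat the compute construct as a protected scope and walk its
+    // structured block as a child scope so branches across it are diagnosed.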
+ unsigned NewParentScope = Scopes.size();
+ OpenACCComputeConstruct *CC = cast<OpenACCComputeConstruct>(S);
+ Scopes.push_back(GotoScope(
+ ParentScope, diag::note_acc_branch_into_compute_construct,
+ diag::note_acc_branch_out_of_compute_construct, CC->getBeginLoc()));
+ BuildScopeInformation(CC->getStructuredBlock(), NewParentScope);
+ return;
+ }
+
default:
if (auto *ED = dyn_cast<OMPExecutableDirective>(S)) {
if (!ED->isStandaloneDirective()) {
@@ -936,11 +946,16 @@ void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
if (Scopes[I].InDiag == diag::note_protected_by_seh_finally) {
S.Diag(From->getBeginLoc(), diag::warn_jump_out_of_seh_finally);
break;
- }
- if (Scopes[I].InDiag == diag::note_omp_protected_structured_block) {
+ } else if (Scopes[I].InDiag ==
+ diag::note_omp_protected_structured_block) {
S.Diag(From->getBeginLoc(), diag::err_goto_into_protected_scope);
S.Diag(To->getBeginLoc(), diag::note_omp_exits_structured_block);
break;
+ } else if (Scopes[I].InDiag ==
+ diag::note_acc_branch_into_compute_construct) {
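+      // Unlike the OMP case above, attach the note to the compute construct's
+      // location rather than to the jump target.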
+ S.Diag(From->getBeginLoc(), diag::err_goto_into_protected_scope);
+ S.Diag(Scopes[I].Loc, diag::note_acc_branch_out_of_compute_construct);
+ return;
}
}
}
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 0de76ee..35d453e 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -2190,17 +2190,23 @@ static bool SemaBuiltinCpu(Sema &S, const TargetInfo &TI, CallExpr *TheCall,
}
/// Checks that __builtin_popcountg was called with a single argument, which is
-/// an integer.
+/// an unsigned integer.
static bool SemaBuiltinPopcountg(Sema &S, CallExpr *TheCall) {
if (checkArgCount(S, TheCall, 1))
return true;
- Expr *Arg = TheCall->getArg(0);
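+  // Perform default lvalue conversion first so the check below sees the
+  // converted argument type.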
+ ExprResult ArgRes = S.DefaultLvalueConversion(TheCall->getArg(0));
+ if (ArgRes.isInvalid())
+ return true;
+
+ Expr *Arg = ArgRes.get();
+ TheCall->setArg(0, Arg);
+
QualType ArgTy = Arg->getType();
- if (!ArgTy->isIntegerType()) {
+ if (!ArgTy->isUnsignedIntegerType()) {
S.Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
- << 1 << /*integer ty*/ 7 << ArgTy;
+ << 1 << /*unsigned integer ty*/ 7 << ArgTy;
return true;
}
return false;
@@ -16538,6 +16544,17 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
}
}
+ // Complain if we are converting a lambda expression to a boolean value
+ if (const auto *MCallExpr = dyn_cast<CXXMemberCallExpr>(E)) {
+ if (const auto *MRecordDecl = MCallExpr->getRecordDecl();
+ MRecordDecl && MRecordDecl->isLambda()) {
+ Diag(E->getExprLoc(), diag::warn_impcast_pointer_to_bool)
+ << /*LambdaPointerConversionOperatorType=*/3
+ << MRecordDecl->getSourceRange() << Range << IsEqual;
+ return;
+ }
+ }
+
// Expect to find a single Decl. Skip anything more complicated.
ValueDecl *D = nullptr;
if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 7f75cfc..f4364a2 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -4962,7 +4962,8 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
if (RC->getModifier() != OMPC_REDUCTION_inscan)
continue;
for (Expr *E : RC->copy_array_temps())
- MarkDeclarationsReferencedInExpr(E);
+ if (E)
+ MarkDeclarationsReferencedInExpr(E);
}
if (auto *AC = dyn_cast<OMPAlignedClause>(C)) {
for (Expr *E : AC->varlists())
diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp
index 0a5c2b23..ca2d206 100644
--- a/clang/lib/Sema/SemaStmt.cpp
+++ b/clang/lib/Sema/SemaStmt.cpp
@@ -567,6 +567,11 @@ Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
Diag(IdentLoc, diag::warn_reserved_extern_symbol)
<< TheDecl << static_cast<int>(Status);
+  // If this label is in a compute construct scope, we need to make sure we
+  // check branches into and out of that scope.
+ if (getCurScope()->isInOpenACCComputeConstructScope())
+ setFunctionHasBranchProtectedScope();
+
// Otherwise, things are good. Fill in the declaration and return it.
LabelStmt *LS = new (Context) LabelStmt(IdentLoc, TheDecl, SubStmt);
TheDecl->setStmt(LS);
@@ -3304,6 +3309,12 @@ StmtResult Sema::ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl) {
setFunctionHasBranchIntoScope();
+
+  // If this goto is in a compute construct scope, we need to make sure we
+  // check branches into and out of that scope.
+ if (getCurScope()->isInOpenACCComputeConstructScope())
+ setFunctionHasBranchProtectedScope();
+
TheDecl->markUsed(Context);
return new (Context) GotoStmt(TheDecl, GotoLoc, LabelLoc);
}
@@ -3332,6 +3343,11 @@ Sema::ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc,
setFunctionHasIndirectGoto();
+  // If this goto is in a compute construct scope, we need to make sure we
+  // check branches into and out of that scope.
+ if (getCurScope()->isInOpenACCComputeConstructScope())
+ setFunctionHasBranchProtectedScope();
+
return new (Context) IndirectGotoStmt(GotoLoc, StarLoc, E);
}
diff --git a/clang/test/AST/Interp/arrays.cpp b/clang/test/AST/Interp/arrays.cpp
index e1af2e8..2bf6e9e 100644
--- a/clang/test/AST/Interp/arrays.cpp
+++ b/clang/test/AST/Interp/arrays.cpp
@@ -564,3 +564,8 @@ namespace LocalVLA {
#endif
}
}
+
+char melchizedek[2200000000];
+typedef decltype(melchizedek[1] - melchizedek[0]) ptrdiff_t;
+constexpr ptrdiff_t d1 = &melchizedek[0x7fffffff] - &melchizedek[0]; // ok
+constexpr ptrdiff_t d3 = &melchizedek[0] - &melchizedek[0x80000000u]; // ok
diff --git a/clang/test/AST/Interp/c.c b/clang/test/AST/Interp/c.c
index 2a72c24..260e5bd 100644
--- a/clang/test/AST/Interp/c.c
+++ b/clang/test/AST/Interp/c.c
@@ -33,15 +33,15 @@ const int b = 3;
_Static_assert(b == 3, ""); // pedantic-ref-warning {{not an integer constant expression}} \
// pedantic-expected-warning {{not an integer constant expression}}
-/// FIXME: The new interpreter is missing the "initializer of 'c' unknown" diagnostics.
-const int c; // ref-note {{declared here}} \
- // pedantic-ref-note {{declared here}}
+const int c; // all-note {{declared here}}
_Static_assert(c == 0, ""); // ref-error {{not an integral constant expression}} \
// ref-note {{initializer of 'c' is unknown}} \
// pedantic-ref-error {{not an integral constant expression}} \
// pedantic-ref-note {{initializer of 'c' is unknown}} \
// expected-error {{not an integral constant expression}} \
- // pedantic-expected-error {{not an integral constant expression}}
+ // expected-note {{initializer of 'c' is unknown}} \
+ // pedantic-expected-error {{not an integral constant expression}} \
+ // pedantic-expected-note {{initializer of 'c' is unknown}}
_Static_assert(&c != 0, ""); // ref-warning {{always true}} \
// pedantic-ref-warning {{always true}} \
diff --git a/clang/test/AST/Interp/cxx98.cpp b/clang/test/AST/Interp/cxx98.cpp
index 1acc74a..73e4537 100644
--- a/clang/test/AST/Interp/cxx98.cpp
+++ b/clang/test/AST/Interp/cxx98.cpp
@@ -18,12 +18,13 @@ template struct C<cval>;
/// FIXME: This example does not get properly diagnosed in the new interpreter.
extern const int recurse1;
-const int recurse2 = recurse1; // ref-note {{here}}
+const int recurse2 = recurse1; // both-note {{declared here}}
const int recurse1 = 1;
int array1[recurse1];
int array2[recurse2]; // ref-warning 2{{variable length array}} \
// ref-note {{initializer of 'recurse2' is not a constant expression}} \
// expected-warning {{variable length array}} \
+ // expected-note {{read of non-const variable 'recurse2'}} \
// expected-error {{variable length array}}
int NCI; // both-note {{declared here}}
diff --git a/clang/test/CXX/drs/dr18xx.cpp b/clang/test/CXX/drs/dr18xx.cpp
index a7cee4e..e78730e 100644
--- a/clang/test/CXX/drs/dr18xx.cpp
+++ b/clang/test/CXX/drs/dr18xx.cpp
@@ -282,6 +282,7 @@ namespace dr1837 { // dr1837: 3.3
struct A {
int f();
bool b = [] {
+ // since-cxx11-warning@-1 {{address of lambda function pointer conversion operator will always evaluate to 'true'}}
struct Local {
static_assert(sizeof(this->f()) == sizeof(int), "");
};
diff --git a/clang/test/CXX/expr/expr.prim/expr.prim.lambda/blocks.mm b/clang/test/CXX/expr/expr.prim/expr.prim.lambda/blocks.mm
index cb56f68..e93c37f 100644
--- a/clang/test/CXX/expr/expr.prim/expr.prim.lambda/blocks.mm
+++ b/clang/test/CXX/expr/expr.prim/expr.prim.lambda/blocks.mm
@@ -65,10 +65,10 @@ void nesting() {
namespace overloading {
void bool_conversion() {
- if ([](){}) {
+ if ([](){}) { // expected-warning{{address of lambda function pointer conversion operator will always evaluate to 'true'}}
}
- bool b = []{};
+ bool b = []{}; // expected-warning{{address of lambda function pointer conversion operator will always evaluate to 'true'}}
b = (bool)[]{};
}
@@ -108,8 +108,9 @@ namespace overloading {
using decltype(a)::operator id<void(*)()>; // expected-note {{here}}
} extern d;
- bool r1 = c;
- bool r2 = d; // expected-error {{private}}
+ bool r1 = c; // expected-warning{{address of lambda function pointer conversion operator will always evaluate to 'true'}}
+ bool r2 = d; // expected-error {{private}} \
+ expected-warning{{address of lambda function pointer conversion operator will always evaluate to 'true'}}
}
namespace PR13117 {
diff --git a/clang/test/CodeGen/builtins.c b/clang/test/CodeGen/builtins.c
index 7386611..4f9641d 100644
--- a/clang/test/CodeGen/builtins.c
+++ b/clang/test/CodeGen/builtins.c
@@ -948,14 +948,14 @@ void test_builtin_popcountg(unsigned char uc, unsigned short us,
volatile int pop;
pop = __builtin_popcountg(uc);
// CHECK: %1 = load i8, ptr %uc.addr, align 1
- // CHECK-NEXT: %conv = zext i8 %1 to i32
- // CHECK-NEXT: %2 = call i32 @llvm.ctpop.i32(i32 %conv)
- // CHECK-NEXT: store volatile i32 %2, ptr %pop, align 4
+ // CHECK-NEXT: %2 = call i8 @llvm.ctpop.i8(i8 %1)
+ // CHECK-NEXT: %cast = sext i8 %2 to i32
+ // CHECK-NEXT: store volatile i32 %cast, ptr %pop, align 4
pop = __builtin_popcountg(us);
// CHECK-NEXT: %3 = load i16, ptr %us.addr, align 2
- // CHECK-NEXT: %conv1 = zext i16 %3 to i32
- // CHECK-NEXT: %4 = call i32 @llvm.ctpop.i32(i32 %conv1)
- // CHECK-NEXT: store volatile i32 %4, ptr %pop, align 4
+ // CHECK-NEXT: %4 = call i16 @llvm.ctpop.i16(i16 %3)
+ // CHECK-NEXT: %cast1 = sext i16 %4 to i32
+ // CHECK-NEXT: store volatile i32 %cast1, ptr %pop, align 4
pop = __builtin_popcountg(ui);
// CHECK-NEXT: %5 = load i32, ptr %ui.addr, align 4
// CHECK-NEXT: %6 = call i32 @llvm.ctpop.i32(i32 %5)
@@ -963,23 +963,23 @@ void test_builtin_popcountg(unsigned char uc, unsigned short us,
pop = __builtin_popcountg(ul);
// CHECK-NEXT: %7 = load i64, ptr %ul.addr, align 8
// CHECK-NEXT: %8 = call i64 @llvm.ctpop.i64(i64 %7)
- // CHECK-NEXT: %cast = trunc i64 %8 to i32
- // CHECK-NEXT: store volatile i32 %cast, ptr %pop, align 4
+ // CHECK-NEXT: %cast2 = trunc i64 %8 to i32
+ // CHECK-NEXT: store volatile i32 %cast2, ptr %pop, align 4
pop = __builtin_popcountg(ull);
// CHECK-NEXT: %9 = load i64, ptr %ull.addr, align 8
// CHECK-NEXT: %10 = call i64 @llvm.ctpop.i64(i64 %9)
- // CHECK-NEXT: %cast2 = trunc i64 %10 to i32
- // CHECK-NEXT: store volatile i32 %cast2, ptr %pop, align 4
+ // CHECK-NEXT: %cast3 = trunc i64 %10 to i32
+ // CHECK-NEXT: store volatile i32 %cast3, ptr %pop, align 4
pop = __builtin_popcountg(ui128);
// CHECK-NEXT: %11 = load i128, ptr %ui128.addr, align 16
// CHECK-NEXT: %12 = call i128 @llvm.ctpop.i128(i128 %11)
- // CHECK-NEXT: %cast3 = trunc i128 %12 to i32
- // CHECK-NEXT: store volatile i32 %cast3, ptr %pop, align 4
+ // CHECK-NEXT: %cast4 = trunc i128 %12 to i32
+ // CHECK-NEXT: store volatile i32 %cast4, ptr %pop, align 4
pop = __builtin_popcountg(ubi128);
// CHECK-NEXT: %13 = load i128, ptr %ubi128.addr, align 8
// CHECK-NEXT: %14 = call i128 @llvm.ctpop.i128(i128 %13)
- // CHECK-NEXT: %cast4 = trunc i128 %14 to i32
- // CHECK-NEXT: store volatile i32 %cast4, ptr %pop, align 4
+ // CHECK-NEXT: %cast5 = trunc i128 %14 to i32
+ // CHECK-NEXT: store volatile i32 %cast5, ptr %pop, align 4
// CHECK-NEXT: ret void
}
diff --git a/clang/test/CodeGen/fat-lto-objects.c b/clang/test/CodeGen/fat-lto-objects.c
index afce798..b50567c 100644
--- a/clang/test/CodeGen/fat-lto-objects.c
+++ b/clang/test/CodeGen/fat-lto-objects.c
@@ -11,10 +11,11 @@
// RUN: llvm-objcopy --dump-section=.llvm.lto=%t.full.split.bc %t.full.split.o
// RUN: llvm-dis %t.full.split.bc -o - | FileCheck %s --check-prefixes=FULL,SPLIT,NOUNIFIED
+/// Full LTO always sets EnableSplitLTOUnit when the summary is used.
// RUN: %clang -cc1 -triple x86_64-unknown-linux-gnu -flto=full -ffat-lto-objects -emit-obj < %s -o %t.full.nosplit.o
// RUN: llvm-readelf -S %t.full.nosplit.o | FileCheck %s --check-prefixes=ELF
// RUN: llvm-objcopy --dump-section=.llvm.lto=%t.full.nosplit.bc %t.full.nosplit.o
-// RUN: llvm-dis %t.full.nosplit.bc -o - | FileCheck %s --check-prefixes=FULL,NOSPLIT,NOUNIFIED
+// RUN: llvm-dis %t.full.nosplit.bc -o - | FileCheck %s --check-prefixes=FULL,SPLIT,NOUNIFIED
// RUN: %clang -cc1 -triple x86_64-unknown-linux-gnu -flto=thin -fsplit-lto-unit -ffat-lto-objects -emit-obj < %s -o %t.thin.split.o
// RUN: llvm-readelf -S %t.thin.split.o | FileCheck %s --check-prefixes=ELF
@@ -34,6 +35,21 @@
// RUN: %clang -cc1 -triple x86_64-unknown-linux-gnu -flto=full -ffat-lto-objects -fsplit-lto-unit -S < %s -o - \
// RUN: | FileCheck %s --check-prefixes=ASM
+/// Make sure that FatLTO generates .llvm.lto sections that are the same as the output from normal LTO compilations
+// RUN: %clang -O2 --target=x86_64-unknown-linux-gnu -fPIE -flto=full -ffat-lto-objects -c %s -o %t.fatlto.full.o
+// RUN: llvm-objcopy --dump-section=.llvm.lto=%t.fatlto.full.bc %t.fatlto.full.o
+// RUN: llvm-dis < %t.fatlto.full.bc -o %t.fatlto.full.ll
+// RUN: %clang -O2 --target=x86_64-unknown-linux-gnu -fPIE -flto=full -c %s -o %t.nofat.full.bc
+// RUN: llvm-dis < %t.nofat.full.bc -o %t.nofat.full.ll
+// RUN: diff %t.fatlto.full.ll %t.nofat.full.ll
+
+// RUN: %clang -O2 --target=x86_64-unknown-linux-gnu -fPIE -flto=thin -ffat-lto-objects -c %s -o %t.fatlto.thin.o
+// RUN: llvm-objcopy --dump-section=.llvm.lto=%t.fatlto.thin.bc %t.fatlto.thin.o
+// RUN: llvm-dis < %t.fatlto.thin.bc -o %t.fatlto.thin.ll
+// RUN: %clang -O2 --target=x86_64-unknown-linux-gnu -fPIE -flto=thin -c %s -o %t.nofat.thin.bc
+// RUN: llvm-dis < %t.nofat.thin.bc -o %t.nofat.thin.ll
+// RUN: diff %t.fatlto.thin.ll %t.nofat.thin.ll
+
/// Be sure we enable split LTO units correctly under -ffat-lto-objects.
// SPLIT: ![[#]] = !{i32 1, !"EnableSplitLTOUnit", i32 1}
// NOSPLIT: ![[#]] = !{i32 1, !"EnableSplitLTOUnit", i32 0}
@@ -51,6 +67,9 @@
// ASM-NEXT: .asciz "BC
// ASM-NEXT: .size .Lllvm.embedded.object
+const char* foo = "foo";
+
int test(void) {
+ const char* bar = "bar";
return 0xabcd;
}
diff --git a/clang/test/Driver/darwin-header-search-libcxx.cpp b/clang/test/Driver/darwin-header-search-libcxx.cpp
index 70cc060..5695f53 100644
--- a/clang/test/Driver/darwin-header-search-libcxx.cpp
+++ b/clang/test/Driver/darwin-header-search-libcxx.cpp
@@ -193,7 +193,7 @@
// RUN: ln -sf %t/install/bin/clang %t/symlinked1/bin/clang
// RUN: mkdir -p %t/symlinked1/include/c++/v1
-// RUN: %t/symlinked1/bin/clang -### %s -fsyntax-only 2>&1 \
+// RUN: %t/symlinked1/bin/clang -### %s -no-canonical-prefixes -fsyntax-only 2>&1 \
// RUN: --target=x86_64-apple-darwin \
// RUN: -stdlib=libc++ \
// RUN: -isysroot %S/Inputs/basic_darwin_sdk_usr_cxx_v1 \
diff --git a/clang/test/Driver/mingw-sysroot.cpp b/clang/test/Driver/mingw-sysroot.cpp
index 50152b2..5d512e6 100644
--- a/clang/test/Driver/mingw-sysroot.cpp
+++ b/clang/test/Driver/mingw-sysroot.cpp
@@ -50,10 +50,12 @@
// CHECK_TESTROOT_GCC_EXPLICIT: "-internal-isystem" "{{[^"]+}}/testroot-gcc{{/|\\\\}}include"
-// If there's a matching sysroot next to the clang binary itself, prefer that
+// With -no-canonical-prefixes, if there's a matching sysroot next to the clang binary itself, prefer that
// over a gcc in the path:
-// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang/bin/x86_64-w64-mingw32-clang -target x86_64-w64-mingw32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CLANG %s
+// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang/bin/x86_64-w64-mingw32-clang --target=x86_64-w64-mingw32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_GCC2 %s
+// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang/bin/x86_64-w64-mingw32-clang --target=x86_64-w64-mingw32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s -no-canonical-prefixes 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CLANG %s
+// CHECK_TESTROOT_GCC2: "{{[^"]+}}/testroot-gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}include"
// CHECK_TESTROOT_CLANG: "{{[^"]+}}/testroot-clang{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}include"
@@ -82,7 +84,7 @@
// that indicates that we did choose the right base, even if this particular directory
// actually doesn't exist here.
-// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang-native/bin/clang -target x86_64-w64-mingw32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CLANG_NATIVE %s
+// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang-native/bin/clang -no-canonical-prefixes --target=x86_64-w64-mingw32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CLANG_NATIVE %s
// CHECK_TESTROOT_CLANG_NATIVE: "{{[^"]+}}/testroot-clang-native{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}include"
@@ -93,12 +95,12 @@
// that defaults to x86_64 mingw, but it's easier to test this in cross setups
// with symlinks, like the other tests here.)
-// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang/bin/x86_64-w64-mingw32-clang --target=x86_64-w64-mingw32 -m32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CLANG_I686 %s
+// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang/bin/x86_64-w64-mingw32-clang -no-canonical-prefixes --target=x86_64-w64-mingw32 -m32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CLANG_I686 %s
// CHECK_TESTROOT_CLANG_I686: "{{[^"]+}}/testroot-clang{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include"
// If the user calls clang with a custom literal triple, make sure this maps
// to sysroots with the matching spelling.
-// RUN: %T/testroot-custom-triple/bin/clang --target=x86_64-w64-mingw32foo -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CUSTOM_TRIPLE %s
+// RUN: %T/testroot-custom-triple/bin/clang -no-canonical-prefixes --target=x86_64-w64-mingw32foo -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CUSTOM_TRIPLE %s
// CHECK_TESTROOT_CUSTOM_TRIPLE: "{{[^"]+}}/testroot-custom-triple{{/|\\\\}}x86_64-w64-mingw32foo{{/|\\\\}}include"
diff --git a/clang/test/Driver/no-canonical-prefixes.c b/clang/test/Driver/no-canonical-prefixes.c
index fb54f85f..669e566 100644
--- a/clang/test/Driver/no-canonical-prefixes.c
+++ b/clang/test/Driver/no-canonical-prefixes.c
@@ -26,7 +26,7 @@
// RUN: | FileCheck --check-prefix=NON-CANONICAL %s
//
// FIXME: This should really be '.real'.
-// CANONICAL: InstalledDir: {{.*}}.fake
+// CANONICAL: InstalledDir: {{.*}}bin
// CANONICAL: {{[/|\\]*}}clang{{.*}}" -cc1
//
// NON-CANONICAL: InstalledDir: .{{$}}
diff --git a/clang/test/Driver/program-path-priority.c b/clang/test/Driver/program-path-priority.c
index ee931dd..c940c4c 100644
--- a/clang/test/Driver/program-path-priority.c
+++ b/clang/test/Driver/program-path-priority.c
@@ -36,7 +36,7 @@
// RUN: touch %t/notreal-none-elf-gcc && chmod +x %t/notreal-none-elf-gcc
// RUN: env "PATH=" %t/clang -### -target notreal-none-elf %s 2>&1 | \
// RUN: FileCheck --check-prefix=PROG_PATH_NOTREAL_GCC %s
-// PROG_PATH_NOTREAL_GCC: notreal-none-elf-gcc"
+// PROG_PATH_NOTREAL_GCC: notreal-none-unknown-elf
/// <triple>-gcc on the PATH is found
// RUN: mkdir -p %t/env
@@ -57,7 +57,7 @@
// RUN: touch %t/gcc && chmod +x %t/gcc
// RUN: env "PATH=" %t/clang -### -target notreal-none-elf %s 2>&1 | \
// RUN: FileCheck --check-prefix=NOTREAL_GCC_PREFERRED %s
-// NOTREAL_GCC_PREFERRED: notreal-none-elf-gcc"
+// NOTREAL_GCC_PREFERRED: notreal-none-unknown-elf"
// NOTREAL_GCC_PREFERRED-NOT: /gcc"
/// <triple>-gcc on the PATH is preferred to gcc in program path
@@ -125,6 +125,9 @@
/// Only if there is nothing in the prefix will we search other paths
/// -f in case $DEFAULT_TRIPLE == %target_triple
// RUN: rm -f %t/prefix/$DEFAULT_TRIPLE-gcc %t/prefix/%target_triple-gcc %t/prefix/gcc
-// RUN: env "PATH=" %t/clang -### -target notreal-none-elf %s -B %t/prefix 2>&1 | \
-// RUN: FileCheck --check-prefix=EMPTY_PREFIX_DIR %s
-// EMPTY_PREFIX_DIR: notreal-none-elf-gcc"
+// RUN: env "PATH=" %t/clang -### -canonical-prefixes --target=notreal-none-elf %s -B %t/prefix 2>&1 | \
+// RUN: FileCheck --check-prefix=EMPTY_PREFIX_DIR1 %s
+// EMPTY_PREFIX_DIR1: gcc"
+// RUN: env "PATH=" %t/clang -### -no-canonical-prefixes --target=notreal-none-elf %s -B %t/prefix 2>&1 | \
+// RUN: FileCheck --check-prefix=EMPTY_PREFIX_DIR2 %s
+// EMPTY_PREFIX_DIR2: notreal-none-elf-gcc"
diff --git a/clang/test/Driver/rocm-detect.hip b/clang/test/Driver/rocm-detect.hip
index 0db994a..8b15c32 100644
--- a/clang/test/Driver/rocm-detect.hip
+++ b/clang/test/Driver/rocm-detect.hip
@@ -102,7 +102,7 @@
// RUN: rm -rf %t/rocm-spack
// RUN: cp -r %S/Inputs/rocm-spack %t
// RUN: ln -fs %clang %t/rocm-spack/llvm-amdgpu-4.0.0-ieagcs7inf7runpyfvepqkurasoglq4z/bin/clang
-// RUN: %t/rocm-spack/llvm-amdgpu-4.0.0-ieagcs7inf7runpyfvepqkurasoglq4z/bin/clang -### -v \
+// RUN: %t/rocm-spack/llvm-amdgpu-4.0.0-ieagcs7inf7runpyfvepqkurasoglq4z/bin/clang -### -no-canonical-prefixes -v \
// RUN: -resource-dir=%t/rocm-spack/llvm-amdgpu-4.0.0-ieagcs7inf7runpyfvepqkurasoglq4z/lib/clang \
// RUN: -target x86_64-linux-gnu --cuda-gpu-arch=gfx900 --print-rocm-search-dirs %s 2>&1 \
// RUN: | FileCheck -check-prefixes=SPACK %s
@@ -111,7 +111,7 @@
// ROCm release. --hip-path and --rocm-device-lib-path can be used to specify them.
// RUN: cp -r %t/rocm-spack/hip-* %t/rocm-spack/hip-4.0.0-abcd
-// RUN: %t/rocm-spack/llvm-amdgpu-4.0.0-ieagcs7inf7runpyfvepqkurasoglq4z/bin/clang -### -v \
+// RUN: %t/rocm-spack/llvm-amdgpu-4.0.0-ieagcs7inf7runpyfvepqkurasoglq4z/bin/clang -### -no-canonical-prefixes -v \
// RUN: -target x86_64-linux-gnu --cuda-gpu-arch=gfx900 \
// RUN: --hip-path=%t/rocm-spack/hip-4.0.0-abcd \
// RUN: %s 2>&1 | FileCheck -check-prefixes=SPACK-SET %s
diff --git a/clang/test/OpenMP/interop_codegen.cpp b/clang/test/OpenMP/interop_codegen.cpp
new file mode 100644
index 0000000..ea83ef8
--- /dev/null
+++ b/clang/test/OpenMP/interop_codegen.cpp
@@ -0,0 +1,35 @@
+// expected-no-diagnostics
+// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s
+
+#ifndef HEADER
+#define HEADER
+
+typedef void *omp_interop_t;
+#define omp_interop_none 0
+#define omp_ipr_fr_id -1
+typedef long omp_intptr_t;
+#define NULL 0
+
+extern omp_intptr_t omp_get_interop_int(const omp_interop_t, int, int *);
+
+int main() {
+ omp_interop_t obj = omp_interop_none;
+ omp_interop_t i1 = omp_interop_none;
+ omp_interop_t i2 = omp_interop_none;
+ omp_interop_t i3 = omp_interop_none;
+ omp_interop_t i4 = omp_interop_none;
+ omp_interop_t i5 = omp_interop_none;
+
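+  // A single interop directive may carry more than one init clause.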
+ #pragma omp interop init(targetsync: i1) init(targetsync: obj)
+  int id = (int)omp_get_interop_int(obj, omp_ipr_fr_id, NULL);
+  int id1 = (int)omp_get_interop_int(i1, omp_ipr_fr_id, NULL);
+
+
+}
+#endif
+
+// CHECK-LABEL: define {{.+}}main{{.+}}
+// CHECK: call {{.+}}__tgt_interop_init({{.+}}i1{{.*}})
+// CHECK: call {{.+}}__tgt_interop_init({{.+}}obj{{.*}})
diff --git a/clang/test/OpenMP/scan_ast_print.cpp b/clang/test/OpenMP/scan_ast_print.cpp
index 3bbd3b6..82cb13e 100644
--- a/clang/test/OpenMP/scan_ast_print.cpp
+++ b/clang/test/OpenMP/scan_ast_print.cpp
@@ -19,21 +19,39 @@ T tmain(T argc) {
for (int i = 0; i < 10; ++i) {
#pragma omp scan inclusive(a)
}
+#pragma omp parallel for reduction(inscan, +:a)
+ for (int i = 0; i < 10; ++i) {
+#pragma omp scan inclusive(a)
+ }
return a + argc;
}
// CHECK: static T a;
// CHECK-NEXT: #pragma omp for reduction(inscan, +: a)
// CHECK-NEXT: for (int i = 0; i < 10; ++i) {
// CHECK-NEXT: #pragma omp scan inclusive(a){{$}}
+
+// CHECK: #pragma omp parallel for reduction(inscan, +: a)
+// CHECK-NEXT: for (int i = 0; i < 10; ++i) {
+// CHECK-NEXT: #pragma omp scan inclusive(a){{$}}
+
// CHECK: static int a;
// CHECK-NEXT: #pragma omp for reduction(inscan, +: a)
// CHECK-NEXT: for (int i = 0; i < 10; ++i) {
// CHECK-NEXT: #pragma omp scan inclusive(a)
+
+// CHECK: #pragma omp parallel for reduction(inscan, +: a)
+// CHECK-NEXT: for (int i = 0; i < 10; ++i) {
+// CHECK-NEXT: #pragma omp scan inclusive(a)
+
// CHECK: static char a;
// CHECK-NEXT: #pragma omp for reduction(inscan, +: a)
// CHECK-NEXT: for (int i = 0; i < 10; ++i) {
// CHECK-NEXT: #pragma omp scan inclusive(a)
+// CHECK: #pragma omp parallel for reduction(inscan, +: a)
+// CHECK-NEXT: for (int i = 0; i < 10; ++i) {
+// CHECK-NEXT: #pragma omp scan inclusive(a)
+
int main(int argc, char **argv) {
static int a;
// CHECK: static int a;
diff --git a/clang/test/Preprocessor/riscv-target-features.c b/clang/test/Preprocessor/riscv-target-features.c
index ea81c66..664279c 100644
--- a/clang/test/Preprocessor/riscv-target-features.c
+++ b/clang/test/Preprocessor/riscv-target-features.c
@@ -74,6 +74,7 @@
// CHECK-NOT: __riscv_xventanacondops {{.*$}}
// CHECK-NOT: __riscv_za128rs {{.*$}}
// CHECK-NOT: __riscv_za64rs {{.*$}}
+// CHECK-NOT: __riscv_zacas {{.*$}}
// CHECK-NOT: __riscv_zawrs {{.*$}}
// CHECK-NOT: __riscv_zba {{.*$}}
// CHECK-NOT: __riscv_zbb {{.*$}}
@@ -166,7 +167,6 @@
// CHECK-NOT: __riscv_ssqosid{{.*$}}
// CHECK-NOT: __riscv_supm{{.*$}}
// CHECK-NOT: __riscv_zaamo {{.*$}}
-// CHECK-NOT: __riscv_zacas {{.*$}}
// CHECK-NOT: __riscv_zalasr {{.*$}}
// CHECK-NOT: __riscv_zalrsc {{.*$}}
// CHECK-NOT: __riscv_zcmop {{.*$}}
@@ -660,6 +660,14 @@
// RUN: -o - | FileCheck --check-prefix=CHECK-ZA64RS-EXT %s
// CHECK-ZA64RS-EXT: __riscv_za64rs 1000000{{$}}
+// RUN: %clang --target=riscv32 \
+// RUN: -march=rv32i_zacas1p0 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZACAS-EXT %s
+// RUN: %clang --target=riscv64 \
+// RUN: -march=rv64i_zacas1p0 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZACAS-EXT %s
+// CHECK-ZACAS-EXT: __riscv_zacas 1000000{{$}}
+
// RUN: %clang --target=riscv32-unknown-linux-gnu \
// RUN: -march=rv32izawrs -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-ZAWRS-EXT %s
@@ -1486,14 +1494,6 @@
// CHECK-ZAAMO-EXT: __riscv_zaamo 2000{{$}}
// RUN: %clang --target=riscv32 -menable-experimental-extensions \
-// RUN: -march=rv32i_zacas1p0 -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZACAS-EXT %s
-// RUN: %clang --target=riscv64 -menable-experimental-extensions \
-// RUN: -march=rv64i_zacas1p0 -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZACAS-EXT %s
-// CHECK-ZACAS-EXT: __riscv_zacas 1000000{{$}}
-
-// RUN: %clang --target=riscv32 -menable-experimental-extensions \
// RUN: -march=rv32i_zalasr0p1 -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-ZALASR-EXT %s
// RUN: %clang --target=riscv64 -menable-experimental-extensions \
diff --git a/clang/test/Sema/builtin-popcountg.c b/clang/test/Sema/builtin-popcountg.c
index e18b910..9d09592 100644
--- a/clang/test/Sema/builtin-popcountg.c
+++ b/clang/test/Sema/builtin-popcountg.c
@@ -1,14 +1,23 @@
-// RUN: %clang_cc1 -triple=x86_64-pc-linux-gnu -fsyntax-only -verify -Wpedantic %s
+// RUN: %clang_cc1 -std=c23 -triple=x86_64-pc-linux-gnu -fsyntax-only -verify -Wpedantic %s
typedef int int2 __attribute__((ext_vector_type(2)));
-void test_builtin_popcountg(int i, double d, int2 i2) {
+void test_builtin_popcountg(short s, int i, __int128 i128, _BitInt(128) bi128,
+ double d, int2 i2) {
__builtin_popcountg();
// expected-error@-1 {{too few arguments to function call, expected 1, have 0}}
__builtin_popcountg(i, i);
// expected-error@-1 {{too many arguments to function call, expected 1, have 2}}
+ __builtin_popcountg(s);
+ // expected-error@-1 {{1st argument must be a type of unsigned integer (was 'short')}}
+ __builtin_popcountg(i);
+ // expected-error@-1 {{1st argument must be a type of unsigned integer (was 'int')}}
+ __builtin_popcountg(i128);
+ // expected-error@-1 {{1st argument must be a type of unsigned integer (was '__int128')}}
+ __builtin_popcountg(bi128);
+ // expected-error@-1 {{1st argument must be a type of unsigned integer (was '_BitInt(128)')}}
__builtin_popcountg(d);
- // expected-error@-1 {{1st argument must be a type of integer (was 'double')}}
+ // expected-error@-1 {{1st argument must be a type of unsigned integer (was 'double')}}
__builtin_popcountg(i2);
- // expected-error@-1 {{1st argument must be a type of integer (was 'int2' (vector of 2 'int' values))}}
+ // expected-error@-1 {{1st argument must be a type of unsigned integer (was 'int2' (vector of 2 'int' values))}}
}
diff --git a/clang/test/SemaCXX/warn-bool-conversion.cpp b/clang/test/SemaCXX/warn-bool-conversion.cpp
index c81d52d..9e8cf0e 100644
--- a/clang/test/SemaCXX/warn-bool-conversion.cpp
+++ b/clang/test/SemaCXX/warn-bool-conversion.cpp
@@ -81,6 +81,18 @@ struct S2 {
bool f5();
bool f6(int);
+#if __cplusplus >= 201103L
+auto f7 = []{};
+auto f8 = [](){};
+
+void foo() {
+ bool b;
+ b = f7; // expected-warning {{address of lambda function pointer conversion operator will always evaluate to 'true'}}
+ b = f8; // expected-warning {{address of lambda function pointer conversion operator will always evaluate to 'true'}}
+ bool is_true = [](){ return true; };
+ // expected-warning@-1{{address of lambda function pointer conversion operator will always evaluate to 'true'}}
+}
+#endif
void bar() {
bool b;
diff --git a/clang/test/SemaOpenACC/no-branch-in-out.c b/clang/test/SemaOpenACC/no-branch-in-out.c
index f8fb40a..d070247 100644
--- a/clang/test/SemaOpenACC/no-branch-in-out.c
+++ b/clang/test/SemaOpenACC/no-branch-in-out.c
@@ -113,3 +113,200 @@ void Return() {
}
}
}
+
+void Goto() {
+ int j;
+#pragma acc parallel // expected-note{{invalid branch out of OpenACC Compute Construct}}
+ while(j) {
+ if (j <3)
+ goto LABEL; // expected-error{{cannot jump from this goto statement to its label}}
+ }
+
+LABEL:
+ {}
+
+ goto LABEL_IN; // expected-error{{cannot jump from this goto statement to its label}}
+
+#pragma acc parallel // expected-note{{invalid branch into OpenACC Compute Construct}}
+ for(int i = 0; i < 5; ++i) {
+LABEL_IN:
+ {}
+ }
+
+#pragma acc parallel
+ for(int i = 0; i < 5; ++i) {
+LABEL_NOT_CALLED:
+ {}
+ }
+
+#pragma acc parallel
+ {
+ goto ANOTHER_LOOP; // expected-error{{cannot jump from this goto statement to its label}}
+
+ }
+#pragma acc parallel// expected-note{{invalid branch into OpenACC Compute Construct}}
+
+ {
+ANOTHER_LOOP:
+ {}
+ }
+
+#pragma acc parallel
+ {
+ while (j) {
+ --j;
+ if (j < 3)
+ goto LABEL2;
+
+ if (j > 4)
+ break;
+ }
+LABEL2:
+ {}
+ }
+
+#pragma acc parallel
+ do {
+ if (j < 3)
+ goto LABEL3;
+
+ if (j > 4)
+ break; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+
+LABEL3:
+ {}
+ } while (j);
+
+LABEL4:
+ {}
+#pragma acc parallel// expected-note{{invalid branch out of OpenACC Compute Construct}}
+ {
+ goto LABEL4;// expected-error{{cannot jump from this goto statement to its label}}
+ }
+
+#pragma acc parallel// expected-note{{invalid branch into OpenACC Compute Construct}}
+
+ {
+LABEL5:
+ {}
+ }
+
+ {
+ goto LABEL5;// expected-error{{cannot jump from this goto statement to its label}}
+ }
+
+#pragma acc parallel
+ {
+LABEL6:
+ {}
+ goto LABEL6;
+
+ }
+
+#pragma acc parallel
+ goto LABEL7; // expected-error{{cannot jump from this goto statement to its label}}
+#pragma acc parallel// expected-note{{invalid branch into OpenACC Compute Construct}}
+ {
+LABEL7:{}
+ }
+
+#pragma acc parallel
+ LABEL8:{}
+#pragma acc parallel// expected-note{{invalid branch out of OpenACC Compute Construct}}
+ {
+ goto LABEL8;// expected-error{{cannot jump from this goto statement to its label}}
+ }
+
+
+#pragma acc parallel// expected-note{{invalid branch into OpenACC Compute Construct}}
+ {
+LABEL9:{}
+ }
+
+ ({goto LABEL9;});// expected-error{{cannot jump from this goto statement to its label}}
+
+#pragma acc parallel// expected-note{{invalid branch out of OpenACC Compute Construct}}
+ {
+ ({goto LABEL10;});// expected-error{{cannot jump from this goto statement to its label}}
+ }
+
+LABEL10:{}
+
+ ({goto LABEL11;});// expected-error{{cannot jump from this goto statement to its label}}
+#pragma acc parallel// expected-note{{invalid branch into OpenACC Compute Construct}}
+ {
+LABEL11:{}
+ }
+
+LABEL12:{}
+#pragma acc parallel// expected-note{{invalid branch out of OpenACC Compute Construct}}
+ {
+ ({goto LABEL12;});// expected-error{{cannot jump from this goto statement to its label}}
+ }
+
+#pragma acc parallel
+ {
+ ({goto LABEL13;});
+LABEL13:{}
+ }
+
+#pragma acc parallel
+ {
+ LABEL14:{}
+ ({goto LABEL14;});
+ }
+}
+
+void IndirectGoto1() {
+ void* ptr;
+#pragma acc parallel
+ {
+LABEL1:{}
+ ptr = &&LABEL1;
+
+ goto *ptr;
+
+ }
+}
+
+void IndirectGoto2() {
+ void* ptr;
+LABEL2:{} // #GOTOLBL2
+ ptr = &&LABEL2;
+#pragma acc parallel // #GOTOPAR2
+ {
+// expected-error@+3{{cannot jump from this indirect goto statement to one of its possible targets}}
+// expected-note@#GOTOLBL2{{possible target of indirect goto statement}}
+// expected-note@#GOTOPAR2{{invalid branch out of OpenACC Compute Construct}}
+ goto *ptr;
+ }
+}
+
+void IndirectGoto3() {
+ void* ptr;
+#pragma acc parallel // #GOTOPAR3
+ {
+LABEL3:{} // #GOTOLBL3
+ ptr = &&LABEL3;
+ }
+// expected-error@+3{{cannot jump from this indirect goto statement to one of its possible targets}}
+// expected-note@#GOTOLBL3{{possible target of indirect goto statement}}
+// expected-note@#GOTOPAR3{{invalid branch into OpenACC Compute Construct}}
+ goto *ptr;
+}
+
+void IndirectGoto4() {
+ void* ptr;
+#pragma acc parallel // #GOTOPAR4
+ {
+LABEL4:{}
+ ptr = &&LABEL4;
+// expected-error@+3{{cannot jump from this indirect goto statement to one of its possible targets}}
+// expected-note@#GOTOLBL5{{possible target of indirect goto statement}}
+// expected-note@#GOTOPAR4{{invalid branch out of OpenACC Compute Construct}}
+ goto *ptr;
+ }
+LABEL5:// #GOTOLBL5
+
+ ptr=&&LABEL5;
+}
diff --git a/clang/tools/clang-installapi/ClangInstallAPI.cpp b/clang/tools/clang-installapi/ClangInstallAPI.cpp
index 43c9fca..ff031e0 100644
--- a/clang/tools/clang-installapi/ClangInstallAPI.cpp
+++ b/clang/tools/clang-installapi/ClangInstallAPI.cpp
@@ -96,7 +96,6 @@ static bool run(ArrayRef<const char *> Args, const char *ProgName) {
auto DriverArgs = llvm::ArrayRef(Args).slice(1);
clang::driver::Driver Driver(ProgName, llvm::sys::getDefaultTargetTriple(),
*Diag, "clang installapi tool");
- Driver.setInstalledDir(llvm::sys::path::parent_path(ProgName));
auto TargetAndMode =
clang::driver::ToolChain::getTargetAndModeFromProgramName(ProgName);
Driver.setTargetAndMode(TargetAndMode);
diff --git a/clang/tools/driver/driver.cpp b/clang/tools/driver/driver.cpp
index 0dfb512..376025e 100644
--- a/clang/tools/driver/driver.cpp
+++ b/clang/tools/driver/driver.cpp
@@ -323,28 +323,6 @@ static void FixupDiagPrefixExeName(TextDiagnosticPrinter *DiagClient,
DiagClient->setPrefix(std::string(ExeBasename));
}
-static void SetInstallDir(SmallVectorImpl<const char *> &argv,
- Driver &TheDriver, bool CanonicalPrefixes) {
- // Attempt to find the original path used to invoke the driver, to determine
- // the installed path. We do this manually, because we want to support that
- // path being a symlink.
- SmallString<128> InstalledPath(argv[0]);
-
- // Do a PATH lookup, if there are no directory components.
- if (llvm::sys::path::filename(InstalledPath) == InstalledPath)
- if (llvm::ErrorOr<std::string> Tmp = llvm::sys::findProgramByName(
- llvm::sys::path::filename(InstalledPath.str())))
- InstalledPath = *Tmp;
-
- // FIXME: We don't actually canonicalize this, we just make it absolute.
- if (CanonicalPrefixes)
- llvm::sys::fs::make_absolute(InstalledPath);
-
- StringRef InstalledPathParent(llvm::sys::path::parent_path(InstalledPath));
- if (llvm::sys::fs::exists(InstalledPathParent))
- TheDriver.setInstalledDir(InstalledPathParent);
-}
-
static int ExecuteCC1Tool(SmallVectorImpl<const char *> &ArgV,
const llvm::ToolContext &ToolContext) {
// If we call the cc1 tool from the clangDriver library (through
@@ -484,7 +462,6 @@ int clang_main(int Argc, char **Argv, const llvm::ToolContext &ToolContext) {
ProcessWarningOptions(Diags, *DiagOpts, /*ReportDiags=*/false);
Driver TheDriver(Path, llvm::sys::getDefaultTargetTriple(), Diags);
- SetInstallDir(Args, TheDriver, CanonicalPrefixes);
auto TargetAndMode = ToolChain::getTargetAndModeFromProgramName(ProgName);
TheDriver.setTargetAndMode(TargetAndMode);
// If -canonical-prefixes is set, GetExecutablePath will have resolved Path
diff --git a/compiler-rt/lib/scudo/standalone/allocator_common.h b/compiler-rt/lib/scudo/standalone/allocator_common.h
index 95f4776..2b77516 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_common.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_common.h
@@ -40,6 +40,7 @@ template <class SizeClassAllocator> struct TransferBatch {
B->Count = static_cast<u16>(B->Count - N);
}
void clear() { Count = 0; }
+ bool empty() { return Count == 0; }
void add(CompactPtrT P) {
DCHECK_LT(Count, MaxNumCached);
Batch[Count++] = P;
@@ -48,6 +49,12 @@ template <class SizeClassAllocator> struct TransferBatch {
memcpy(Array, Batch, sizeof(Batch[0]) * Count);
clear();
}
+
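+  // Move the last N cached pointers into Array and drop them from this batch.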
+ void moveNToArray(CompactPtrT *Array, u16 N) {
+ DCHECK_LE(N, Count);
+ memcpy(Array, Batch + Count - N, sizeof(Batch[0]) * N);
+ Count = static_cast<u16>(Count - N);
+ }
u16 getCount() const { return Count; }
bool isEmpty() const { return Count == 0U; }
CompactPtrT get(u16 I) const {
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 4d03b28..c86e75b 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -191,38 +191,21 @@ public:
return BlockSize > PageSize;
}
- // Note that the `MaxBlockCount` will be used when we support arbitrary blocks
- // count. Now it's the same as the number of blocks stored in the
- // `TransferBatch`.
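+  // Pops up to `MaxBlockCount` blocks of class `ClassId` into `ToArray` and
+  // returns the number of blocks popped.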
u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
- UNUSED const u16 MaxBlockCount) {
- TransferBatchT *B = popBatch(C, ClassId);
- if (!B)
- return 0;
-
- const u16 Count = B->getCount();
- DCHECK_GT(Count, 0U);
- B->moveToArray(ToArray);
-
- if (ClassId != SizeClassMap::BatchClassId)
- C->deallocate(SizeClassMap::BatchClassId, B);
-
- return Count;
- }
-
- TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
+ const u16 MaxBlockCount) {
DCHECK_LT(ClassId, NumClasses);
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
ScopedLock L(Sci->Mutex);
- TransferBatchT *B = popBatchImpl(C, ClassId, Sci);
- if (UNLIKELY(!B)) {
+
+ u16 PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
+ if (UNLIKELY(PopCount == 0)) {
if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
- return nullptr;
- B = popBatchImpl(C, ClassId, Sci);
- // if `populateFreeList` succeeded, we are supposed to get free blocks.
- DCHECK_NE(B, nullptr);
+ return 0U;
+ PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
+ DCHECK_NE(PopCount, 0U);
}
- return B;
+
+ return PopCount;
}
// Push the array of free blocks to the designated batch group.
@@ -510,7 +493,7 @@ private:
// by TransferBatch is also free for use. We don't need to recycle the
// TransferBatch. Note that the correctness is maintained by the invariant,
//
- // The unit of each popBatch() request is entire TransferBatch. Return
+ // Each popBlocks() request returns the entire TransferBatch. Returning
// part of the blocks in a TransferBatch is invalid.
//
// This ensures that TransferBatch won't leak the address itself while it's
@@ -634,7 +617,7 @@ private:
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
BG->BytesInBGAtLastCheckpoint = 0;
- BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));
+ BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;
return BG;
};
@@ -726,14 +709,11 @@ private:
InsertBlocks(Cur, Array + Size - Count, Count);
}
- // Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
- // group id will be considered first.
- //
- // The region mutex needs to be held while calling this method.
- TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
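+  // Pops up to `MaxBlockCount` blocks from the front BatchGroup into `ToArray`
+  // and returns the number popped, or 0 if the freelist is empty.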
+ u16 popBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
+ CompactPtrT *ToArray, const u16 MaxBlockCount)
REQUIRES(Sci->Mutex) {
if (Sci->FreeListInfo.BlockList.empty())
- return nullptr;
+ return 0U;
SinglyLinkedList<TransferBatchT> &Batches =
Sci->FreeListInfo.BlockList.front()->Batches;
@@ -746,33 +726,57 @@ private:
// Block used by `BatchGroup` is from BatchClassId. Turn the block into
// `TransferBatch` with single block.
TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
- TB->clear();
- TB->add(
- compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
+ ToArray[0] =
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB));
Sci->FreeListInfo.PoppedBlocks += 1;
- return TB;
+ return 1U;
}
+    // So far, instead of always filling blocks up to `MaxBlockCount`, we only
+    // examine a single `TransferBatch` to minimize the time spent in the
+    // primary allocator. Note that the sizes of `TransferBatch` and
+    // `CacheT::getMaxCached()` also affect how much time is spent accessing
+    // the primary allocator.
+ // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
+ // blocks and/or adjust the size of `TransferBatch` according to
+ // `CacheT::getMaxCached()`.
TransferBatchT *B = Batches.front();
- Batches.pop_front();
DCHECK_NE(B, nullptr);
DCHECK_GT(B->getCount(), 0U);
- if (Batches.empty()) {
- BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
- Sci->FreeListInfo.BlockList.pop_front();
-
- // We don't keep BatchGroup with zero blocks to avoid empty-checking while
- // allocating. Note that block used by constructing BatchGroup is recorded
- // as free blocks in the last element of BatchGroup::Batches. Which means,
- // once we pop the last TransferBatch, the block is implicitly
- // deallocated.
+    // BatchClassId should always take all blocks in the TransferBatch. Read the
+ // comment in `pushBatchClassBlocks()` for more details.
+ const u16 PopCount = ClassId == SizeClassMap::BatchClassId
+ ? B->getCount()
+ : Min(MaxBlockCount, B->getCount());
+ B->moveNToArray(ToArray, PopCount);
+
+ // TODO(chiahungduan): The deallocation of unused BatchClassId blocks can be
+ // done without holding `Mutex`.
+ if (B->empty()) {
+ Batches.pop_front();
+ // `TransferBatch` of BatchClassId is self-contained, no need to
+ // deallocate. Read the comment in `pushBatchClassBlocks()` for more
+ // details.
if (ClassId != SizeClassMap::BatchClassId)
- C->deallocate(SizeClassMap::BatchClassId, BG);
+ C->deallocate(SizeClassMap::BatchClassId, B);
+
+ if (Batches.empty()) {
+ BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
+ Sci->FreeListInfo.BlockList.pop_front();
+
+      // We don't keep a BatchGroup with zero blocks to avoid empty-checking
+      // while allocating. Note that the block used for constructing the
+      // BatchGroup is recorded as a free block in the last element of
+      // BatchGroup::Batches, which means that once we pop the last
+      // TransferBatch, the block is implicitly deallocated.
+ if (ClassId != SizeClassMap::BatchClassId)
+ C->deallocate(SizeClassMap::BatchClassId, BG);
+ }
}
- Sci->FreeListInfo.PoppedBlocks += B->getCount();
- return B;
+ Sci->FreeListInfo.PoppedBlocks += PopCount;
+ return PopCount;
}
NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 9a642d2..d89a2e6 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -12,6 +12,7 @@
#include "allocator_common.h"
#include "bytemap.h"
#include "common.h"
+#include "condition_variable.h"
#include "list.h"
#include "local_cache.h"
#include "mem_map.h"
@@ -22,8 +23,6 @@
#include "string_utils.h"
#include "thread_annotations.h"
-#include "condition_variable.h"
-
namespace scudo {
// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
@@ -221,41 +220,24 @@ public:
DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
}
- // Note that the `MaxBlockCount` will be used when we support arbitrary blocks
- // count. Now it's the same as the number of blocks stored in the
- // `TransferBatch`.
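+  // Pops up to `MaxBlockCount` blocks of class `ClassId` into `ToArray` and
+  // returns the number of blocks popped.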
u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
- UNUSED const u16 MaxBlockCount) {
- TransferBatchT *B = popBatch(C, ClassId);
- if (!B)
- return 0;
-
- const u16 Count = B->getCount();
- DCHECK_GT(Count, 0U);
- B->moveToArray(ToArray);
-
- if (ClassId != SizeClassMap::BatchClassId)
- C->deallocate(SizeClassMap::BatchClassId, B);
-
- return Count;
- }
-
- TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
+ const u16 MaxBlockCount) {
DCHECK_LT(ClassId, NumClasses);
RegionInfo *Region = getRegionInfo(ClassId);
+ u16 PopCount = 0;
{
ScopedLock L(Region->FLLock);
- TransferBatchT *B = popBatchImpl(C, ClassId, Region);
- if (LIKELY(B))
- return B;
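+      // Fast path: serve the request from the freelist while holding only
+      // FLLock.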
+ PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ if (PopCount != 0U)
+ return PopCount;
}
bool ReportRegionExhausted = false;
- TransferBatchT *B = nullptr;
if (conditionVariableEnabled()) {
- B = popBatchWithCV(C, ClassId, Region, ReportRegionExhausted);
+ PopCount = popBlocksWithCV(C, ClassId, Region, ToArray, MaxBlockCount,
+ ReportRegionExhausted);
} else {
while (true) {
// When two threads compete for `Region->MMLock`, we only want one of
@@ -264,13 +246,15 @@ public:
ScopedLock ML(Region->MMLock);
{
ScopedLock FL(Region->FLLock);
- if ((B = popBatchImpl(C, ClassId, Region)))
- break;
+ PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ if (PopCount != 0U)
+ return PopCount;
}
const bool RegionIsExhausted = Region->Exhausted;
if (!RegionIsExhausted)
- B = populateFreeListAndPopBatch(C, ClassId, Region);
+ PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
+ MaxBlockCount);
ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
break;
}
@@ -286,7 +270,7 @@ public:
reportOutOfBatchClass();
}
- return B;
+ return PopCount;
}
// Push the array of free blocks to the designated batch group.
@@ -640,7 +624,7 @@ private:
// by TransferBatch is also free for use. We don't need to recycle the
// TransferBatch. Note that the correctness is maintained by the invariant,
//
- // The unit of each popBatch() request is entire TransferBatch. Return
+ // Each popBlocks() request returns the entire TransferBatch. Returning
// part of the blocks in a TransferBatch is invalid.
//
// This ensures that TransferBatch won't leak the address itself while it's
@@ -763,7 +747,7 @@ private:
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
BG->BytesInBGAtLastCheckpoint = 0;
- BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));
+ BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;
return BG;
};
@@ -855,9 +839,10 @@ private:
InsertBlocks(Cur, Array + Size - Count, Count);
}
- TransferBatchT *popBatchWithCV(CacheT *C, uptr ClassId, RegionInfo *Region,
- bool &ReportRegionExhausted) {
- TransferBatchT *B = nullptr;
+ u16 popBlocksWithCV(CacheT *C, uptr ClassId, RegionInfo *Region,
+ CompactPtrT *ToArray, const u16 MaxBlockCount,
+ bool &ReportRegionExhausted) {
+ u16 PopCount = 0;
while (true) {
// We only expect one thread doing the freelist refillment and other
@@ -878,7 +863,8 @@ private:
const bool RegionIsExhausted = Region->Exhausted;
if (!RegionIsExhausted)
- B = populateFreeListAndPopBatch(C, ClassId, Region);
+ PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
+ MaxBlockCount);
ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
{
@@ -905,7 +891,8 @@ private:
// blocks were used up right after the refillment. Therefore, we have to
// check if someone is still populating the freelist.
ScopedLock FL(Region->FLLock);
- if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
+ PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ if (PopCount != 0U)
break;
if (!Region->isPopulatingFreeList)
@@ -918,21 +905,19 @@ private:
// `pushBatchClassBlocks()` and `mergeGroupsToReleaseBack()`.
Region->FLLockCV.wait(Region->FLLock);
- if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
+ PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ if (PopCount != 0U)
break;
}
- return B;
+ return PopCount;
}
- // Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
- // group id will be considered first.
- //
- // The region mutex needs to be held while calling this method.
- TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, RegionInfo *Region)
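+  // Pops up to `MaxBlockCount` blocks from the front BatchGroup into `ToArray`
+  // and returns the number popped, or 0 if the freelist is empty.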
+ u16 popBlocksImpl(CacheT *C, uptr ClassId, RegionInfo *Region,
+ CompactPtrT *ToArray, const u16 MaxBlockCount)
REQUIRES(Region->FLLock) {
if (Region->FreeListInfo.BlockList.empty())
- return nullptr;
+ return 0U;
SinglyLinkedList<TransferBatchT> &Batches =
Region->FreeListInfo.BlockList.front()->Batches;
@@ -945,39 +930,64 @@ private:
// Block used by `BatchGroup` is from BatchClassId. Turn the block into
// `TransferBatch` with single block.
TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
- TB->clear();
- TB->add(
- compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
+ ToArray[0] =
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB));
Region->FreeListInfo.PoppedBlocks += 1;
- return TB;
+ return 1U;
}
+    // So far, instead of always filling blocks up to `MaxBlockCount`, we only
+    // examine a single `TransferBatch` to minimize the time spent in the
+    // primary allocator. Note that the sizes of `TransferBatch` and
+    // `CacheT::getMaxCached()` also affect how much time is spent accessing
+    // the primary allocator.
+ // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
+ // blocks and/or adjust the size of `TransferBatch` according to
+ // `CacheT::getMaxCached()`.
TransferBatchT *B = Batches.front();
- Batches.pop_front();
DCHECK_NE(B, nullptr);
DCHECK_GT(B->getCount(), 0U);
- if (Batches.empty()) {
- BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
- Region->FreeListInfo.BlockList.pop_front();
-
- // We don't keep BatchGroup with zero blocks to avoid empty-checking while
- // allocating. Note that block used by constructing BatchGroup is recorded
- // as free blocks in the last element of BatchGroup::Batches. Which means,
- // once we pop the last TransferBatch, the block is implicitly
- // deallocated.
+    // BatchClassId should always take all blocks in the TransferBatch. Read the
+ // comment in `pushBatchClassBlocks()` for more details.
+ const u16 PopCount = ClassId == SizeClassMap::BatchClassId
+ ? B->getCount()
+ : Min(MaxBlockCount, B->getCount());
+ B->moveNToArray(ToArray, PopCount);
+
+ // TODO(chiahungduan): The deallocation of unused BatchClassId blocks can be
+ // done without holding `FLLock`.
+ if (B->empty()) {
+ Batches.pop_front();
+ // `TransferBatch` of BatchClassId is self-contained, no need to
+ // deallocate. Read the comment in `pushBatchClassBlocks()` for more
+ // details.
if (ClassId != SizeClassMap::BatchClassId)
- C->deallocate(SizeClassMap::BatchClassId, BG);
+ C->deallocate(SizeClassMap::BatchClassId, B);
+
+ if (Batches.empty()) {
+ BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
+ Region->FreeListInfo.BlockList.pop_front();
+
+      // We don't keep a BatchGroup with zero blocks to avoid empty-checking
+      // while allocating. Note that the block used for constructing the
+      // BatchGroup is recorded as a free block in the last element of
+      // BatchGroup::Batches, which means that once we pop the last
+      // TransferBatch, the block is implicitly deallocated.
+ if (ClassId != SizeClassMap::BatchClassId)
+ C->deallocate(SizeClassMap::BatchClassId, BG);
+ }
}
- Region->FreeListInfo.PoppedBlocks += B->getCount();
+ Region->FreeListInfo.PoppedBlocks += PopCount;
- return B;
+ return PopCount;
}
- // Refill the freelist and return one batch.
- NOINLINE TransferBatchT *populateFreeListAndPopBatch(CacheT *C, uptr ClassId,
- RegionInfo *Region)
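+  // Refills the freelist from the region and pops up to `MaxBlockCount` blocks
+  // into `ToArray`; returns 0 if the region is exhausted or mapping fails.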
+ NOINLINE u16 populateFreeListAndPopBlocks(CacheT *C, uptr ClassId,
+ RegionInfo *Region,
+ CompactPtrT *ToArray,
+ const u16 MaxBlockCount)
REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
const uptr Size = getSizeByClassId(ClassId);
const u16 MaxCount = CacheT::getMaxCached(Size);
@@ -994,7 +1004,7 @@ private:
const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
if (UNLIKELY(RegionBase + MappedUser + MapSize > RegionSize)) {
Region->Exhausted = true;
- return nullptr;
+ return 0U;
}
if (UNLIKELY(!Region->MemMapInfo.MemMap.remap(
@@ -1002,7 +1012,7 @@ private:
MAP_ALLOWNOMEM | MAP_RESIZABLE |
(useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG
: 0)))) {
- return nullptr;
+ return 0U;
}
Region->MemMapInfo.MappedUser += MapSize;
C->getStats().add(StatMapped, MapSize);
@@ -1049,8 +1059,9 @@ private:
pushBatchClassBlocks(Region, ShuffleArray, NumberOfBlocks);
}
- TransferBatchT *B = popBatchImpl(C, ClassId, Region);
- DCHECK_NE(B, nullptr);
+ const u16 PopCount =
+ popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ DCHECK_NE(PopCount, 0U);
// Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
// the requests from `PushBlocks` and `PopBatch` which are external
@@ -1062,7 +1073,7 @@ private:
C->getStats().add(StatFree, AllocatedUser);
Region->MemMapInfo.AllocatedUser += AllocatedUser;
- return B;
+ return PopCount;
}
void getStats(ScopedString *Str, uptr ClassId, RegionInfo *Region)
@@ -1186,7 +1197,7 @@ private:
}
// Note that we have extracted the `GroupsToRelease` from region freelist.
- // It's safe to let pushBlocks()/popBatches() access the remaining region
+ // It's safe to let pushBlocks()/popBlocks() access the remaining region
// freelist. In the steps 3 and 4, we will temporarily release the FLLock
// and lock it again before step 5.
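[Editor's note] For readers tracking the interface change above: callers now receive a count of blocks written into a caller-provided array instead of a raw `TransferBatch` pointer. Below is a minimal caller-side sketch, assuming a public `popBlocks()` entry point with the same `ToArray`/`MaxBlockCount` contract as `popBlocksImpl()`; the names `refillFromPrimary` and `Chunks` are illustrative and are not the actual SizeClassAllocatorLocalCache code.

template <class AllocatorT, class CacheT, typename CompactPtrT>
unsigned short refillFromPrimary(AllocatorT *Allocator, CacheT *C,
                                 unsigned long ClassId, CompactPtrT *Chunks,
                                 unsigned short MaxCachedCount) {
  // A single call returns up to MaxCachedCount ready-to-use blocks rather
  // than a whole TransferBatch; 0 means the region is exhausted or mapping
  // more memory failed.
  return Allocator->popBlocks(C, ClassId, Chunks, MaxCachedCount);
}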
diff --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
index 1817151..f64a514 100644
--- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
@@ -237,7 +237,6 @@ struct SmallRegionsConfig {
// For the 32-bit one, it requires actually exhausting memory, so we skip it.
TEST(ScudoPrimaryTest, Primary64OOM) {
using Primary = scudo::SizeClassAllocator64<SmallRegionsConfig>;
- using TransferBatch = Primary::TransferBatchT;
Primary Allocator;
Allocator.init(/*ReleaseToOsInterval=*/-1);
typename Primary::CacheT Cache;
@@ -245,29 +244,26 @@ TEST(ScudoPrimaryTest, Primary64OOM) {
Stats.init();
Cache.init(&Stats, &Allocator);
bool AllocationFailed = false;
- std::vector<TransferBatch *> Batches;
+ std::vector<void *> Blocks;
const scudo::uptr ClassId = Primary::SizeClassMap::LargestClassId;
const scudo::uptr Size = Primary::getSizeByClassId(ClassId);
- typename Primary::CacheT::CompactPtrT Blocks[TransferBatch::MaxNumCached];
+ const scudo::u16 MaxCachedBlockCount = Primary::CacheT::getMaxCached(Size);
for (scudo::uptr I = 0; I < 10000U; I++) {
- TransferBatch *B = Allocator.popBatch(&Cache, ClassId);
- if (!B) {
- AllocationFailed = true;
- break;
+ for (scudo::uptr J = 0; J < MaxCachedBlockCount; ++J) {
+ void *Ptr = Cache.allocate(ClassId);
+ if (Ptr == nullptr) {
+ AllocationFailed = true;
+ break;
+ }
+ memset(Ptr, 'B', Size);
+ Blocks.push_back(Ptr);
}
- for (scudo::u16 J = 0; J < B->getCount(); J++)
- memset(Allocator.decompactPtr(ClassId, B->get(J)), 'B', Size);
- Batches.push_back(B);
- }
- while (!Batches.empty()) {
- TransferBatch *B = Batches.back();
- Batches.pop_back();
- const scudo::u16 Count = B->getCount();
- B->moveToArray(Blocks);
- Allocator.pushBlocks(&Cache, ClassId, Blocks, Count);
- Cache.deallocate(Primary::SizeClassMap::BatchClassId, B);
}
+
+ for (auto *Ptr : Blocks)
+ Cache.deallocate(ClassId, Ptr);
+
Cache.destroy(nullptr);
Allocator.releaseToOS(scudo::ReleaseToOS::Force);
scudo::ScopedString Str;
@@ -342,7 +338,7 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
V.push_back(std::make_pair(ClassId, P));
}
- // Try to interleave pushBlocks(), popBatch() and releaseToOS().
+ // Try to interleave pushBlocks(), popBlocks() and releaseToOS().
Allocator->releaseToOS(scudo::ReleaseToOS::Force);
while (!V.empty()) {
diff --git a/flang/include/flang/Lower/LoweringOptions.def b/flang/include/flang/Lower/LoweringOptions.def
index 503acda..9de69ac 100644
--- a/flang/include/flang/Lower/LoweringOptions.def
+++ b/flang/include/flang/Lower/LoweringOptions.def
@@ -24,8 +24,8 @@ LOWERINGOPT(Name, Bits, Default)
/// If true, lower transpose without a runtime call.
ENUM_LOWERINGOPT(OptimizeTranspose, unsigned, 1, 1)
-/// If true, enable polymorphic type lowering feature. Off by default.
-ENUM_LOWERINGOPT(PolymorphicTypeImpl, unsigned, 1, 0)
+/// If true, enable polymorphic type lowering feature. On by default.
+ENUM_LOWERINGOPT(PolymorphicTypeImpl, unsigned, 1, 1)
/// If true, lower to High level FIR before lowering to FIR. On by default.
ENUM_LOWERINGOPT(LowerToHighLevelFIR, unsigned, 1, 1)
diff --git a/flang/include/flang/Runtime/reduction.h b/flang/include/flang/Runtime/reduction.h
index 6d62f40..5b60776 100644
--- a/flang/include/flang/Runtime/reduction.h
+++ b/flang/include/flang/Runtime/reduction.h
@@ -364,9 +364,12 @@ double RTDECL(Norm2_8)(
#if LDBL_MANT_DIG == 64
long double RTDECL(Norm2_10)(
const Descriptor &, const char *source, int line, int dim = 0);
-#elif LDBL_MANT_DIG == 113
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
long double RTDECL(Norm2_16)(
const Descriptor &, const char *source, int line, int dim = 0);
+void RTDECL(Norm2DimReal16)(
+ Descriptor &, const Descriptor &, int dim, const char *source, int line);
#endif
void RTDECL(Norm2Dim)(
Descriptor &, const Descriptor &, int dim, const char *source, int line);
diff --git a/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp b/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp
index fabbff8..66fbadd 100644
--- a/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp
+++ b/flang/lib/Optimizer/Builder/Runtime/Reduction.cpp
@@ -149,6 +149,22 @@ struct ForcedNorm2Real16 {
}
};
+/// Placeholder for real*16 version of Norm2Dim Intrinsic
+struct ForcedNorm2DimReal16 {
+ static constexpr const char *name = ExpandAndQuoteKey(RTNAME(Norm2DimReal16));
+ static constexpr fir::runtime::FuncTypeBuilderFunc getTypeModel() {
+ return [](mlir::MLIRContext *ctx) {
+ auto boxTy =
+ fir::runtime::getModel<const Fortran::runtime::Descriptor &>()(ctx);
+ auto strTy = fir::ReferenceType::get(mlir::IntegerType::get(ctx, 8));
+ auto intTy = mlir::IntegerType::get(ctx, 8 * sizeof(int));
+ return mlir::FunctionType::get(
+ ctx, {fir::ReferenceType::get(boxTy), boxTy, intTy, strTy, intTy},
+ mlir::NoneType::get(ctx));
+ };
+ }
+};
+
/// Placeholder for real*10 version of Product Intrinsic
struct ForcedProductReal10 {
static constexpr const char *name = ExpandAndQuoteKey(RTNAME(ProductReal10));
@@ -876,7 +892,14 @@ mlir::Value fir::runtime::genMinval(fir::FirOpBuilder &builder,
void fir::runtime::genNorm2Dim(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value resultBox, mlir::Value arrayBox,
mlir::Value dim) {
- auto func = fir::runtime::getRuntimeFunc<mkRTKey(Norm2Dim)>(loc, builder);
+ mlir::func::FuncOp func;
+ auto ty = arrayBox.getType();
+ auto arrTy = fir::dyn_cast_ptrOrBoxEleTy(ty);
+ auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
+ if (eleTy.isF128())
+ func = fir::runtime::getRuntimeFunc<ForcedNorm2DimReal16>(loc, builder);
+ else
+ func = fir::runtime::getRuntimeFunc<mkRTKey(Norm2Dim)>(loc, builder);
auto fTy = func.getFunctionType();
auto sourceFile = fir::factory::locationToFilename(builder, loc);
auto sourceLine =
diff --git a/flang/runtime/Float128Math/CMakeLists.txt b/flang/runtime/Float128Math/CMakeLists.txt
index 8d276e8..f11678c 100644
--- a/flang/runtime/Float128Math/CMakeLists.txt
+++ b/flang/runtime/Float128Math/CMakeLists.txt
@@ -69,6 +69,7 @@ set(sources
log.cpp
log10.cpp
lround.cpp
+ norm2.cpp
pow.cpp
round.cpp
sin.cpp
diff --git a/flang/runtime/Float128Math/math-entries.h b/flang/runtime/Float128Math/math-entries.h
index 8329867..a0d81d0 100644
--- a/flang/runtime/Float128Math/math-entries.h
+++ b/flang/runtime/Float128Math/math-entries.h
@@ -54,6 +54,7 @@ namespace Fortran::runtime {
};
// Define fallback callers.
+DEFINE_FALLBACK(Abs)
DEFINE_FALLBACK(Acos)
DEFINE_FALLBACK(Acosh)
DEFINE_FALLBACK(Asin)
@@ -99,6 +100,7 @@ DEFINE_FALLBACK(Yn)
// Use STD math functions. They provide IEEE-754 128-bit float
// support either via 'long double' or __float128.
// The Bessel's functions are not present in STD namespace.
+DEFINE_SIMPLE_ALIAS(Abs, std::abs)
DEFINE_SIMPLE_ALIAS(Acos, std::acos)
DEFINE_SIMPLE_ALIAS(Acosh, std::acosh)
DEFINE_SIMPLE_ALIAS(Asin, std::asin)
@@ -155,6 +157,7 @@ DEFINE_SIMPLE_ALIAS(Yn, ynl)
#elif HAS_QUADMATHLIB
// Define wrapper callers for libquadmath.
#include "quadmath.h"
+DEFINE_SIMPLE_ALIAS(Abs, fabsq)
DEFINE_SIMPLE_ALIAS(Acos, acosq)
DEFINE_SIMPLE_ALIAS(Acosh, acoshq)
DEFINE_SIMPLE_ALIAS(Asin, asinq)
@@ -191,6 +194,19 @@ DEFINE_SIMPLE_ALIAS(Y0, y0q)
DEFINE_SIMPLE_ALIAS(Y1, y1q)
DEFINE_SIMPLE_ALIAS(Yn, ynq)
#endif
+
+extern "C" {
+// Declarations of the entry points that might be referenced
+// within the Float128Math library itself.
+// Note that not all of these entry points are actually
+// defined in this library. Some of them are used just
+// as template parameters to call the corresponding callee directly.
+CppTypeFor<TypeCategory::Real, 16> RTDECL(AbsF128)(
+ CppTypeFor<TypeCategory::Real, 16> x);
+CppTypeFor<TypeCategory::Real, 16> RTDECL(SqrtF128)(
+ CppTypeFor<TypeCategory::Real, 16> x);
+} // extern "C"
+
} // namespace Fortran::runtime
#endif // FORTRAN_RUNTIME_FLOAT128MATH_MATH_ENTRIES_H_
diff --git a/flang/runtime/Float128Math/norm2.cpp b/flang/runtime/Float128Math/norm2.cpp
new file mode 100644
index 0000000..17453bd
--- /dev/null
+++ b/flang/runtime/Float128Math/norm2.cpp
@@ -0,0 +1,59 @@
+//===-- runtime/Float128Math/norm2.cpp ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "math-entries.h"
+#include "reduction-templates.h"
+#include <cmath>
+
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+
+namespace {
+using namespace Fortran::runtime;
+
+using AccumType = Norm2AccumType<16>;
+
+struct ABSTy {
+ static AccumType compute(AccumType x) {
+ return Sqrt<RTNAME(AbsF128)>::invoke(x);
+ }
+};
+
+struct SQRTTy {
+ static AccumType compute(AccumType x) {
+ return Sqrt<RTNAME(SqrtF128)>::invoke(x);
+ }
+};
+
+using Float128Norm2Accumulator = Norm2Accumulator<16, ABSTy, SQRTTy>;
+} // namespace
+
+namespace Fortran::runtime {
+extern "C" {
+
+CppTypeFor<TypeCategory::Real, 16> RTDEF(Norm2_16)(
+ const Descriptor &x, const char *source, int line, int dim) {
+ auto accumulator{::Float128Norm2Accumulator(x)};
+ return GetTotalReduction<TypeCategory::Real, 16>(
+ x, source, line, dim, nullptr, accumulator, "NORM2");
+}
+
+void RTDEF(Norm2DimReal16)(Descriptor &result, const Descriptor &x, int dim,
+ const char *source, int line) {
+ Terminator terminator{source, line};
+ auto type{x.type().GetCategoryAndKind()};
+ RUNTIME_CHECK(terminator, type);
+ RUNTIME_CHECK(
+ terminator, type->first == TypeCategory::Real && type->second == 16);
+ DoMaxMinNorm2<TypeCategory::Real, 16, ::Float128Norm2Accumulator>(
+ result, x, dim, nullptr, "NORM2", terminator);
+}
+
+} // extern "C"
+} // namespace Fortran::runtime
+
+#endif
diff --git a/flang/runtime/extrema.cpp b/flang/runtime/extrema.cpp
index 3fdc8e1..fc2b4e1 100644
--- a/flang/runtime/extrema.cpp
+++ b/flang/runtime/extrema.cpp
@@ -528,35 +528,6 @@ inline RT_API_ATTRS CppTypeFor<CAT, KIND> TotalNumericMaxOrMin(
NumericExtremumAccumulator<CAT, KIND, IS_MAXVAL>{x}, intrinsic);
}
-template <TypeCategory CAT, int KIND, typename ACCUMULATOR>
-static RT_API_ATTRS void DoMaxMinNorm2(Descriptor &result, const Descriptor &x,
- int dim, const Descriptor *mask, const char *intrinsic,
- Terminator &terminator) {
- using Type = CppTypeFor<CAT, KIND>;
- ACCUMULATOR accumulator{x};
- if (dim == 0 || x.rank() == 1) {
- // Total reduction
-
- // Element size of the destination descriptor is the same
- // as the element size of the source.
- result.Establish(x.type(), x.ElementBytes(), nullptr, 0, nullptr,
- CFI_attribute_allocatable);
- if (int stat{result.Allocate()}) {
- terminator.Crash(
- "%s: could not allocate memory for result; STAT=%d", intrinsic, stat);
- }
- DoTotalReduction<Type>(x, dim, mask, accumulator, intrinsic, terminator);
- accumulator.GetResult(result.OffsetElement<Type>());
- } else {
- // Partial reduction
-
- // Element size of the destination descriptor is the same
- // as the element size of the source.
- PartialReduction<ACCUMULATOR, CAT, KIND>(result, x, x.ElementBytes(), dim,
- mask, terminator, intrinsic, accumulator);
- }
-}
-
template <TypeCategory CAT, bool IS_MAXVAL> struct MaxOrMinHelper {
template <int KIND> struct Functor {
RT_API_ATTRS void operator()(Descriptor &result, const Descriptor &x,
@@ -802,66 +773,11 @@ RT_EXT_API_GROUP_END
// NORM2
-RT_VAR_GROUP_BEGIN
-
-// Use at least double precision for accumulators.
-// Don't use __float128, it doesn't work with abs() or sqrt() yet.
-static constexpr RT_CONST_VAR_ATTRS int largestLDKind {
-#if LDBL_MANT_DIG == 113
- 16
-#elif LDBL_MANT_DIG == 64
- 10
-#else
- 8
-#endif
-};
-
-RT_VAR_GROUP_END
-
-template <int KIND> class Norm2Accumulator {
-public:
- using Type = CppTypeFor<TypeCategory::Real, KIND>;
- using AccumType =
- CppTypeFor<TypeCategory::Real, std::clamp(KIND, 8, largestLDKind)>;
- explicit RT_API_ATTRS Norm2Accumulator(const Descriptor &array)
- : array_{array} {}
- RT_API_ATTRS void Reinitialize() { max_ = sum_ = 0; }
- template <typename A>
- RT_API_ATTRS void GetResult(A *p, int /*zeroBasedDim*/ = -1) const {
- // m * sqrt(1 + sum((others(:)/m)**2))
- *p = static_cast<Type>(max_ * std::sqrt(1 + sum_));
- }
- RT_API_ATTRS bool Accumulate(Type x) {
- auto absX{std::abs(static_cast<AccumType>(x))};
- if (!max_) {
- max_ = absX;
- } else if (absX > max_) {
- auto t{max_ / absX}; // < 1.0
- auto tsq{t * t};
- sum_ *= tsq; // scale sum to reflect change to the max
- sum_ += tsq; // include a term for the previous max
- max_ = absX;
- } else { // absX <= max_
- auto t{absX / max_};
- sum_ += t * t;
- }
- return true;
- }
- template <typename A>
- RT_API_ATTRS bool AccumulateAt(const SubscriptValue at[]) {
- return Accumulate(*array_.Element<A>(at));
- }
-
-private:
- const Descriptor &array_;
- AccumType max_{0}; // value (m) with largest magnitude
- AccumType sum_{0}; // sum((others(:)/m)**2)
-};
-
template <int KIND> struct Norm2Helper {
RT_API_ATTRS void operator()(Descriptor &result, const Descriptor &x, int dim,
const Descriptor *mask, Terminator &terminator) const {
- DoMaxMinNorm2<TypeCategory::Real, KIND, Norm2Accumulator<KIND>>(
+ DoMaxMinNorm2<TypeCategory::Real, KIND,
+ typename Norm2AccumulatorGetter<KIND>::Type>(
result, x, dim, mask, "NORM2", terminator);
}
};
@@ -872,26 +788,27 @@ RT_EXT_API_GROUP_BEGIN
// TODO: REAL(2 & 3)
CppTypeFor<TypeCategory::Real, 4> RTDEF(Norm2_4)(
const Descriptor &x, const char *source, int line, int dim) {
- return GetTotalReduction<TypeCategory::Real, 4>(
- x, source, line, dim, nullptr, Norm2Accumulator<4>{x}, "NORM2");
+ return GetTotalReduction<TypeCategory::Real, 4>(x, source, line, dim, nullptr,
+ Norm2AccumulatorGetter<4>::create(x), "NORM2");
}
CppTypeFor<TypeCategory::Real, 8> RTDEF(Norm2_8)(
const Descriptor &x, const char *source, int line, int dim) {
- return GetTotalReduction<TypeCategory::Real, 8>(
- x, source, line, dim, nullptr, Norm2Accumulator<8>{x}, "NORM2");
+ return GetTotalReduction<TypeCategory::Real, 8>(x, source, line, dim, nullptr,
+ Norm2AccumulatorGetter<8>::create(x), "NORM2");
}
#if LDBL_MANT_DIG == 64
CppTypeFor<TypeCategory::Real, 10> RTDEF(Norm2_10)(
const Descriptor &x, const char *source, int line, int dim) {
- return GetTotalReduction<TypeCategory::Real, 10>(
- x, source, line, dim, nullptr, Norm2Accumulator<10>{x}, "NORM2");
+ return GetTotalReduction<TypeCategory::Real, 10>(x, source, line, dim,
+ nullptr, Norm2AccumulatorGetter<10>::create(x), "NORM2");
}
#endif
#if LDBL_MANT_DIG == 113
+// The __float128 implementation resides in the FortranFloat128Math library.
CppTypeFor<TypeCategory::Real, 16> RTDEF(Norm2_16)(
const Descriptor &x, const char *source, int line, int dim) {
- return GetTotalReduction<TypeCategory::Real, 16>(
- x, source, line, dim, nullptr, Norm2Accumulator<16>{x}, "NORM2");
+ return GetTotalReduction<TypeCategory::Real, 16>(x, source, line, dim,
+ nullptr, Norm2AccumulatorGetter<16>::create(x), "NORM2");
}
#endif
@@ -901,7 +818,7 @@ void RTDEF(Norm2Dim)(Descriptor &result, const Descriptor &x, int dim,
auto type{x.type().GetCategoryAndKind()};
RUNTIME_CHECK(terminator, type);
if (type->first == TypeCategory::Real) {
- ApplyFloatingPointKind<Norm2Helper, void>(
+ ApplyFloatingPointKind<Norm2Helper, void, true>(
type->second, terminator, result, x, dim, nullptr, terminator);
} else {
terminator.Crash("NORM2: bad type code %d", x.type().raw());
diff --git a/flang/runtime/reduction-templates.h b/flang/runtime/reduction-templates.h
index 7d0f82d..0891bc0 100644
--- a/flang/runtime/reduction-templates.h
+++ b/flang/runtime/reduction-templates.h
@@ -25,6 +25,7 @@
#include "tools.h"
#include "flang/Runtime/cpp-type.h"
#include "flang/Runtime/descriptor.h"
+#include <algorithm>
namespace Fortran::runtime {
@@ -332,5 +333,119 @@ template <typename ACCUMULATOR> struct PartialLocationHelper {
};
};
+// NORM2 templates
+
+RT_VAR_GROUP_BEGIN
+
+// Use at least double precision for accumulators.
+// Don't use __float128, it doesn't work with abs() or sqrt() yet.
+static constexpr RT_CONST_VAR_ATTRS int Norm2LargestLDKind {
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+ 16
+#elif LDBL_MANT_DIG == 64
+ 10
+#else
+ 8
+#endif
+};
+
+RT_VAR_GROUP_END
+
+template <TypeCategory CAT, int KIND, typename ACCUMULATOR>
+inline RT_API_ATTRS void DoMaxMinNorm2(Descriptor &result, const Descriptor &x,
+ int dim, const Descriptor *mask, const char *intrinsic,
+ Terminator &terminator) {
+ using Type = CppTypeFor<CAT, KIND>;
+ ACCUMULATOR accumulator{x};
+ if (dim == 0 || x.rank() == 1) {
+ // Total reduction
+
+ // Element size of the destination descriptor is the same
+ // as the element size of the source.
+ result.Establish(x.type(), x.ElementBytes(), nullptr, 0, nullptr,
+ CFI_attribute_allocatable);
+ if (int stat{result.Allocate()}) {
+ terminator.Crash(
+ "%s: could not allocate memory for result; STAT=%d", intrinsic, stat);
+ }
+ DoTotalReduction<Type>(x, dim, mask, accumulator, intrinsic, terminator);
+ accumulator.GetResult(result.OffsetElement<Type>());
+ } else {
+ // Partial reduction
+
+ // Element size of the destination descriptor is the same
+ // as the element size of the source.
+ PartialReduction<ACCUMULATOR, CAT, KIND>(result, x, x.ElementBytes(), dim,
+ mask, terminator, intrinsic, accumulator);
+ }
+}
+
+// The data type used by Norm2Accumulator.
+template <int KIND>
+using Norm2AccumType =
+ CppTypeFor<TypeCategory::Real, std::clamp(KIND, 8, Norm2LargestLDKind)>;
+
+template <int KIND, typename ABS, typename SQRT> class Norm2Accumulator {
+public:
+ using Type = CppTypeFor<TypeCategory::Real, KIND>;
+ using AccumType = Norm2AccumType<KIND>;
+ explicit RT_API_ATTRS Norm2Accumulator(const Descriptor &array)
+ : array_{array} {}
+ RT_API_ATTRS void Reinitialize() { max_ = sum_ = 0; }
+ template <typename A>
+ RT_API_ATTRS void GetResult(A *p, int /*zeroBasedDim*/ = -1) const {
+ // m * sqrt(1 + sum((others(:)/m)**2))
+ *p = static_cast<Type>(max_ * SQRT::compute(1 + sum_));
+ }
+ RT_API_ATTRS bool Accumulate(Type x) {
+ auto absX{ABS::compute(static_cast<AccumType>(x))};
+ if (!max_) {
+ max_ = absX;
+ } else if (absX > max_) {
+ auto t{max_ / absX}; // < 1.0
+ auto tsq{t * t};
+ sum_ *= tsq; // scale sum to reflect change to the max
+ sum_ += tsq; // include a term for the previous max
+ max_ = absX;
+ } else { // absX <= max_
+ auto t{absX / max_};
+ sum_ += t * t;
+ }
+ return true;
+ }
+ template <typename A>
+ RT_API_ATTRS bool AccumulateAt(const SubscriptValue at[]) {
+ return Accumulate(*array_.Element<A>(at));
+ }
+
+private:
+ const Descriptor &array_;
+ AccumType max_{0}; // value (m) with largest magnitude
+ AccumType sum_{0}; // sum((others(:)/m)**2)
+};
+
+// Helper class for creating a Norm2Accumulator instance
+// based on the given KIND. This helper returns an instance
+// that uses std::abs and std::sqrt for the computations.
+template <int KIND> class Norm2AccumulatorGetter {
+ using AccumType = Norm2AccumType<KIND>;
+
+public:
+ struct ABSTy {
+ static constexpr RT_API_ATTRS AccumType compute(AccumType &&x) {
+ return std::abs(std::forward<AccumType>(x));
+ }
+ };
+ struct SQRTTy {
+ static constexpr RT_API_ATTRS AccumType compute(AccumType &&x) {
+ return std::sqrt(std::forward<AccumType>(x));
+ }
+ };
+
+ using Type = Norm2Accumulator<KIND, ABSTy, SQRTTy>;
+
+ static RT_API_ATTRS Type create(const Descriptor &x) { return Type(x); }
+};
+
} // namespace Fortran::runtime
#endif // FORTRAN_RUNTIME_REDUCTION_TEMPLATES_H_
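[Editor's note] As a side note on `Norm2Accumulator` above: the overflow-avoiding recurrence it implements can be exercised on its own. Here is a minimal standalone sketch in plain double precision, using `std::abs`/`std::sqrt` in the roles that `ABSTy`/`SQRTTy` play for the KIND-specific types; the function name `stableNorm2` is illustrative only.

#include <cmath>
#include <cstddef>

// Tracks the running maximum m and sum((others(:)/m)**2), mirroring
// Norm2Accumulator::Accumulate(), and returns m * sqrt(1 + sum).
double stableNorm2(const double *x, std::size_t n) {
  double max = 0, sum = 0;
  for (std::size_t i = 0; i < n; ++i) {
    const double absX = std::abs(x[i]);
    if (max == 0) {
      max = absX;
    } else if (absX > max) {
      const double t = max / absX; // < 1.0
      const double tsq = t * t;
      sum = sum * tsq + tsq; // rescale sum, add a term for the previous max
      max = absX;
    } else { // absX <= max, max > 0
      const double t = absX / max;
      sum += t * t;
    }
  }
  return max * std::sqrt(1 + sum);
}

For instance, stableNorm2 on {3e200, 4e200} yields 5e200 even though squaring either element directly would overflow a double.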
diff --git a/flang/runtime/tools.h b/flang/runtime/tools.h
index 89e5069..c1f89ca 100644
--- a/flang/runtime/tools.h
+++ b/flang/runtime/tools.h
@@ -266,7 +266,8 @@ inline RT_API_ATTRS RESULT ApplyIntegerKind(
}
}
-template <template <int KIND> class FUNC, typename RESULT, typename... A>
+template <template <int KIND> class FUNC, typename RESULT,
+ bool NEEDSMATH = false, typename... A>
inline RT_API_ATTRS RESULT ApplyFloatingPointKind(
int kind, Terminator &terminator, A &&...x) {
switch (kind) {
@@ -287,7 +288,13 @@ inline RT_API_ATTRS RESULT ApplyFloatingPointKind(
break;
case 16:
if constexpr (HasCppTypeFor<TypeCategory::Real, 16>) {
- return FUNC<16>{}(std::forward<A>(x)...);
+      // If the FUNC implementation relies on FP math functions,
+      // then we should not be here. The compiler should have
+      // generated a call to an entry in the FortranFloat128Math
+      // library.
+ if constexpr (!NEEDSMATH) {
+ return FUNC<16>{}(std::forward<A>(x)...);
+ }
}
break;
}
diff --git a/flang/test/Driver/flang-experimental-polymorphism-flag.f90 b/flang/test/Driver/flang-experimental-polymorphism-flag.f90
index 106e898..095c1cc 100644
--- a/flang/test/Driver/flang-experimental-polymorphism-flag.f90
+++ b/flang/test/Driver/flang-experimental-polymorphism-flag.f90
@@ -1,10 +1,10 @@
! Test -flang-experimental-polymorphism flag
! RUN: %flang_fc1 -flang-experimental-polymorphism -emit-fir -o - %s | FileCheck %s
-! RUN: not %flang_fc1 -emit-fir -o - %s 2>&1 | FileCheck %s --check-prefix NO-POLYMORPHISM
+! RUN: %flang_fc1 -emit-fir -o - %s 2>&1 | FileCheck %s --check-prefix NO-POLYMORPHISM
! CHECK: func.func @_QPtest(%{{.*}}: !fir.class<none> {fir.bindc_name = "poly"})
subroutine test(poly)
class(*) :: poly
end subroutine test
-! NO-POLYMORPHISM: not yet implemented: support for polymorphic types
+! NO-POLYMORPHISM: func.func @_QPtest
diff --git a/flang/test/Lower/Intrinsics/norm2.f90 b/flang/test/Lower/Intrinsics/norm2.f90
index f14cad59..0d125e3 100644
--- a/flang/test/Lower/Intrinsics/norm2.f90
+++ b/flang/test/Lower/Intrinsics/norm2.f90
@@ -76,3 +76,19 @@ subroutine norm2_test_dim_3(a,r)
! CHECK-DAG: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>) -> !fir.heap<!fir.array<?x?xf32>>
! CHECK-DAG: fir.freemem %[[addr]]
end subroutine norm2_test_dim_3
+
+! CHECK-LABEL: func @_QPnorm2_test_real16(
+! CHECK-SAME: %[[arg0:.*]]: !fir.box<!fir.array<?x?x?xf128>>{{.*}}, %[[arg1:.*]]: !fir.box<!fir.array<?x?xf128>>{{.*}})
+subroutine norm2_test_real16(a,r)
+ real(16) :: a(:,:,:)
+ real(16) :: r(:,:)
+ ! CHECK-DAG: %[[dim:.*]] = arith.constant 3 : i32
+ ! CHECK-DAG: %[[r:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x?xf128>>>
+ ! CHECK-DAG: %[[res:.*]] = fir.convert %[[r]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf128>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK: %[[arr:.*]] = fir.convert %[[arg0]] : (!fir.box<!fir.array<?x?x?xf128>>) -> !fir.box<none>
+ r = norm2(a,dim=3)
+ ! CHECK: %{{.*}} = fir.call @_FortranANorm2DimReal16(%[[res]], %[[arr]], %[[dim]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32, !fir.ref<i8>, i32) -> none
+ ! CHECK: %[[box:.*]] = fir.load %[[r]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf128>>>>
+ ! CHECK-DAG: %[[addr:.*]] = fir.box_addr %[[box]] : (!fir.box<!fir.heap<!fir.array<?x?xf128>>>) -> !fir.heap<!fir.array<?x?xf128>>
+ ! CHECK-DAG: fir.freemem %[[addr]]
+end subroutine norm2_test_real16
diff --git a/libc/.clang-tidy b/libc/.clang-tidy
index 5adada9..dbde889 100644
--- a/libc/.clang-tidy
+++ b/libc/.clang-tidy
@@ -26,5 +26,7 @@ CheckOptions:
value: UPPER_CASE
- key: readability-identifier-naming.ConstexprVariableCase
value: UPPER_CASE
+ - key: readability-identifier-naming.ConstexprFunctionCase
+ value: lower_case
- key: readability-identifier-naming.GetConfigPerFile
value: true
diff --git a/libc/cmake/modules/compiler_features/check_float128.cpp b/libc/cmake/modules/compiler_features/check_float128.cpp
index 8b1e3fe..20f889c 100644
--- a/libc/cmake/modules/compiler_features/check_float128.cpp
+++ b/libc/cmake/modules/compiler_features/check_float128.cpp
@@ -1,4 +1,4 @@
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
#ifndef LIBC_COMPILER_HAS_FLOAT128
#error unsupported
diff --git a/libc/config/baremetal/api.td b/libc/config/baremetal/api.td
index d6897fb..3da83d9 100644
--- a/libc/config/baremetal/api.td
+++ b/libc/config/baremetal/api.td
@@ -2,6 +2,42 @@ include "config/public_api.td"
include "spec/stdc.td"
+def AssertMacro : MacroDef<"assert"> {
+ let Defn = [{
+ #undef assert
+
+ #ifdef NDEBUG
+ #define assert(e) (void)0
+ #else
+
+ #ifdef __cplusplus
+ extern "C"
+ #endif
+ _Noreturn void __assert_fail(const char *, const char *, unsigned, const char *) __NOEXCEPT;
+
+ #define assert(e) \
+ ((e) ? (void)0 : __assert_fail(#e, __FILE__, __LINE__, __PRETTY_FUNCTION__))
+
+ #endif
+ }];
+}
+
+def StaticAssertMacro : MacroDef<"static_assert"> {
+ let Defn = [{
+ #ifndef __cplusplus
+ #undef static_assert
+ #define static_assert _Static_assert
+ #endif
+ }];
+}
+
+def AssertAPI : PublicAPI<"assert.h"> {
+ let Macros = [
+ AssertMacro,
+ StaticAssertMacro,
+ ];
+}
+
def CTypeAPI : PublicAPI<"ctype.h"> {
}
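[Editor's note] To make the assert macro above concrete: a short sketch of what a call site compiles to with this definition when NDEBUG is not defined. The function `checked_divide` is illustrative; `__assert_fail` is the libc.src.assert.__assert_fail entrypoint added to the baremetal entrypoint lists below.

extern "C" void __assert_fail(const char *assertion, const char *file,
                              unsigned line, const char *function);

int checked_divide(int a, int b) {
  // assert(b != 0); expands to the ternary below; with NDEBUG defined it
  // would compile to (void)0 instead.
  ((b != 0) ? (void)0
            : __assert_fail("b != 0", __FILE__, __LINE__,
                            __PRETTY_FUNCTION__));
  return a / b;
}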
diff --git a/libc/config/baremetal/arm/entrypoints.txt b/libc/config/baremetal/arm/entrypoints.txt
index 608ac46..a61d9fe 100644
--- a/libc/config/baremetal/arm/entrypoints.txt
+++ b/libc/config/baremetal/arm/entrypoints.txt
@@ -1,4 +1,7 @@
set(TARGET_LIBC_ENTRYPOINTS
+ # assert.h entrypoints
+ libc.src.assert.__assert_fail
+
# ctype.h entrypoints
libc.src.ctype.isalnum
libc.src.ctype.isalpha
diff --git a/libc/config/baremetal/arm/headers.txt b/libc/config/baremetal/arm/headers.txt
index 38899fa..4c02ac8 100644
--- a/libc/config/baremetal/arm/headers.txt
+++ b/libc/config/baremetal/arm/headers.txt
@@ -1,4 +1,5 @@
set(TARGET_PUBLIC_HEADERS
+ libc.include.assert
libc.include.ctype
libc.include.fenv
libc.include.errno
diff --git a/libc/config/baremetal/riscv/entrypoints.txt b/libc/config/baremetal/riscv/entrypoints.txt
index 2f299e99..533f9f9 100644
--- a/libc/config/baremetal/riscv/entrypoints.txt
+++ b/libc/config/baremetal/riscv/entrypoints.txt
@@ -1,4 +1,7 @@
set(TARGET_LIBC_ENTRYPOINTS
+ # assert.h entrypoints
+ libc.src.assert.__assert_fail
+
# ctype.h entrypoints
libc.src.ctype.isalnum
libc.src.ctype.isalpha
diff --git a/libc/config/baremetal/riscv/headers.txt b/libc/config/baremetal/riscv/headers.txt
index 38899fa..4c02ac8 100644
--- a/libc/config/baremetal/riscv/headers.txt
+++ b/libc/config/baremetal/riscv/headers.txt
@@ -1,4 +1,5 @@
set(TARGET_PUBLIC_HEADERS
+ libc.include.assert
libc.include.ctype
libc.include.fenv
libc.include.errno
diff --git a/libc/docs/dev/code_style.rst b/libc/docs/dev/code_style.rst
index eeeced0..c76f887 100644
--- a/libc/docs/dev/code_style.rst
+++ b/libc/docs/dev/code_style.rst
@@ -47,7 +47,7 @@ We define two kinds of macros:
e.g., ``LIBC_COMPILER_IS_CLANG``.
* ``cpu_features.h`` - Target cpu feature availability.
e.g., ``LIBC_TARGET_CPU_HAS_AVX2``.
- * ``float.h`` - Floating point type properties and availability.
+ * ``types.h`` - Type properties and availability.
e.g., ``LIBC_COMPILER_HAS_FLOAT128``.
* ``os.h`` - Target os properties.
e.g., ``LIBC_TARGET_OS_IS_LINUX``.
diff --git a/libc/include/__llvm-libc-common.h b/libc/include/__llvm-libc-common.h
index 6b883ee..3af0b08 100644
--- a/libc/include/__llvm-libc-common.h
+++ b/libc/include/__llvm-libc-common.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC___COMMON_H
-#define LLVM_LIBC___COMMON_H
+#ifndef LLVM_LIBC_COMMON_H
+#define LLVM_LIBC_COMMON_H
#ifdef __cplusplus
@@ -51,4 +51,4 @@
#endif // __cplusplus
-#endif // LLVM_LIBC___COMMON_H
+#endif // LLVM_LIBC_COMMON_H
diff --git a/libc/include/llvm-libc-macros/containerof-macro.h b/libc/include/llvm-libc-macros/containerof-macro.h
index ea91fa7..62724ab 100644
--- a/libc/include/llvm-libc-macros/containerof-macro.h
+++ b/libc/include/llvm-libc-macros/containerof-macro.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_CONTAINEROF_MACRO_H
-#define __LLVM_LIBC_MACROS_CONTAINEROF_MACRO_H
+#ifndef LLVM_LIBC_MACROS_CONTAINEROF_MACRO_H
+#define LLVM_LIBC_MACROS_CONTAINEROF_MACRO_H
#include <llvm-libc-macros/offsetof-macro.h>
@@ -17,4 +17,4 @@
(type *)(void *)((const char *)__ptr - offsetof(type, member)); \
})
-#endif // __LLVM_LIBC_MACROS_CONTAINEROF_MACRO_H
+#endif // LLVM_LIBC_MACROS_CONTAINEROF_MACRO_H
diff --git a/libc/include/llvm-libc-macros/fcntl-macros.h b/libc/include/llvm-libc-macros/fcntl-macros.h
index 448dcc0..4bd03a7 100644
--- a/libc/include/llvm-libc-macros/fcntl-macros.h
+++ b/libc/include/llvm-libc-macros/fcntl-macros.h
@@ -1,8 +1,8 @@
-#ifndef __LLVM_LIBC_MACROS_FCNTL_MACROS_H
-#define __LLVM_LIBC_MACROS_FCNTL_MACROS_H
+#ifndef LLVM_LIBC_MACROS_FCNTL_MACROS_H
+#define LLVM_LIBC_MACROS_FCNTL_MACROS_H
#ifdef __linux__
#include "linux/fcntl-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_FCNTL_MACROS_H
+#endif // LLVM_LIBC_MACROS_FCNTL_MACROS_H
diff --git a/libc/include/llvm-libc-macros/features-macros.h b/libc/include/llvm-libc-macros/features-macros.h
index 2938b3c..5bc87a6 100644
--- a/libc/include/llvm-libc-macros/features-macros.h
+++ b/libc/include/llvm-libc-macros/features-macros.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_FEATURES_MACROS_H
-#define __LLVM_LIBC_MACROS_FEATURES_MACROS_H
+#ifndef LLVM_LIBC_MACROS_FEATURES_MACROS_H
+#define LLVM_LIBC_MACROS_FEATURES_MACROS_H
#define __LLVM_LIBC__ 1
-#endif // __LLVM_LIBC_MACROS_FEATURES_MACROS_H
+#endif // LLVM_LIBC_MACROS_FEATURES_MACROS_H
diff --git a/libc/include/llvm-libc-macros/fenv-macros.h b/libc/include/llvm-libc-macros/fenv-macros.h
index cc0ea34..72ac660 100644
--- a/libc/include/llvm-libc-macros/fenv-macros.h
+++ b/libc/include/llvm-libc-macros/fenv-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_FENV_MACROS_H
-#define __LLVM_LIBC_MACROS_FENV_MACROS_H
+#ifndef LLVM_LIBC_MACROS_FENV_MACROS_H
+#define LLVM_LIBC_MACROS_FENV_MACROS_H
#define FE_DIVBYZERO 1
#define FE_INEXACT 2
@@ -24,4 +24,4 @@
#define FE_DFL_ENV ((fenv_t *)-1)
-#endif // __LLVM_LIBC_MACROS_FENV_MACROS_H
+#endif // LLVM_LIBC_MACROS_FENV_MACROS_H
diff --git a/libc/include/llvm-libc-macros/file-seek-macros.h b/libc/include/llvm-libc-macros/file-seek-macros.h
index 04f3979..676cb75 100644
--- a/libc/include/llvm-libc-macros/file-seek-macros.h
+++ b/libc/include/llvm-libc-macros/file-seek-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_FILE_SEEK_MACROS_H
-#define __LLVM_LIBC_MACROS_FILE_SEEK_MACROS_H
+#ifndef LLVM_LIBC_MACROS_FILE_SEEK_MACROS_H
+#define LLVM_LIBC_MACROS_FILE_SEEK_MACROS_H
#define SEEK_SET 0
#define SEEK_CUR 1
#define SEEK_END 2
-#endif // __LLVM_LIBC_MACROS_FILE_SEEK_MACROS_H
+#endif // LLVM_LIBC_MACROS_FILE_SEEK_MACROS_H
diff --git a/libc/include/llvm-libc-macros/float-macros.h b/libc/include/llvm-libc-macros/float-macros.h
index 86ec493..a51eab0 100644
--- a/libc/include/llvm-libc-macros/float-macros.h
+++ b/libc/include/llvm-libc-macros/float-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_FLOAT_MACROS_H
-#define __LLVM_LIBC_MACROS_FLOAT_MACROS_H
+#ifndef LLVM_LIBC_MACROS_FLOAT_MACROS_H
+#define LLVM_LIBC_MACROS_FLOAT_MACROS_H
// Suppress `#include_next is a language extension` warnings.
#ifdef __clang__
@@ -169,4 +169,4 @@
// TODO: Add FLT16 and FLT128 constants.
-#endif // __LLVM_LIBC_MACROS_FLOAT_MACROS_H
+#endif // LLVM_LIBC_MACROS_FLOAT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/generic-error-number-macros.h b/libc/include/llvm-libc-macros/generic-error-number-macros.h
index 3805c95..7ee0352 100644
--- a/libc/include/llvm-libc-macros/generic-error-number-macros.h
+++ b/libc/include/llvm-libc-macros/generic-error-number-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_GENERIC_ERROR_NUMBER_MACROS_H
-#define __LLVM_LIBC_MACROS_GENERIC_ERROR_NUMBER_MACROS_H
+#ifndef LLVM_LIBC_MACROS_GENERIC_ERROR_NUMBER_MACROS_H
+#define LLVM_LIBC_MACROS_GENERIC_ERROR_NUMBER_MACROS_H
#define EPERM 1
#define ENOENT 2
@@ -45,4 +45,4 @@
#define ERANGE 34
#define EILSEQ 35
-#endif // __LLVM_LIBC_MACROS_GENERIC_ERROR_NUMBER_MACROS_H
+#endif // LLVM_LIBC_MACROS_GENERIC_ERROR_NUMBER_MACROS_H
diff --git a/libc/include/llvm-libc-macros/gpu/time-macros.h b/libc/include/llvm-libc-macros/gpu/time-macros.h
index baf2ea5..c3dc812 100644
--- a/libc/include/llvm-libc-macros/gpu/time-macros.h
+++ b/libc/include/llvm-libc-macros/gpu/time-macros.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_GPU_TIME_MACROS_H
-#define __LLVM_LIBC_MACROS_GPU_TIME_MACROS_H
+#ifndef LLVM_LIBC_MACROS_GPU_TIME_MACROS_H
+#define LLVM_LIBC_MACROS_GPU_TIME_MACROS_H
#define CLOCKS_PER_SEC 1000000
-#endif // __LLVM_LIBC_MACROS_GPU_TIME_MACROS_H
+#endif // LLVM_LIBC_MACROS_GPU_TIME_MACROS_H
diff --git a/libc/include/llvm-libc-macros/inttypes-macros.h b/libc/include/llvm-libc-macros/inttypes-macros.h
index fc3e251..8e7d4f5 100644
--- a/libc/include/llvm-libc-macros/inttypes-macros.h
+++ b/libc/include/llvm-libc-macros/inttypes-macros.h
@@ -5,8 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_INTTYPES_MACROS_H
-#define __LLVM_LIBC_MACROS_INTTYPES_MACROS_H
+#ifndef LLVM_LIBC_MACROS_INTTYPES_MACROS_H
+#define LLVM_LIBC_MACROS_INTTYPES_MACROS_H
// fprintf/scanf format macros.
// POSIX.1-2008, Technical Corrigendum 1, XBD/TC1-2008/0050 [211] is applied.
@@ -286,4 +286,4 @@
#define SCNxMAX __UINTMAX_FMTx__
#define SCNxPTR __UINTPTR_FMTx__
-#endif // __LLVM_LIBC_MACROS_INTTYPES_MACROS_H
+#endif // LLVM_LIBC_MACROS_INTTYPES_MACROS_H
diff --git a/libc/include/llvm-libc-macros/limits-macros.h b/libc/include/llvm-libc-macros/limits-macros.h
index 3b4df58..95f0f5f 100644
--- a/libc/include/llvm-libc-macros/limits-macros.h
+++ b/libc/include/llvm-libc-macros/limits-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LIMITS_MACROS_H
-#define __LLVM_LIBC_MACROS_LIMITS_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LIMITS_MACROS_H
+#define LLVM_LIBC_MACROS_LIMITS_MACROS_H
// Define all C23 macro constants of limits.h
@@ -225,4 +225,4 @@
#define ULLONG_MIN 0ULL
#endif // ULLONG_MIN
-#endif // __LLVM_LIBC_MACROS_LIMITS_MACROS_H
+#endif // LLVM_LIBC_MACROS_LIMITS_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/fcntl-macros.h b/libc/include/llvm-libc-macros/linux/fcntl-macros.h
index 495c5ec..1d4e5bb 100644
--- a/libc/include/llvm-libc-macros/linux/fcntl-macros.h
+++ b/libc/include/llvm-libc-macros/linux/fcntl-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_FCNTL_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_FCNTL_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_FCNTL_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_FCNTL_MACROS_H
// File creation flags
#define O_CLOEXEC 02000000
@@ -68,4 +68,4 @@
#define F_GETFL 3
#define F_SETFL 4
-#endif // __LLVM_LIBC_MACROS_LINUX_FCNTL_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_FCNTL_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sched-macros.h b/libc/include/llvm-libc-macros/linux/sched-macros.h
index 0c57444..ace6200 100644
--- a/libc/include/llvm-libc-macros/linux/sched-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sched-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SCHED_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SCHED_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SCHED_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SCHED_MACROS_H
// Definitions of SCHED_* macros must match what Linux defines at:
// https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/sched.h
@@ -26,4 +26,4 @@
#define CPU_COUNT_S(setsize, set) __sched_getcpucount(setsize, set)
#define CPU_COUNT(set) CPU_COUNT_S(sizeof(cpu_set_t), set)
-#endif // __LLVM_LIBC_MACROS_LINUX_SCHED_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SCHED_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/signal-macros.h b/libc/include/llvm-libc-macros/linux/signal-macros.h
index deb190e..e379fc4 100644
--- a/libc/include/llvm-libc-macros/linux/signal-macros.h
+++ b/libc/include/llvm-libc-macros/linux/signal-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SIGNUM_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SIGNUM_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SIGNAL_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SIGNAL_MACROS_H
#define SIGHUP 1
#define SIGINT 2
@@ -101,4 +101,4 @@
#define CLD_STOPPED 5 // child has stopped
#define CLD_CONTINUED 6 // stopped child has continued
-#endif // __LLVM_LIBC_MACROS_LINUX_SIGNUM_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SIGNAL_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sys-ioctl-macros.h b/libc/include/llvm-libc-macros/linux/sys-ioctl-macros.h
index 8f13a0e..5eb779a 100644
--- a/libc/include/llvm-libc-macros/linux/sys-ioctl-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sys-ioctl-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SYS_IOCTL_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SYS_IOCTL_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SYS_IOCTL_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SYS_IOCTL_MACROS_H
// TODO (michaelrj): Finish defining these macros.
// Just defining this macro for the moment since it's all that we need right
@@ -16,4 +16,4 @@
// think is worth digging into right now.
#define TIOCGETD 0x5424
-#endif // __LLVM_LIBC_MACROS_LINUX_SYS_IOCTL_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SYS_IOCTL_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sys-random-macros.h b/libc/include/llvm-libc-macros/linux/sys-random-macros.h
index 1337f8b..9261e87 100644
--- a/libc/include/llvm-libc-macros/linux/sys-random-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sys-random-macros.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SYS_RANDOM_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SYS_RANDOM_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SYS_RANDOM_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SYS_RANDOM_MACROS_H
// Getrandom flags
#define GRND_RANDOM 0x0001
#define GRND_NONBLOCK 0x0002
#define GRND_INSECURE 0x0004
-#endif // __LLVM_LIBC_MACROS_LINUX_SYS_RANDOM_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SYS_RANDOM_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sys-resource-macros.h b/libc/include/llvm-libc-macros/linux/sys-resource-macros.h
index dd26553..c9d93c3 100644
--- a/libc/include/llvm-libc-macros/linux/sys-resource-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sys-resource-macros.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_MACROS_LINUX_SYS_RESOURCE_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SYS_RESOURCE_MACROS_H
+
#define RLIMIT_CPU 0
#define RLIMIT_FSIZE 1
#define RLIMIT_DATA 2
@@ -24,3 +27,5 @@
#define RLIMIT_RTTIME 15
#define RLIM_INFINITY (~0UL)
+
+#endif // LLVM_LIBC_MACROS_LINUX_SYS_RESOURCE_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sys-socket-macros.h b/libc/include/llvm-libc-macros/linux/sys-socket-macros.h
index 7de4102..f335200 100644
--- a/libc/include/llvm-libc-macros/linux/sys-socket-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sys-socket-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SYS_SOCKET_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SYS_SOCKET_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SYS_SOCKET_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SYS_SOCKET_MACROS_H
// IEEE Std 1003.1-2017 - basedefs/sys_socket.h.html
// Macro values come from the Linux syscall interface.
@@ -25,4 +25,4 @@
#define SOCK_SEQPACKET 5
#define SOCK_PACKET 10
-#endif // __LLVM_LIBC_MACROS_LINUX_SYS_SOCKET_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SYS_SOCKET_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sys-stat-macros.h b/libc/include/llvm-libc-macros/linux/sys-stat-macros.h
index 48606cf..3013121 100644
--- a/libc/include/llvm-libc-macros/linux/sys-stat-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sys-stat-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SYS_STAT_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SYS_STAT_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SYS_STAT_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SYS_STAT_MACROS_H
// Definitions from linux/stat.h
#define S_IFMT 0170000
@@ -45,4 +45,4 @@
#define S_IWOTH 00002
#define S_IXOTH 00001
-#endif // __LLVM_LIBC_MACROS_LINUX_SYS_STAT_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SYS_STAT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sys-time-macros.h b/libc/include/llvm-libc-macros/linux/sys-time-macros.h
index 06ae43f..e978195 100644
--- a/libc/include/llvm-libc-macros/linux/sys-time-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sys-time-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SYS_TIME_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SYS_TIME_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SYS_TIME_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SYS_TIME_MACROS_H
// Add two timevals and put the result in timeval_ptr_result. If the resulting
// usec value is greater than 999,999 then the microseconds are turned into full
@@ -50,4 +50,4 @@
? ((timeval_ptr_a)->tv_usec CMP(timeval_ptr_b)->tv_usec) \
: ((timeval_ptr_a)->tv_sec CMP(timeval_ptr_b)->tv_sec))
-#endif // __LLVM_LIBC_MACROS_LINUX_SYS_TIME_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SYS_TIME_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sys-wait-macros.h b/libc/include/llvm-libc-macros/linux/sys-wait-macros.h
index 3e6c6f5..c101638 100644
--- a/libc/include/llvm-libc-macros/linux/sys-wait-macros.h
+++ b/libc/include/llvm-libc-macros/linux/sys-wait-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_SYS_WAIT_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_SYS_WAIT_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_SYS_WAIT_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_SYS_WAIT_MACROS_H
// Wait flags
#define WNOHANG 1 // Do not block
@@ -41,4 +41,4 @@
#define P_PGID 2
#define P_PIDFD 3
-#endif // __LLVM_LIBC_MACROS_LINUX_SYS_WAIT_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_SYS_WAIT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/termios-macros.h b/libc/include/llvm-libc-macros/linux/termios-macros.h
index 17e380e..668cfe2 100644
--- a/libc/include/llvm-libc-macros/linux/termios-macros.h
+++ b/libc/include/llvm-libc-macros/linux/termios-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_TERMIOS_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_TERMIOS_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_TERMIOS_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_TERMIOS_MACROS_H
// Below are generic definitions of symbolic bit-masks, modes etc. They serve
// most architectures including x86_64, aarch64 but have to be adjusted for few
@@ -164,4 +164,4 @@
#define TCIOFF 2 // Suspend output
#define TCION 3 // Restart output
-#endif // __LLVM_LIBC_MACROS_LINUX_TERMIOS_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_TERMIOS_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/time-macros.h b/libc/include/llvm-libc-macros/linux/time-macros.h
index ace27cb..407a1eb 100644
--- a/libc/include/llvm-libc-macros/linux/time-macros.h
+++ b/libc/include/llvm-libc-macros/linux/time-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_TIME_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_TIME_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_TIME_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_TIME_MACROS_H
// clock type macros
#define CLOCK_REALTIME 0
@@ -23,4 +23,4 @@
#define CLOCKS_PER_SEC 1000000
-#endif //__LLVM_LIBC_MACROS_LINUX_TIME_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_TIME_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/unistd-macros.h b/libc/include/llvm-libc-macros/linux/unistd-macros.h
index cfdfb9a..c5109df 100644
--- a/libc/include/llvm-libc-macros/linux/unistd-macros.h
+++ b/libc/include/llvm-libc-macros/linux/unistd-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_LINUX_UNISTD_MACROS_H
-#define __LLVM_LIBC_MACROS_LINUX_UNISTD_MACROS_H
+#ifndef LLVM_LIBC_MACROS_LINUX_UNISTD_MACROS_H
+#define LLVM_LIBC_MACROS_LINUX_UNISTD_MACROS_H
// Values for mode argument to the access(...) function.
#define F_OK 0
@@ -27,4 +27,4 @@
(long)(arg4), (long)(arg5), (long)(arg6))
#define syscall(...) __syscall_helper(__VA_ARGS__, 0, 1, 2, 3, 4, 5, 6)
-#endif // __LLVM_LIBC_MACROS_LINUX_UNISTD_MACROS_H
+#endif // LLVM_LIBC_MACROS_LINUX_UNISTD_MACROS_H
diff --git a/libc/include/llvm-libc-macros/math-macros.h b/libc/include/llvm-libc-macros/math-macros.h
index 9f8edd9..e67fe4d 100644
--- a/libc/include/llvm-libc-macros/math-macros.h
+++ b/libc/include/llvm-libc-macros/math-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_MATH_MACROS_H
-#define __LLVM_LIBC_MACROS_MATH_MACROS_H
+#ifndef LLVM_LIBC_MACROS_MATH_MACROS_H
+#define LLVM_LIBC_MACROS_MATH_MACROS_H
#include "limits-macros.h"
@@ -38,4 +38,4 @@
#define math_errhandling (MATH_ERRNO | MATH_ERREXCEPT)
#endif
-#endif // __LLVM_LIBC_MACROS_MATH_MACROS_H
+#endif // LLVM_LIBC_MACROS_MATH_MACROS_H
diff --git a/libc/include/llvm-libc-macros/null-macro.h b/libc/include/llvm-libc-macros/null-macro.h
index b83fc05..416d4e8 100644
--- a/libc/include/llvm-libc-macros/null-macro.h
+++ b/libc/include/llvm-libc-macros/null-macro.h
@@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_NULL_MACRO_H
-#define __LLVM_LIBC_MACROS_NULL_MACRO_H
+#ifndef LLVM_LIBC_MACROS_NULL_MACRO_H
+#define LLVM_LIBC_MACROS_NULL_MACRO_H
#define __need_NULL
#include <stddef.h>
-#endif // __LLVM_LIBC_MACROS_NULL_MACRO_H
+#endif // LLVM_LIBC_MACROS_NULL_MACRO_H
diff --git a/libc/include/llvm-libc-macros/offsetof-macro.h b/libc/include/llvm-libc-macros/offsetof-macro.h
index eeceb3d..208c06b 100644
--- a/libc/include/llvm-libc-macros/offsetof-macro.h
+++ b/libc/include/llvm-libc-macros/offsetof-macro.h
@@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_OFFSETOF_MACRO_H
-#define __LLVM_LIBC_MACROS_OFFSETOF_MACRO_H
+#ifndef LLVM_LIBC_MACROS_OFFSETOF_MACRO_H
+#define LLVM_LIBC_MACROS_OFFSETOF_MACRO_H
#define __need_offsetof
#include <stddef.h>
-#endif // __LLVM_LIBC_MACROS_OFFSETOF_MACRO_H
+#endif // LLVM_LIBC_MACROS_OFFSETOF_MACRO_H
diff --git a/libc/include/llvm-libc-macros/sched-macros.h b/libc/include/llvm-libc-macros/sched-macros.h
index 760edd9..0f64302 100644
--- a/libc/include/llvm-libc-macros/sched-macros.h
+++ b/libc/include/llvm-libc-macros/sched-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SCHED_MACROS_H
-#define __LLVM_LIBC_MACROS_SCHED_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SCHED_MACROS_H
+#define LLVM_LIBC_MACROS_SCHED_MACROS_H
#ifdef __linux__
#include "linux/sched-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SCHED_MACROS_H
+#endif // LLVM_LIBC_MACROS_SCHED_MACROS_H
diff --git a/libc/include/llvm-libc-macros/signal-macros.h b/libc/include/llvm-libc-macros/signal-macros.h
index 525032b..7ab605b 100644
--- a/libc/include/llvm-libc-macros/signal-macros.h
+++ b/libc/include/llvm-libc-macros/signal-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SIGNUM_MACROS_H
-#define __LLVM_LIBC_MACROS_SIGNUM_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SIGNAL_MACROS_H
+#define LLVM_LIBC_MACROS_SIGNAL_MACROS_H
#ifdef __linux__
#include "linux/signal-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SIGNUM_MACROS_H
+#endif // LLVM_LIBC_MACROS_SIGNAL_MACROS_H
diff --git a/libc/include/llvm-libc-macros/stdckdint-macros.h b/libc/include/llvm-libc-macros/stdckdint-macros.h
index 03b73ae..6944122 100644
--- a/libc/include/llvm-libc-macros/stdckdint-macros.h
+++ b/libc/include/llvm-libc-macros/stdckdint-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_STDCKDINT_MACROS_H
-#define __LLVM_LIBC_MACROS_STDCKDINT_MACROS_H
+#ifndef LLVM_LIBC_MACROS_STDCKDINT_MACROS_H
+#define LLVM_LIBC_MACROS_STDCKDINT_MACROS_H
// We need to use __builtin_*_overflow from GCC/Clang to implement the overflow
// macros. Check __GNUC__ for availability of such builtins.
@@ -22,4 +22,4 @@
#define ckd_mul(R, A, B) __builtin_mul_overflow((A), (B), (R))
#endif // __STDC_VERSION_STDCKDINT_H__
#endif // __GNUC__
-#endif // __LLVM_LIBC_MACROS_STDCKDINT_MACROS_H
+#endif // LLVM_LIBC_MACROS_STDCKDINT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/stdfix-macros.h b/libc/include/llvm-libc-macros/stdfix-macros.h
index 11c18f8..554ebe5 100644
--- a/libc/include/llvm-libc-macros/stdfix-macros.h
+++ b/libc/include/llvm-libc-macros/stdfix-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_STDFIX_MACROS_H
-#define __LLVM_LIBC_MACROS_STDFIX_MACROS_H
+#ifndef LLVM_LIBC_MACROS_STDFIX_MACROS_H
+#define LLVM_LIBC_MACROS_STDFIX_MACROS_H
#ifdef __FRACT_FBIT__
// _Fract and _Accum types are available
@@ -325,4 +325,4 @@
#endif // LIBC_COMPILER_HAS_FIXED_POINT
-#endif // __LLVM_LIBC_MACROS_STDFIX_MACROS_H
+#endif // LLVM_LIBC_MACROS_STDFIX_MACROS_H
diff --git a/libc/include/llvm-libc-macros/stdio-macros.h b/libc/include/llvm-libc-macros/stdio-macros.h
index b2c62ec..db747c5 100644
--- a/libc/include/llvm-libc-macros/stdio-macros.h
+++ b/libc/include/llvm-libc-macros/stdio-macros.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_STDIO_MACROS_H
-#define __LLVM_LIBC_MACROS_STDIO_MACROS_H
+#ifndef LLVM_LIBC_MACROS_STDIO_MACROS_H
+#define LLVM_LIBC_MACROS_STDIO_MACROS_H
#define BUFSIZ 1024
-#endif // __LLVM_LIBC_MACROS_STDIO_MACROS_H
+#endif // LLVM_LIBC_MACROS_STDIO_MACROS_H
diff --git a/libc/include/llvm-libc-macros/stdlib-macros.h b/libc/include/llvm-libc-macros/stdlib-macros.h
index a7625aa..5fcbfef 100644
--- a/libc/include/llvm-libc-macros/stdlib-macros.h
+++ b/libc/include/llvm-libc-macros/stdlib-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_STDLIB_MACROS_H
-#define __LLVM_LIBC_MACROS_STDLIB_MACROS_H
+#ifndef LLVM_LIBC_MACROS_STDLIB_MACROS_H
+#define LLVM_LIBC_MACROS_STDLIB_MACROS_H
#ifndef NULL
#define __need_NULL
@@ -19,4 +19,4 @@
#define RAND_MAX 2147483647
-#endif // __LLVM_LIBC_MACROS_STDLIB_MACROS_H
+#endif // LLVM_LIBC_MACROS_STDLIB_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-auxv-macros.h b/libc/include/llvm-libc-macros/sys-auxv-macros.h
index a57c601..2dcaa2f 100644
--- a/libc/include/llvm-libc-macros/sys-auxv-macros.h
+++ b/libc/include/llvm-libc-macros/sys-auxv-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_AUXV_MACROS_H
-#define __LLVM_LIBC_MACROS_AUXV_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_AUXV_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_AUXV_MACROS_H
// Macros defining the aux vector indexes.
#define AT_NULL 0
@@ -40,4 +40,4 @@
#define AT_MINSIGSTKSZ 51
#endif
-#endif // __LLVM_LIBC_MACROS_AUXV_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_AUXV_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-ioctl-macros.h b/libc/include/llvm-libc-macros/sys-ioctl-macros.h
index c273fab..4a5f965 100644
--- a/libc/include/llvm-libc-macros/sys-ioctl-macros.h
+++ b/libc/include/llvm-libc-macros/sys-ioctl-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_IOCTL_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_IOCTL_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_IOCTL_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_IOCTL_MACROS_H
#ifdef __linux__
#include "linux/sys-ioctl-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_IOCTL_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_IOCTL_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-mman-macros.h b/libc/include/llvm-libc-macros/sys-mman-macros.h
index 4ffc112..a6dc6d9 100644
--- a/libc/include/llvm-libc-macros/sys-mman-macros.h
+++ b/libc/include/llvm-libc-macros/sys-mman-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_MMAN_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_MMAN_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_MMAN_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_MMAN_MACROS_H
// Use definitions from <linux/mman.h> to dispatch arch-specific flag values.
// For example, MCL_CURRENT/MCL_FUTURE/MCL_ONFAULT are different on different
@@ -45,4 +45,4 @@
#define POSIX_MADV_DONTNEED MADV_DONTNEED
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_MMAN_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_MMAN_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-queue-macros.h b/libc/include/llvm-libc-macros/sys-queue-macros.h
index 7da643c..fcac265 100644
--- a/libc/include/llvm-libc-macros/sys-queue-macros.h
+++ b/libc/include/llvm-libc-macros/sys-queue-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
#include <llvm-libc-macros/containerof-macro.h>
#include <llvm-libc-macros/null-macro.h>
@@ -259,4 +259,4 @@
(head2)->stqh_last = &STAILQ_FIRST(head2); \
} while (0)
-#endif // __LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-random-macros.h b/libc/include/llvm-libc-macros/sys-random-macros.h
index e87128d..9b1a8ed 100644
--- a/libc/include/llvm-libc-macros/sys-random-macros.h
+++ b/libc/include/llvm-libc-macros/sys-random-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_RANDOM_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_RANDOM_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_RANDOM_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_RANDOM_MACROS_H
#ifdef __linux__
#include "linux/sys-random-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_RANDOM_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_RANDOM_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-resource-macros.h b/libc/include/llvm-libc-macros/sys-resource-macros.h
index 272723a..1ce01cd 100644
--- a/libc/include/llvm-libc-macros/sys-resource-macros.h
+++ b/libc/include/llvm-libc-macros/sys-resource-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_RESOURCE_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_RESOURCE_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_RESOURCE_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_RESOURCE_MACROS_H
#ifdef __linux__
#include "linux/sys-resource-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_RESOURCE_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_RESOURCE_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-select-macros.h b/libc/include/llvm-libc-macros/sys-select-macros.h
index 5d6592c..d54e530 100644
--- a/libc/include/llvm-libc-macros/sys-select-macros.h
+++ b/libc/include/llvm-libc-macros/sys-select-macros.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_SELECT_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_SELECT_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_SELECT_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_SELECT_MACROS_H
#define FD_SETSIZE 1024
#define __FD_SET_WORD_TYPE unsigned long
@@ -32,4 +32,4 @@
#define FD_ISSET(fd, set) \
(int)(((set)->__set[__FD_WORD(fd)] & __FD_MASK(fd)) != 0)
-#endif // __LLVM_LIBC_MACROS_SYS_SELECT_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_SELECT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-socket-macros.h b/libc/include/llvm-libc-macros/sys-socket-macros.h
index 2032360..6b1d280 100644
--- a/libc/include/llvm-libc-macros/sys-socket-macros.h
+++ b/libc/include/llvm-libc-macros/sys-socket-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_SOCKET_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_SOCKET_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_SOCKET_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_SOCKET_MACROS_H
#ifdef __linux__
#include "linux/sys-socket-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_SOCKET_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_SOCKET_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-stat-macros.h b/libc/include/llvm-libc-macros/sys-stat-macros.h
index 64f63c3..c47c961 100644
--- a/libc/include/llvm-libc-macros/sys-stat-macros.h
+++ b/libc/include/llvm-libc-macros/sys-stat-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_STAT_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_STAT_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_STAT_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_STAT_MACROS_H
#ifdef __linux__
#include "linux/sys-stat-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_STAT_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_STAT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-time-macros.h b/libc/include/llvm-libc-macros/sys-time-macros.h
index 8e463170..36d7d5a 100644
--- a/libc/include/llvm-libc-macros/sys-time-macros.h
+++ b/libc/include/llvm-libc-macros/sys-time-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_TIME_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_TIME_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_TIME_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_TIME_MACROS_H
#ifdef __linux__
#include "linux/sys-time-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_TIME_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_TIME_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-wait-macros.h b/libc/include/llvm-libc-macros/sys-wait-macros.h
index ea58fcc..c418a79 100644
--- a/libc/include/llvm-libc-macros/sys-wait-macros.h
+++ b/libc/include/llvm-libc-macros/sys-wait-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_SYS_WAIT_MACROS_H
-#define __LLVM_LIBC_MACROS_SYS_WAIT_MACROS_H
+#ifndef LLVM_LIBC_MACROS_SYS_WAIT_MACROS_H
+#define LLVM_LIBC_MACROS_SYS_WAIT_MACROS_H
#ifdef __linux__
#include "linux/sys-wait-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_SYS_WAIT_MACROS_H
+#endif // LLVM_LIBC_MACROS_SYS_WAIT_MACROS_H
diff --git a/libc/include/llvm-libc-macros/termios-macros.h b/libc/include/llvm-libc-macros/termios-macros.h
index c999828..1067e8a 100644
--- a/libc/include/llvm-libc-macros/termios-macros.h
+++ b/libc/include/llvm-libc-macros/termios-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_TERMIOS_MACROS_H
-#define __LLVM_LIBC_MACROS_TERMIOS_MACROS_H
+#ifndef LLVM_LIBC_MACROS_TERMIOS_MACROS_H
+#define LLVM_LIBC_MACROS_TERMIOS_MACROS_H
#ifdef __linux__
#include "linux/termios-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_TERMIOS_MACROS_H
+#endif // LLVM_LIBC_MACROS_TERMIOS_MACROS_H
diff --git a/libc/include/llvm-libc-macros/time-macros.h b/libc/include/llvm-libc-macros/time-macros.h
index c3bd7aa..6d49ed4 100644
--- a/libc/include/llvm-libc-macros/time-macros.h
+++ b/libc/include/llvm-libc-macros/time-macros.h
@@ -1,5 +1,5 @@
-#ifndef __LLVM_LIBC_MACROS_TIME_MACROS_H
-#define __LLVM_LIBC_MACROS_TIME_MACROS_H
+#ifndef LLVM_LIBC_MACROS_TIME_MACROS_H
+#define LLVM_LIBC_MACROS_TIME_MACROS_H
#if defined(__AMDGPU__) || defined(__NVPTX__)
#include "gpu/time-macros.h"
@@ -7,4 +7,4 @@
#include "linux/time-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_TIME_MACROS_H
+#endif // LLVM_LIBC_MACROS_TIME_MACROS_H
diff --git a/libc/include/llvm-libc-macros/unistd-macros.h b/libc/include/llvm-libc-macros/unistd-macros.h
index dbcac0f..4f27f07 100644
--- a/libc/include/llvm-libc-macros/unistd-macros.h
+++ b/libc/include/llvm-libc-macros/unistd-macros.h
@@ -1,8 +1,8 @@
-#ifndef __LLVM_LIBC_MACROS_UNISTD_MACROS_H
-#define __LLVM_LIBC_MACROS_UNISTD_MACROS_H
+#ifndef LLVM_LIBC_MACROS_UNISTD_MACROS_H
+#define LLVM_LIBC_MACROS_UNISTD_MACROS_H
#ifdef __linux__
#include "linux/unistd-macros.h"
#endif
-#endif // __LLVM_LIBC_MACROS_UNISTD_MACROS_H
+#endif // LLVM_LIBC_MACROS_UNISTD_MACROS_H
diff --git a/libc/include/llvm-libc-macros/wchar-macros.h b/libc/include/llvm-libc-macros/wchar-macros.h
index adca41e..5b211f5 100644
--- a/libc/include/llvm-libc-macros/wchar-macros.h
+++ b/libc/include/llvm-libc-macros/wchar-macros.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_MACROS_WCHAR_MACROS_H
-#define __LLVM_LIBC_MACROS_WCHAR_MACROS_H
+#ifndef LLVM_LIBC_MACROS_WCHAR_MACROS_H
+#define LLVM_LIBC_MACROS_WCHAR_MACROS_H
#ifndef WEOF
#define WEOF 0xffffffffu
#endif
-#endif // __LLVM_LIBC_MACROS_WCHAR_MACROS_H
+#endif // LLVM_LIBC_MACROS_WCHAR_MACROS_H
diff --git a/libc/include/llvm-libc-types/ACTION.h b/libc/include/llvm-libc-types/ACTION.h
index 7181a59..1ddce20 100644
--- a/libc/include/llvm-libc-types/ACTION.h
+++ b/libc/include/llvm-libc-types/ACTION.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_ACTION_H__
-#define __LLVM_LIBC_TYPES_ACTION_H__
+#ifndef LLVM_LIBC_TYPES_ACTION_H
+#define LLVM_LIBC_TYPES_ACTION_H
typedef enum { FIND, ENTER } ACTION;
-#endif // __LLVM_LIBC_TYPES_ACTION_H__
+#endif // LLVM_LIBC_TYPES_ACTION_H
diff --git a/libc/include/llvm-libc-types/DIR.h b/libc/include/llvm-libc-types/DIR.h
index 0a2cf27..855446d 100644
--- a/libc/include/llvm-libc-types/DIR.h
+++ b/libc/include/llvm-libc-types/DIR.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_DIR_H__
-#define __LLVM_LIBC_TYPES_DIR_H__
+#ifndef LLVM_LIBC_TYPES_DIR_H
+#define LLVM_LIBC_TYPES_DIR_H
typedef struct DIR DIR;
-#endif // __LLVM_LIBC_TYPES_DIR_H__
+#endif // LLVM_LIBC_TYPES_DIR_H
diff --git a/libc/include/llvm-libc-types/ENTRY.h b/libc/include/llvm-libc-types/ENTRY.h
index 0ccb593..ccbd777 100644
--- a/libc/include/llvm-libc-types/ENTRY.h
+++ b/libc/include/llvm-libc-types/ENTRY.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_ENTRY_H__
-#define __LLVM_LIBC_TYPES_ENTRY_H__
+#ifndef LLVM_LIBC_TYPES_ENTRY_H
+#define LLVM_LIBC_TYPES_ENTRY_H
typedef struct {
char *key;
void *data;
} ENTRY;
-#endif // __LLVM_LIBC_TYPES_ENTRY_H__
+#endif // LLVM_LIBC_TYPES_ENTRY_H
diff --git a/libc/include/llvm-libc-types/FILE.h b/libc/include/llvm-libc-types/FILE.h
index 1c1ff97..f1d2e4f 100644
--- a/libc/include/llvm-libc-types/FILE.h
+++ b/libc/include/llvm-libc-types/FILE.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_FILE_H__
-#define __LLVM_LIBC_TYPES_FILE_H__
+#ifndef LLVM_LIBC_TYPES_FILE_H
+#define LLVM_LIBC_TYPES_FILE_H
typedef struct FILE FILE;
-#endif // __LLVM_LIBC_TYPES_FILE_H__
+#endif // LLVM_LIBC_TYPES_FILE_H
diff --git a/libc/include/llvm-libc-types/__atexithandler_t.h b/libc/include/llvm-libc-types/__atexithandler_t.h
index a9887b6..01aed67 100644
--- a/libc/include/llvm-libc-types/__atexithandler_t.h
+++ b/libc/include/llvm-libc-types/__atexithandler_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_ATEXITHANDLER_T_H__
-#define __LLVM_LIBC_TYPES_ATEXITHANDLER_T_H__
+#ifndef LLVM_LIBC_TYPES___ATEXITHANDLER_T_H
+#define LLVM_LIBC_TYPES___ATEXITHANDLER_T_H
typedef void (*__atexithandler_t)(void);
-#endif // __LLVM_LIBC_TYPES_ATEXITHANDLER_T_H__
+#endif // LLVM_LIBC_TYPES___ATEXITHANDLER_T_H
diff --git a/libc/include/llvm-libc-types/__atfork_callback_t.h b/libc/include/llvm-libc-types/__atfork_callback_t.h
index 3da66c2..ae2d0ca 100644
--- a/libc/include/llvm-libc-types/__atfork_callback_t.h
+++ b/libc/include/llvm-libc-types/__atfork_callback_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_ATFORK_CALLBACK_T_H__
-#define __LLVM_LIBC_TYPES_ATFORK_CALLBACK_T_H__
+#ifndef LLVM_LIBC_TYPES___ATFORK_CALLBACK_T_H
+#define LLVM_LIBC_TYPES___ATFORK_CALLBACK_T_H
typedef void (*__atfork_callback_t)(void);
-#endif // __LLVM_LIBC_TYPES_ATFORK_CALLBACK_T_H__
+#endif // LLVM_LIBC_TYPES___ATFORK_CALLBACK_T_H
diff --git a/libc/include/llvm-libc-types/__bsearchcompare_t.h b/libc/include/llvm-libc-types/__bsearchcompare_t.h
index 40ebc7f..0b1987be 100644
--- a/libc/include/llvm-libc-types/__bsearchcompare_t.h
+++ b/libc/include/llvm-libc-types/__bsearchcompare_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_BSEARCHCOMPARE_T_H__
-#define __LLVM_LIBC_TYPES_BSEARCHCOMPARE_T_H__
+#ifndef LLVM_LIBC_TYPES___BSEARCHCOMPARE_T_H
+#define LLVM_LIBC_TYPES___BSEARCHCOMPARE_T_H
typedef int (*__bsearchcompare_t)(const void *, const void *);
-#endif // __LLVM_LIBC_TYPES_BSEARCHCOMPARE_T_H__
+#endif // LLVM_LIBC_TYPES___BSEARCHCOMPARE_T_H
diff --git a/libc/include/llvm-libc-types/__call_once_func_t.h b/libc/include/llvm-libc-types/__call_once_func_t.h
index bc8ed83..6d278da 100644
--- a/libc/include/llvm-libc-types/__call_once_func_t.h
+++ b/libc/include/llvm-libc-types/__call_once_func_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_CALL_ONCE_FUNC_T_H__
-#define __LLVM_LIBC_TYPES_CALL_ONCE_FUNC_T_H__
+#ifndef LLVM_LIBC_TYPES___CALL_ONCE_FUNC_T_H
+#define LLVM_LIBC_TYPES___CALL_ONCE_FUNC_T_H
typedef void (*__call_once_func_t)(void);
-#endif // __LLVM_LIBC_TYPES_CALL_ONCE_FUNC_T_H__
+#endif // LLVM_LIBC_TYPES___CALL_ONCE_FUNC_T_H
diff --git a/libc/include/llvm-libc-types/__exec_argv_t.h b/libc/include/llvm-libc-types/__exec_argv_t.h
index 35b687d9..4eff583 100644
--- a/libc/include/llvm-libc-types/__exec_argv_t.h
+++ b/libc/include/llvm-libc-types/__exec_argv_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_EXEC_ARGV_T_H__
-#define __LLVM_LIBC_TYPES_EXEC_ARGV_T_H__
+#ifndef LLVM_LIBC_TYPES___EXEC_ARGV_T_H
+#define LLVM_LIBC_TYPES___EXEC_ARGV_T_H
typedef char *const __exec_argv_t[];
-#endif // __LLVM_LIBC_TYPES_EXEC_ARGV_T_H__
+#endif // LLVM_LIBC_TYPES___EXEC_ARGV_T_H
diff --git a/libc/include/llvm-libc-types/__exec_envp_t.h b/libc/include/llvm-libc-types/__exec_envp_t.h
index 06eb2dd..89e0275 100644
--- a/libc/include/llvm-libc-types/__exec_envp_t.h
+++ b/libc/include/llvm-libc-types/__exec_envp_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_EXEC_ENVP_T_H__
-#define __LLVM_LIBC_TYPES_EXEC_ENVP_T_H__
+#ifndef LLVM_LIBC_TYPES___EXEC_ENVP_T_H
+#define LLVM_LIBC_TYPES___EXEC_ENVP_T_H
typedef char *const __exec_envp_t[];
-#endif // __LLVM_LIBC_TYPES_EXEC_ENVP_T_H__
+#endif // LLVM_LIBC_TYPES___EXEC_ENVP_T_H
diff --git a/libc/include/llvm-libc-types/__futex_word.h b/libc/include/llvm-libc-types/__futex_word.h
index 85130ab..04023c7 100644
--- a/libc/include/llvm-libc-types/__futex_word.h
+++ b/libc/include/llvm-libc-types/__futex_word.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_FUTEX_WORD_H__
-#define __LLVM_LIBC_TYPES_FUTEX_WORD_H__
+#ifndef LLVM_LIBC_TYPES___FUTEX_WORD_H
+#define LLVM_LIBC_TYPES___FUTEX_WORD_H
typedef struct {
// Futex word should be aligned appropriately to allow target atomic
@@ -17,4 +17,4 @@ typedef struct {
: _Alignof(__UINT32_TYPE__)) __UINT32_TYPE__ __word;
} __futex_word;
-#endif // __LLVM_LIBC_TYPES_FUTEX_WORD_H__
+#endif // LLVM_LIBC_TYPES___FUTEX_WORD_H
diff --git a/libc/include/llvm-libc-types/__getoptargv_t.h b/libc/include/llvm-libc-types/__getoptargv_t.h
index 81c6728..c26b9e9 100644
--- a/libc/include/llvm-libc-types/__getoptargv_t.h
+++ b/libc/include/llvm-libc-types/__getoptargv_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_GETOPTARGV_T_H__
-#define __LLVM_LIBC_TYPES_GETOPTARGV_T_H__
+#ifndef LLVM_LIBC_TYPES___GETOPTARGV_T_H
+#define LLVM_LIBC_TYPES___GETOPTARGV_T_H
typedef char *const __getoptargv_t[];
-#endif // __LLVM_LIBC_TYPES_GETOPTARGV_T_H__
+#endif // LLVM_LIBC_TYPES___GETOPTARGV_T_H
diff --git a/libc/include/llvm-libc-types/__mutex_type.h b/libc/include/llvm-libc-types/__mutex_type.h
index a7ed8f8..d27bf5d 100644
--- a/libc/include/llvm-libc-types/__mutex_type.h
+++ b/libc/include/llvm-libc-types/__mutex_type.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES___MUTEX_T_H
-#define __LLVM_LIBC_TYPES___MUTEX_T_H
+#ifndef LLVM_LIBC_TYPES___MUTEX_TYPE_H
+#define LLVM_LIBC_TYPES___MUTEX_TYPE_H
#include <llvm-libc-types/__futex_word.h>
@@ -26,4 +26,4 @@ typedef struct {
#endif
} __mutex_type;
-#endif // __LLVM_LIBC_TYPES___MUTEX_T_H
+#endif // LLVM_LIBC_TYPES___MUTEX_TYPE_H
diff --git a/libc/include/llvm-libc-types/__pthread_once_func_t.h b/libc/include/llvm-libc-types/__pthread_once_func_t.h
index 5ace5cb..7575029 100644
--- a/libc/include/llvm-libc-types/__pthread_once_func_t.h
+++ b/libc/include/llvm-libc-types/__pthread_once_func_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_ONCE_FUNC_T_H__
-#define __LLVM_LIBC_TYPES_PTHREAD_ONCE_FUNC_T_H__
+#ifndef LLVM_LIBC_TYPES___PTHREAD_ONCE_FUNC_T_H
+#define LLVM_LIBC_TYPES___PTHREAD_ONCE_FUNC_T_H
typedef void (*__pthread_once_func_t)(void);
-#endif // __LLVM_LIBC_TYPES_PTHREAD_ONCE_FUNC_T_H__
+#endif // LLVM_LIBC_TYPES___PTHREAD_ONCE_FUNC_T_H
diff --git a/libc/include/llvm-libc-types/__pthread_start_t.h b/libc/include/llvm-libc-types/__pthread_start_t.h
index 1e05f9b4..6b7ae40 100644
--- a/libc/include/llvm-libc-types/__pthread_start_t.h
+++ b/libc/include/llvm-libc-types/__pthread_start_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_START_T_H__
-#define __LLVM_LIBC_TYPES_PTHREAD_START_T_H__
+#ifndef LLVM_LIBC_TYPES___PTHREAD_START_T_H
+#define LLVM_LIBC_TYPES___PTHREAD_START_T_H
typedef void *(*__pthread_start_t)(void *);
-#endif // __LLVM_LIBC_TYPES_PTHREAD_START_T_H__
+#endif // LLVM_LIBC_TYPES___PTHREAD_START_T_H
diff --git a/libc/include/llvm-libc-types/__pthread_tss_dtor_t.h b/libc/include/llvm-libc-types/__pthread_tss_dtor_t.h
index 1b54d31..c67b604 100644
--- a/libc/include/llvm-libc-types/__pthread_tss_dtor_t.h
+++ b/libc/include/llvm-libc-types/__pthread_tss_dtor_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_TSS_DTOR_T_H__
-#define __LLVM_LIBC_TYPES_PTHREAD_TSS_DTOR_T_H__
+#ifndef LLVM_LIBC_TYPES___PTHREAD_TSS_DTOR_T_H
+#define LLVM_LIBC_TYPES___PTHREAD_TSS_DTOR_T_H
typedef void (*__pthread_tss_dtor_t)(void *);
-#endif // __LLVM_LIBC_TYPES_PTHREAD_TSS_DTOR_T_H__
+#endif // LLVM_LIBC_TYPES___PTHREAD_TSS_DTOR_T_H
diff --git a/libc/include/llvm-libc-types/__qsortcompare_t.h b/libc/include/llvm-libc-types/__qsortcompare_t.h
index 82bd4cc..48fc9ccb 100644
--- a/libc/include/llvm-libc-types/__qsortcompare_t.h
+++ b/libc/include/llvm-libc-types/__qsortcompare_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_QSORTCOMPARE_T_H__
-#define __LLVM_LIBC_TYPES_QSORTCOMPARE_T_H__
+#ifndef LLVM_LIBC_TYPES___QSORTCOMPARE_T_H
+#define LLVM_LIBC_TYPES___QSORTCOMPARE_T_H
typedef int (*__qsortcompare_t)(const void *, const void *);
-#endif // __LLVM_LIBC_TYPES_QSORTCOMPARE_T_H__
+#endif // LLVM_LIBC_TYPES___QSORTCOMPARE_T_H
diff --git a/libc/include/llvm-libc-types/__qsortrcompare_t.h b/libc/include/llvm-libc-types/__qsortrcompare_t.h
index febf79d..f6b0588 100644
--- a/libc/include/llvm-libc-types/__qsortrcompare_t.h
+++ b/libc/include/llvm-libc-types/__qsortrcompare_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_QSORTRCOMPARE_T_H__
-#define __LLVM_LIBC_TYPES_QSORTRCOMPARE_T_H__
+#ifndef LLVM_LIBC_TYPES___QSORTRCOMPARE_T_H
+#define LLVM_LIBC_TYPES___QSORTRCOMPARE_T_H
typedef int (*__qsortrcompare_t)(const void *, const void *, void *);
-#endif // __LLVM_LIBC_TYPES_QSORTRCOMPARE_T_H__
+#endif // LLVM_LIBC_TYPES___QSORTRCOMPARE_T_H
diff --git a/libc/include/llvm-libc-types/__sighandler_t.h b/libc/include/llvm-libc-types/__sighandler_t.h
index bd0ad98..9c1ac99 100644
--- a/libc/include/llvm-libc-types/__sighandler_t.h
+++ b/libc/include/llvm-libc-types/__sighandler_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SIGHANDLER_T_H__
-#define __LLVM_LIBC_TYPES_SIGHANDLER_T_H__
+#ifndef LLVM_LIBC_TYPES___SIGHANDLER_T_H
+#define LLVM_LIBC_TYPES___SIGHANDLER_T_H
typedef void (*__sighandler_t)(int);
-#endif // __LLVM_LIBC_TYPES_SIGHANDLER_T_H__
+#endif // LLVM_LIBC_TYPES___SIGHANDLER_T_H
diff --git a/libc/include/llvm-libc-types/__thread_type.h b/libc/include/llvm-libc-types/__thread_type.h
index da5b898..645573f 100644
--- a/libc/include/llvm-libc-types/__thread_type.h
+++ b/libc/include/llvm-libc-types/__thread_type.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_THREAD_TYPE_H__
-#define __LLVM_LIBC_TYPES_THREAD_TYPE_H__
+#ifndef LLVM_LIBC_TYPES___THREAD_TYPE_H
+#define LLVM_LIBC_TYPES___THREAD_TYPE_H
typedef struct {
void *__attrib;
} __thread_type;
-#endif // __LLVM_LIBC_TYPES_THREAD_TYPE_H__
+#endif // LLVM_LIBC_TYPES___THREAD_TYPE_H
diff --git a/libc/include/llvm-libc-types/blkcnt_t.h b/libc/include/llvm-libc-types/blkcnt_t.h
index acd8d34..9dea8f0 100644
--- a/libc/include/llvm-libc-types/blkcnt_t.h
+++ b/libc/include/llvm-libc-types/blkcnt_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_BLKCNT_T_H__
-#define __LLVM_LIBC_TYPES_BLKCNT_T_H__
+#ifndef LLVM_LIBC_TYPES_BLKCNT_T_H
+#define LLVM_LIBC_TYPES_BLKCNT_T_H
typedef __INTPTR_TYPE__ blkcnt_t;
-#endif // __LLVM_LIBC_TYPES_BLKCNT_T_H__
+#endif // LLVM_LIBC_TYPES_BLKCNT_T_H
diff --git a/libc/include/llvm-libc-types/blksize_t.h b/libc/include/llvm-libc-types/blksize_t.h
index 99ddac5..7caa970 100644
--- a/libc/include/llvm-libc-types/blksize_t.h
+++ b/libc/include/llvm-libc-types/blksize_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_BLKSIZE_T_H__
-#define __LLVM_LIBC_TYPES_BLKSIZE_T_H__
+#ifndef LLVM_LIBC_TYPES_BLKSIZE_T_H
+#define LLVM_LIBC_TYPES_BLKSIZE_T_H
typedef __INTPTR_TYPE__ blksize_t;
-#endif // __LLVM_LIBC_TYPES_BLKSIZE_T_H__
+#endif // LLVM_LIBC_TYPES_BLKSIZE_T_H
diff --git a/libc/include/llvm-libc-types/cc_t.h b/libc/include/llvm-libc-types/cc_t.h
index e08523c..40d99ad 100644
--- a/libc/include/llvm-libc-types/cc_t.h
+++ b/libc/include/llvm-libc-types/cc_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_CC_T_H__
-#define __LLVM_LIBC_TYPES_CC_T_H__
+#ifndef LLVM_LIBC_TYPES_CC_T_H
+#define LLVM_LIBC_TYPES_CC_T_H
typedef unsigned char cc_t;
-#endif // __LLVM_LIBC_TYPES_CC_T_H__
+#endif // LLVM_LIBC_TYPES_CC_T_H
diff --git a/libc/include/llvm-libc-types/clock_t.h b/libc/include/llvm-libc-types/clock_t.h
index b7969d6..8759ee9 100644
--- a/libc/include/llvm-libc-types/clock_t.h
+++ b/libc/include/llvm-libc-types/clock_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_CLOCK_T_H__
-#define __LLVM_LIBC_TYPES_CLOCK_T_H__
+#ifndef LLVM_LIBC_TYPES_CLOCK_T_H
+#define LLVM_LIBC_TYPES_CLOCK_T_H
typedef long clock_t;
-#endif // __LLVM_LIBC_TYPES_CLOCK_T_H__
+#endif // LLVM_LIBC_TYPES_CLOCK_T_H
diff --git a/libc/include/llvm-libc-types/clockid_t.h b/libc/include/llvm-libc-types/clockid_t.h
index ddaceb6..4b05959 100644
--- a/libc/include/llvm-libc-types/clockid_t.h
+++ b/libc/include/llvm-libc-types/clockid_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_CLOCKID_T_H__
-#define __LLVM_LIBC_TYPES_CLOCKID_T_H__
+#ifndef LLVM_LIBC_TYPES_CLOCKID_T_H
+#define LLVM_LIBC_TYPES_CLOCKID_T_H
typedef int clockid_t;
-#endif // __LLVM_LIBC_TYPES_CLOCKID_T_H__
+#endif // LLVM_LIBC_TYPES_CLOCKID_T_H
diff --git a/libc/include/llvm-libc-types/cnd_t.h b/libc/include/llvm-libc-types/cnd_t.h
index 09a29ac..1159ac4 100644
--- a/libc/include/llvm-libc-types/cnd_t.h
+++ b/libc/include/llvm-libc-types/cnd_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_CND_T_H__
-#define __LLVM_LIBC_TYPES_CND_T_H__
+#ifndef LLVM_LIBC_TYPES_CND_T_H
+#define LLVM_LIBC_TYPES_CND_T_H
#include "mtx_t.h"
@@ -17,4 +17,4 @@ typedef struct {
mtx_t __qmtx;
} cnd_t;
-#endif // __LLVM_LIBC_TYPES_CND_T_H__
+#endif // LLVM_LIBC_TYPES_CND_T_H
diff --git a/libc/include/llvm-libc-types/cookie_io_functions_t.h b/libc/include/llvm-libc-types/cookie_io_functions_t.h
index df90416..f9fa1a2 100644
--- a/libc/include/llvm-libc-types/cookie_io_functions_t.h
+++ b/libc/include/llvm-libc-types/cookie_io_functions_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_COOKIE_IO_FUNCTIONS_T_H
-#define __LLVM_LIBC_TYPES_COOKIE_IO_FUNCTIONS_T_H
+#ifndef LLVM_LIBC_TYPES_COOKIE_IO_FUNCTIONS_T_H
+#define LLVM_LIBC_TYPES_COOKIE_IO_FUNCTIONS_T_H
#include <llvm-libc-types/off64_t.h>
#include <llvm-libc-types/size_t.h>
@@ -25,4 +25,4 @@ typedef struct {
cookie_close_function_t *close;
} cookie_io_functions_t;
-#endif // __LLVM_LIBC_TYPES_COOKIE_IO_FUNCTIONS_T_H
+#endif // LLVM_LIBC_TYPES_COOKIE_IO_FUNCTIONS_T_H
diff --git a/libc/include/llvm-libc-types/cpu_set_t.h b/libc/include/llvm-libc-types/cpu_set_t.h
index 79f694a..e7f5259 100644
--- a/libc/include/llvm-libc-types/cpu_set_t.h
+++ b/libc/include/llvm-libc-types/cpu_set_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_CPU_SET_T_H
-#define __LLVM_LIBC_TYPES_CPU_SET_T_H
+#ifndef LLVM_LIBC_TYPES_CPU_SET_T_H
+#define LLVM_LIBC_TYPES_CPU_SET_T_H
typedef struct {
// If a processor with more than 1024 CPUs is to be supported in future,
@@ -15,4 +15,4 @@ typedef struct {
unsigned long __mask[128 / sizeof(unsigned long)];
} cpu_set_t;
-#endif // __LLVM_LIBC_TYPES_CPU_SET_T_H
+#endif // LLVM_LIBC_TYPES_CPU_SET_T_H
diff --git a/libc/include/llvm-libc-types/dev_t.h b/libc/include/llvm-libc-types/dev_t.h
index 9fbc41a..3181e34 100644
--- a/libc/include/llvm-libc-types/dev_t.h
+++ b/libc/include/llvm-libc-types/dev_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_DEV_T_H__
-#define __LLVM_LIBC_TYPES_DEV_T_H__
+#ifndef LLVM_LIBC_TYPES_DEV_T_H
+#define LLVM_LIBC_TYPES_DEV_T_H
typedef __UINT64_TYPE__ dev_t;
-#endif // __LLVM_LIBC_TYPES_DEV_T_H__
+#endif // LLVM_LIBC_TYPES_DEV_T_H
diff --git a/libc/include/llvm-libc-types/div_t.h b/libc/include/llvm-libc-types/div_t.h
index e495a1c..450603d 100644
--- a/libc/include/llvm-libc-types/div_t.h
+++ b/libc/include/llvm-libc-types/div_t.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_DIV_T_H__
-#define __LLVM_LIBC_TYPES_DIV_T_H__
+#ifndef LLVM_LIBC_TYPES_DIV_T_H
+#define LLVM_LIBC_TYPES_DIV_T_H
typedef struct {
int quot;
int rem;
} div_t;
-#endif // __LLVM_LIBC_TYPES_DIV_T_H__
+#endif // LLVM_LIBC_TYPES_DIV_T_H
diff --git a/libc/include/llvm-libc-types/double_t.h b/libc/include/llvm-libc-types/double_t.h
index 2aa471d..c4ad08a 100644
--- a/libc/include/llvm-libc-types/double_t.h
+++ b/libc/include/llvm-libc-types/double_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_DOUBLE_T_H__
-#define __LLVM_LIBC_TYPES_DOUBLE_T_H__
+#ifndef LLVM_LIBC_TYPES_DOUBLE_T_H
+#define LLVM_LIBC_TYPES_DOUBLE_T_H
#if !defined(__FLT_EVAL_METHOD__) || __FLT_EVAL_METHOD__ == 0
#define __LLVM_LIBC_DOUBLE_T double
@@ -21,4 +21,4 @@
typedef __LLVM_LIBC_DOUBLE_T double_t;
-#endif // __LLVM_LIBC_TYPES_DOUBLE_T_H__
+#endif // LLVM_LIBC_TYPES_DOUBLE_T_H
diff --git a/libc/include/llvm-libc-types/fd_set.h b/libc/include/llvm-libc-types/fd_set.h
index 54e3fc6..58fc438 100644
--- a/libc/include/llvm-libc-types/fd_set.h
+++ b/libc/include/llvm-libc-types/fd_set.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_FD_SET_H__
-#define __LLVM_LIBC_TYPES_FD_SET_H__
+#ifndef LLVM_LIBC_TYPES_FD_SET_H
+#define LLVM_LIBC_TYPES_FD_SET_H
#include <llvm-libc-macros/sys-select-macros.h> // FD_SETSIZE
@@ -15,4 +15,4 @@ typedef struct {
__FD_SET_WORD_TYPE __set[__FD_SET_ARRAYSIZE];
} fd_set;
-#endif // __LLVM_LIBC_TYPES_FD_SET_H__
+#endif // LLVM_LIBC_TYPES_FD_SET_H
diff --git a/libc/include/llvm-libc-types/fenv_t.h b/libc/include/llvm-libc-types/fenv_t.h
index 86fcf2e..c83f238 100644
--- a/libc/include/llvm-libc-types/fenv_t.h
+++ b/libc/include/llvm-libc-types/fenv_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_FENV_T_H__
-#define __LLVM_LIBC_TYPES_FENV_T_H__
+#ifndef LLVM_LIBC_TYPES_FENV_T_H
+#define LLVM_LIBC_TYPES_FENV_T_H
#ifdef __aarch64__
typedef struct {
@@ -33,4 +33,4 @@ typedef struct {
#error "fenv_t not defined for your platform"
#endif
-#endif // __LLVM_LIBC_TYPES_FENV_T_H__
+#endif // LLVM_LIBC_TYPES_FENV_T_H
diff --git a/libc/include/llvm-libc-types/fexcept_t.h b/libc/include/llvm-libc-types/fexcept_t.h
index 6e7969c..60687bd 100644
--- a/libc/include/llvm-libc-types/fexcept_t.h
+++ b/libc/include/llvm-libc-types/fexcept_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_FEXCEPT_T_H__
-#define __LLVM_LIBC_TYPES_FEXCEPT_T_H__
+#ifndef LLVM_LIBC_TYPES_FEXCEPT_T_H
+#define LLVM_LIBC_TYPES_FEXCEPT_T_H
typedef int fexcept_t;
-#endif // __LLVM_LIBC_TYPES_FEXCEPT_T_H__
+#endif // LLVM_LIBC_TYPES_FEXCEPT_T_H
diff --git a/libc/include/llvm-libc-types/float128.h b/libc/include/llvm-libc-types/float128.h
index 1907a5e..e2dc18c 100644
--- a/libc/include/llvm-libc-types/float128.h
+++ b/libc/include/llvm-libc-types/float128.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_FLOAT128_H__
-#define __LLVM_LIBC_TYPES_FLOAT128_H__
+#ifndef LLVM_LIBC_TYPES_FLOAT128_H
+#define LLVM_LIBC_TYPES_FLOAT128_H
#include "llvm-libc-macros/float-macros.h" // LDBL_MANT_DIG
@@ -34,4 +34,4 @@ typedef __float128 float128;
typedef long double float128;
#endif
-#endif // __LLVM_LIBC_TYPES_FLOAT128_H__
+#endif // LLVM_LIBC_TYPES_FLOAT128_H
diff --git a/libc/include/llvm-libc-types/float_t.h b/libc/include/llvm-libc-types/float_t.h
index 8df3bf0..5027249 100644
--- a/libc/include/llvm-libc-types/float_t.h
+++ b/libc/include/llvm-libc-types/float_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_FLOAT_T_H__
-#define __LLVM_LIBC_TYPES_FLOAT_T_H__
+#ifndef LLVM_LIBC_TYPES_FLOAT_T_H
+#define LLVM_LIBC_TYPES_FLOAT_T_H
#if !defined(__FLT_EVAL_METHOD__) || __FLT_EVAL_METHOD__ == 0
#define __LLVM_LIBC_FLOAT_T float
@@ -21,4 +21,4 @@
typedef __LLVM_LIBC_FLOAT_T float_t;
-#endif // __LLVM_LIBC_TYPES_FLOAT_T_H__
+#endif // LLVM_LIBC_TYPES_FLOAT_T_H
diff --git a/libc/include/llvm-libc-types/gid_t.h b/libc/include/llvm-libc-types/gid_t.h
index 664aee0..cfe36ce 100644
--- a/libc/include/llvm-libc-types/gid_t.h
+++ b/libc/include/llvm-libc-types/gid_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_GID_T_H__
-#define __LLVM_LIBC_TYPES_GID_T_H__
+#ifndef LLVM_LIBC_TYPES_GID_T_H
+#define LLVM_LIBC_TYPES_GID_T_H
typedef __UINT32_TYPE__ gid_t;
-#endif // __LLVM_LIBC_TYPES_GID_T_H__
+#endif // LLVM_LIBC_TYPES_GID_T_H
diff --git a/libc/include/llvm-libc-types/ino_t.h b/libc/include/llvm-libc-types/ino_t.h
index 0f5abd9..148bd67 100644
--- a/libc/include/llvm-libc-types/ino_t.h
+++ b/libc/include/llvm-libc-types/ino_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_INO_T_H__
-#define __LLVM_LIBC_TYPES_INO_T_H__
+#ifndef LLVM_LIBC_TYPES_INO_T_H
+#define LLVM_LIBC_TYPES_INO_T_H
typedef __UINTPTR_TYPE__ ino_t;
-#endif // __LLVM_LIBC_TYPES_INO_T_H__
+#endif // LLVM_LIBC_TYPES_INO_T_H
diff --git a/libc/include/llvm-libc-types/jmp_buf.h b/libc/include/llvm-libc-types/jmp_buf.h
index 6af4e8e..29a1df9 100644
--- a/libc/include/llvm-libc-types/jmp_buf.h
+++ b/libc/include/llvm-libc-types/jmp_buf.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_JMP_BUF_H__
-#define __LLVM_LIBC_TYPES_JMP_BUF_H__
+#ifndef LLVM_LIBC_TYPES_JMP_BUF_H
+#define LLVM_LIBC_TYPES_JMP_BUF_H
typedef struct {
#ifdef __x86_64__
@@ -39,4 +39,4 @@ typedef struct {
typedef __jmp_buf jmp_buf[1];
-#endif // __LLVM_LIBC_TYPES_JMP_BUF_H__
+#endif // LLVM_LIBC_TYPES_JMP_BUF_H
diff --git a/libc/include/llvm-libc-types/ldiv_t.h b/libc/include/llvm-libc-types/ldiv_t.h
index 9bd8d25..5c64ec1 100644
--- a/libc/include/llvm-libc-types/ldiv_t.h
+++ b/libc/include/llvm-libc-types/ldiv_t.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_LDIV_T_H__
-#define __LLVM_LIBC_TYPES_LDIV_T_H__
+#ifndef LLVM_LIBC_TYPES_LDIV_T_H
+#define LLVM_LIBC_TYPES_LDIV_T_H
typedef struct {
long quot;
long rem;
} ldiv_t;
-#endif // __LLVM_LIBC_TYPES_LDIV_T_H__
+#endif // LLVM_LIBC_TYPES_LDIV_T_H
diff --git a/libc/include/llvm-libc-types/lldiv_t.h b/libc/include/llvm-libc-types/lldiv_t.h
index 109304d1..5b8dcbe 100644
--- a/libc/include/llvm-libc-types/lldiv_t.h
+++ b/libc/include/llvm-libc-types/lldiv_t.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_LLDIV_T_H__
-#define __LLVM_LIBC_TYPES_LLDIV_T_H__
+#ifndef LLVM_LIBC_TYPES_LLDIV_T_H
+#define LLVM_LIBC_TYPES_LLDIV_T_H
typedef struct {
long long quot;
long long rem;
} lldiv_t;
-#endif // __LLVM_LIBC_TYPES_LLDIV_T_H__
+#endif // LLVM_LIBC_TYPES_LLDIV_T_H
diff --git a/libc/include/llvm-libc-types/mode_t.h b/libc/include/llvm-libc-types/mode_t.h
index 20037bb..fe09060 100644
--- a/libc/include/llvm-libc-types/mode_t.h
+++ b/libc/include/llvm-libc-types/mode_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_MODE_T_H
-#define __LLVM_LIBC_TYPES_MODE_T_H
+#ifndef LLVM_LIBC_TYPES_MODE_T_H
+#define LLVM_LIBC_TYPES_MODE_T_H
typedef unsigned mode_t;
-#endif // __LLVM_LIBC_TYPES_MODE_T_H
+#endif // LLVM_LIBC_TYPES_MODE_T_H
diff --git a/libc/include/llvm-libc-types/mtx_t.h b/libc/include/llvm-libc-types/mtx_t.h
index ac6453e..0f3882c 100644
--- a/libc/include/llvm-libc-types/mtx_t.h
+++ b/libc/include/llvm-libc-types/mtx_t.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_MTX_T_H__
-#define __LLVM_LIBC_TYPES_MTX_T_H__
+#ifndef LLVM_LIBC_TYPES_MTX_T_H
+#define LLVM_LIBC_TYPES_MTX_T_H
#include <llvm-libc-types/__mutex_type.h>
typedef __mutex_type mtx_t;
-#endif // __LLVM_LIBC_TYPES_MTX_T_H__
+#endif // LLVM_LIBC_TYPES_MTX_T_H
diff --git a/libc/include/llvm-libc-types/nlink_t.h b/libc/include/llvm-libc-types/nlink_t.h
index 1826144..7e0016a 100644
--- a/libc/include/llvm-libc-types/nlink_t.h
+++ b/libc/include/llvm-libc-types/nlink_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_NLINK_T_H__
-#define __LLVM_LIBC_TYPES_NLINK_T_H__
+#ifndef LLVM_LIBC_TYPES_NLINK_T_H
+#define LLVM_LIBC_TYPES_NLINK_T_H
typedef __UINTPTR_TYPE__ nlink_t;
-#endif // __LLVM_LIBC_TYPES_NLINK_T_H__
+#endif // LLVM_LIBC_TYPES_NLINK_T_H
diff --git a/libc/include/llvm-libc-types/off64_t.h b/libc/include/llvm-libc-types/off64_t.h
index 0f95caa..669698a 100644
--- a/libc/include/llvm-libc-types/off64_t.h
+++ b/libc/include/llvm-libc-types/off64_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_OFF64_T_H__
-#define __LLVM_LIBC_TYPES_OFF64_T_H__
+#ifndef LLVM_LIBC_TYPES_OFF64_T_H
+#define LLVM_LIBC_TYPES_OFF64_T_H
typedef __INT64_TYPE__ off64_t;
-#endif // __LLVM_LIBC_TYPES_OFF64_T_H__
+#endif // LLVM_LIBC_TYPES_OFF64_T_H
diff --git a/libc/include/llvm-libc-types/off_t.h b/libc/include/llvm-libc-types/off_t.h
index 111b29aa..63224b6 100644
--- a/libc/include/llvm-libc-types/off_t.h
+++ b/libc/include/llvm-libc-types/off_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_OFF_T_H__
-#define __LLVM_LIBC_TYPES_OFF_T_H__
+#ifndef LLVM_LIBC_TYPES_OFF_T_H
+#define LLVM_LIBC_TYPES_OFF_T_H
typedef __INT64_TYPE__ off_t;
-#endif // __LLVM_LIBC_TYPES_OFF_T_H__
+#endif // LLVM_LIBC_TYPES_OFF_T_H
diff --git a/libc/include/llvm-libc-types/once_flag.h b/libc/include/llvm-libc-types/once_flag.h
index 77bab28..cb80112 100644
--- a/libc/include/llvm-libc-types/once_flag.h
+++ b/libc/include/llvm-libc-types/once_flag.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_ONCE_FLAG_H__
-#define __LLVM_LIBC_TYPES_ONCE_FLAG_H__
+#ifndef LLVM_LIBC_TYPES_ONCE_FLAG_H
+#define LLVM_LIBC_TYPES_ONCE_FLAG_H
#include <llvm-libc-types/__futex_word.h>
@@ -17,4 +17,4 @@ typedef __futex_word once_flag;
#error "Once flag type not defined for the target platform."
#endif
-#endif // __LLVM_LIBC_TYPES_ONCE_FLAG_H__
+#endif // LLVM_LIBC_TYPES_ONCE_FLAG_H
diff --git a/libc/include/llvm-libc-types/pid_t.h b/libc/include/llvm-libc-types/pid_t.h
index d78fde7..0397bd2 100644
--- a/libc/include/llvm-libc-types/pid_t.h
+++ b/libc/include/llvm-libc-types/pid_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PID_t_H__
-#define __LLVM_LIBC_TYPES_PID_t_H__
+#ifndef LLVM_LIBC_TYPES_PID_T_H
+#define LLVM_LIBC_TYPES_PID_T_H
typedef __INT32_TYPE__ pid_t;
-#endif // __LLVM_LIBC_TYPES_PID_t_H__
+#endif // LLVM_LIBC_TYPES_PID_T_H
diff --git a/libc/include/llvm-libc-types/posix_spawn_file_actions_t.h b/libc/include/llvm-libc-types/posix_spawn_file_actions_t.h
index 55adbd1..3062da3 100644
--- a/libc/include/llvm-libc-types/posix_spawn_file_actions_t.h
+++ b/libc/include/llvm-libc-types/posix_spawn_file_actions_t.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_POSIX_SPAWN_FILE_ACTIONS_T_T_H
-#define __LLVM_LIBC_TYPES_POSIX_SPAWN_FILE_ACTIONS_T_T_H
+#ifndef LLVM_LIBC_TYPES_POSIX_SPAWN_FILE_ACTIONS_T_H
+#define LLVM_LIBC_TYPES_POSIX_SPAWN_FILE_ACTIONS_T_H
typedef struct {
void *__front;
void *__back;
} posix_spawn_file_actions_t;
-#endif // __LLVM_LIBC_TYPES_POSIX_SPAWN_FILE_ACTIONS_T_T_H
+#endif // LLVM_LIBC_TYPES_POSIX_SPAWN_FILE_ACTIONS_T_H
diff --git a/libc/include/llvm-libc-types/posix_spawnattr_t.h b/libc/include/llvm-libc-types/posix_spawnattr_t.h
index f1bcb3e..47cadc7c 100644
--- a/libc/include/llvm-libc-types/posix_spawnattr_t.h
+++ b/libc/include/llvm-libc-types/posix_spawnattr_t.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_POSIX_SPAWNATTR_T_H
-#define __LLVM_LIBC_TYPES_POSIX_SPAWNATTR_T_H
+#ifndef LLVM_LIBC_TYPES_POSIX_SPAWNATTR_T_H
+#define LLVM_LIBC_TYPES_POSIX_SPAWNATTR_T_H
typedef struct {
// This data structure will be populated as required.
} posix_spawnattr_t;
-#endif // __LLVM_LIBC_TYPES_POSIX_SPAWNATTR_T_H
+#endif // LLVM_LIBC_TYPES_POSIX_SPAWNATTR_T_H
diff --git a/libc/include/llvm-libc-types/pthread_attr_t.h b/libc/include/llvm-libc-types/pthread_attr_t.h
index 7bf8a54..66c04de 100644
--- a/libc/include/llvm-libc-types/pthread_attr_t.h
+++ b/libc/include/llvm-libc-types/pthread_attr_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_ATTR_T_H
-#define __LLVM_LIBC_TYPES_PTHREAD_ATTR_T_H
+#ifndef LLVM_LIBC_TYPES_PTHREAD_ATTR_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_ATTR_T_H
#include <llvm-libc-types/size_t.h>
@@ -18,4 +18,4 @@ typedef struct {
size_t __guardsize;
} pthread_attr_t;
-#endif // __LLVM_LIBC_TYPES_PTHREAD_ATTR_T_H
+#endif // LLVM_LIBC_TYPES_PTHREAD_ATTR_T_H
diff --git a/libc/include/llvm-libc-types/pthread_key_t.h b/libc/include/llvm-libc-types/pthread_key_t.h
index 351e376..e73c7e2 100644
--- a/libc/include/llvm-libc-types/pthread_key_t.h
+++ b/libc/include/llvm-libc-types/pthread_key_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_KEY_T_H__
-#define __LLVM_LIBC_TYPES_PTHREAD_KEY_T_H__
+#ifndef LLVM_LIBC_TYPES_PTHREAD_KEY_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_KEY_T_H
typedef unsigned int pthread_key_t;
-#endif // __LLVM_LIBC_TYPES_PTHREAD_KEY_T_H__
+#endif // LLVM_LIBC_TYPES_PTHREAD_KEY_T_H
diff --git a/libc/include/llvm-libc-types/pthread_mutex_t.h b/libc/include/llvm-libc-types/pthread_mutex_t.h
index 65e4353..b1eb21f 100644
--- a/libc/include/llvm-libc-types/pthread_mutex_t.h
+++ b/libc/include/llvm-libc-types/pthread_mutex_t.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
-#define __LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
+#ifndef LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
#include <llvm-libc-types/__mutex_type.h>
typedef __mutex_type pthread_mutex_t;
-#endif // __LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
+#endif // LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
diff --git a/libc/include/llvm-libc-types/pthread_mutexattr_t.h b/libc/include/llvm-libc-types/pthread_mutexattr_t.h
index be1ff56..8f159a6 100644
--- a/libc/include/llvm-libc-types/pthread_mutexattr_t.h
+++ b/libc/include/llvm-libc-types/pthread_mutexattr_t.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_MUTEXATTR_T_H
-#define __LLVM_LIBC_TYPES_PTHREAD_MUTEXATTR_T_H
+#ifndef LLVM_LIBC_TYPES_PTHREAD_MUTEXATTR_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_MUTEXATTR_T_H
// pthread_mutexattr_t is a collection bit mapped flags. The mapping is internal
// detail of the libc implementation.
typedef unsigned int pthread_mutexattr_t;
-#endif // __LLVM_LIBC_TYPES_PTHREAD_MUTEXATTR_T_H
+#endif // LLVM_LIBC_TYPES_PTHREAD_MUTEXATTR_T_H
diff --git a/libc/include/llvm-libc-types/pthread_once_t.h b/libc/include/llvm-libc-types/pthread_once_t.h
index 6d65f8f..3fe78b7 100644
--- a/libc/include/llvm-libc-types/pthread_once_t.h
+++ b/libc/include/llvm-libc-types/pthread_once_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_ONCE_T_H__
-#define __LLVM_LIBC_TYPES_PTHREAD_ONCE_T_H__
+#ifndef LLVM_LIBC_TYPES_PTHREAD_ONCE_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_ONCE_T_H
#include <llvm-libc-types/__futex_word.h>
@@ -17,4 +17,4 @@ typedef __futex_word pthread_once_t;
#error "Once flag type not defined for the target platform."
#endif
-#endif // __LLVM_LIBC_TYPES_PTHREAD_ONCE_T_H__
+#endif // LLVM_LIBC_TYPES_PTHREAD_ONCE_T_H
diff --git a/libc/include/llvm-libc-types/pthread_t.h b/libc/include/llvm-libc-types/pthread_t.h
index 8130491..72c14e1 100644
--- a/libc/include/llvm-libc-types/pthread_t.h
+++ b/libc/include/llvm-libc-types/pthread_t.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_PTHREAD_T_H__
-#define __LLVM_LIBC_TYPES_PTHREAD_T_H__
+#ifndef LLVM_LIBC_TYPES_PTHREAD_T_H
+#define LLVM_LIBC_TYPES_PTHREAD_T_H
#include <llvm-libc-types/__thread_type.h>
typedef __thread_type pthread_t;
-#endif // __LLVM_LIBC_TYPES_PTHREAD_T_H__
+#endif // LLVM_LIBC_TYPES_PTHREAD_T_H
diff --git a/libc/include/llvm-libc-types/rlim_t.h b/libc/include/llvm-libc-types/rlim_t.h
index 4e5acfb..016ec7b 100644
--- a/libc/include/llvm-libc-types/rlim_t.h
+++ b/libc/include/llvm-libc-types/rlim_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_RLIM_T_H__
-#define __LLVM_LIBC_TYPES_RLIM_T_H__
+#ifndef LLVM_LIBC_TYPES_RLIM_T_H
+#define LLVM_LIBC_TYPES_RLIM_T_H
typedef __UINT64_TYPE__ rlim_t;
-#endif // __LLVM_LIBC_TYPES_RLIM_T_H__
+#endif // LLVM_LIBC_TYPES_RLIM_T_H
diff --git a/libc/include/llvm-libc-types/rpc_opcodes_t.h b/libc/include/llvm-libc-types/rpc_opcodes_t.h
index 7b85428..919ea03 100644
--- a/libc/include/llvm-libc-types/rpc_opcodes_t.h
+++ b/libc/include/llvm-libc-types/rpc_opcodes_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_RPC_OPCODE_H__
-#define __LLVM_LIBC_TYPES_RPC_OPCODE_H__
+#ifndef LLVM_LIBC_TYPES_RPC_OPCODES_T_H
+#define LLVM_LIBC_TYPES_RPC_OPCODES_T_H
typedef enum {
RPC_NOOP = 0,
@@ -34,4 +34,4 @@ typedef enum {
RPC_LAST = 0xFFFF,
} rpc_opcode_t;
-#endif // __LLVM_LIBC_TYPES_RPC_OPCODE_H__
+#endif // LLVM_LIBC_TYPES_RPC_OPCODES_T_H
diff --git a/libc/include/llvm-libc-types/sa_family_t.h b/libc/include/llvm-libc-types/sa_family_t.h
index 52b6995..0a010b6 100644
--- a/libc/include/llvm-libc-types/sa_family_t.h
+++ b/libc/include/llvm-libc-types/sa_family_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SA_FAMILY_T_H__
-#define __LLVM_LIBC_TYPES_SA_FAMILY_T_H__
+#ifndef LLVM_LIBC_TYPES_SA_FAMILY_T_H
+#define LLVM_LIBC_TYPES_SA_FAMILY_T_H
// The posix standard only says of sa_family_t that it must be unsigned. The
// linux man page for "address_families" lists approximately 32 different
@@ -16,4 +16,4 @@
typedef unsigned short sa_family_t;
-#endif // __LLVM_LIBC_TYPES_SA_FAMILY_T_H__
+#endif // LLVM_LIBC_TYPES_SA_FAMILY_T_H
diff --git a/libc/include/llvm-libc-types/sig_atomic_t.h b/libc/include/llvm-libc-types/sig_atomic_t.h
index 324629c..2ef3758 100644
--- a/libc/include/llvm-libc-types/sig_atomic_t.h
+++ b/libc/include/llvm-libc-types/sig_atomic_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SIG_ATOMIC_T_H__
-#define __LLVM_LIBC_TYPES_SIG_ATOMIC_T_H__
+#ifndef LLVM_LIBC_TYPES_SIG_ATOMIC_T_H
+#define LLVM_LIBC_TYPES_SIG_ATOMIC_T_H
typedef int sig_atomic_t;
-#endif // __LLVM_LIBC_TYPES_SIG_ATOMIC_T_H__
+#endif // LLVM_LIBC_TYPES_SIG_ATOMIC_T_H
diff --git a/libc/include/llvm-libc-types/siginfo_t.h b/libc/include/llvm-libc-types/siginfo_t.h
index ef8af78..935ef4bb 100644
--- a/libc/include/llvm-libc-types/siginfo_t.h
+++ b/libc/include/llvm-libc-types/siginfo_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SIGINFO_T_H__
-#define __LLVM_LIBC_TYPES_SIGINFO_T_H__
+#ifndef LLVM_LIBC_TYPES_SIGINFO_T_H
+#define LLVM_LIBC_TYPES_SIGINFO_T_H
#include <llvm-libc-types/clock_t.h>
#include <llvm-libc-types/pid_t.h>
@@ -106,4 +106,4 @@ typedef struct {
#define si_syscall _sifields._sigsys._syscall
#define si_arch _sifields._sigsys._arch
-#endif // __LLVM_LIBC_TYPES_SIGINFO_T_H__
+#endif // LLVM_LIBC_TYPES_SIGINFO_T_H
diff --git a/libc/include/llvm-libc-types/sigset_t.h b/libc/include/llvm-libc-types/sigset_t.h
index bcfbc29..f159c6c 100644
--- a/libc/include/llvm-libc-types/sigset_t.h
+++ b/libc/include/llvm-libc-types/sigset_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SIGSET_T_H__
-#define __LLVM_LIBC_TYPES_SIGSET_T_H__
+#ifndef LLVM_LIBC_TYPES_SIGSET_T_H
+#define LLVM_LIBC_TYPES_SIGSET_T_H
#include <llvm-libc-macros/signal-macros.h>
@@ -17,4 +17,4 @@ typedef struct {
unsigned long __signals[__NSIGSET_WORDS];
} sigset_t;
-#endif // __LLVM_LIBC_TYPES_SIGSET_T_H__
+#endif // LLVM_LIBC_TYPES_SIGSET_T_H
diff --git a/libc/include/llvm-libc-types/size_t.h b/libc/include/llvm-libc-types/size_t.h
index 8eaf194..3b31b08 100644
--- a/libc/include/llvm-libc-types/size_t.h
+++ b/libc/include/llvm-libc-types/size_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SIZE_T_H__
-#define __LLVM_LIBC_TYPES_SIZE_T_H__
+#ifndef LLVM_LIBC_TYPES_SIZE_T_H
+#define LLVM_LIBC_TYPES_SIZE_T_H
// Since __need_size_t is defined, we get the definition of size_t from the
// standalone C header stddef.h. Also, because __need_size_t is defined,
@@ -16,4 +16,4 @@
#include <stddef.h>
#undef __need_size_t
-#endif // __LLVM_LIBC_TYPES_SIZE_T_H__
+#endif // LLVM_LIBC_TYPES_SIZE_T_H
diff --git a/libc/include/llvm-libc-types/socklen_t.h b/libc/include/llvm-libc-types/socklen_t.h
index 3134a53..5357747 100644
--- a/libc/include/llvm-libc-types/socklen_t.h
+++ b/libc/include/llvm-libc-types/socklen_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SOCKLEN_T_H__
-#define __LLVM_LIBC_TYPES_SOCKLEN_T_H__
+#ifndef LLVM_LIBC_TYPES_SOCKLEN_T_H
+#define LLVM_LIBC_TYPES_SOCKLEN_T_H
// The posix standard only says of socklen_t that it must be an integer type of
// width of at least 32 bits. The long type is defined as being at least 32
@@ -15,4 +15,4 @@
typedef unsigned long socklen_t;
-#endif // __LLVM_LIBC_TYPES_SOCKLEN_T_H__
+#endif // LLVM_LIBC_TYPES_SOCKLEN_T_H
diff --git a/libc/include/llvm-libc-types/speed_t.h b/libc/include/llvm-libc-types/speed_t.h
index b4ec13d..9875d3b8 100644
--- a/libc/include/llvm-libc-types/speed_t.h
+++ b/libc/include/llvm-libc-types/speed_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SPEED_T_H__
-#define __LLVM_LIBC_TYPES_SPEED_T_H__
+#ifndef LLVM_LIBC_TYPES_SPEED_T_H
+#define LLVM_LIBC_TYPES_SPEED_T_H
typedef unsigned int speed_t;
-#endif // __LLVM_LIBC_TYPES_SPEED_T_H__
+#endif // LLVM_LIBC_TYPES_SPEED_T_H
diff --git a/libc/include/llvm-libc-types/ssize_t.h b/libc/include/llvm-libc-types/ssize_t.h
index b887453..41e4b6d 100644
--- a/libc/include/llvm-libc-types/ssize_t.h
+++ b/libc/include/llvm-libc-types/ssize_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SSIZE_T_H__
-#define __LLVM_LIBC_TYPES_SSIZE_T_H__
+#ifndef LLVM_LIBC_TYPES_SSIZE_T_H
+#define LLVM_LIBC_TYPES_SSIZE_T_H
typedef __INT64_TYPE__ ssize_t;
-#endif // __LLVM_LIBC_TYPES_SSIZE_T_H__
+#endif // LLVM_LIBC_TYPES_SSIZE_T_H
diff --git a/libc/include/llvm-libc-types/stack_t.h b/libc/include/llvm-libc-types/stack_t.h
index f564d91..5fa4d3a 100644
--- a/libc/include/llvm-libc-types/stack_t.h
+++ b/libc/include/llvm-libc-types/stack_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STACK_T_H__
-#define __LLVM_LIBC_TYPES_STACK_T_H__
+#ifndef LLVM_LIBC_TYPES_STACK_T_H
+#define LLVM_LIBC_TYPES_STACK_T_H
#include <llvm-libc-types/size_t.h>
@@ -19,4 +19,4 @@ typedef struct {
size_t ss_size;
} stack_t;
-#endif // __LLVM_LIBC_TYPES_STACK_T_H__
+#endif // LLVM_LIBC_TYPES_STACK_T_H
diff --git a/libc/include/llvm-libc-types/struct_dirent.h b/libc/include/llvm-libc-types/struct_dirent.h
index de54a22..3c5b361 100644
--- a/libc/include/llvm-libc-types/struct_dirent.h
+++ b/libc/include/llvm-libc-types/struct_dirent.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_DIRENT_H__
-#define __LLVM_LIBC_TYPES_STRUCT_DIRENT_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_DIRENT_H
+#define LLVM_LIBC_TYPES_STRUCT_DIRENT_H
#include <llvm-libc-types/ino_t.h>
#include <llvm-libc-types/off_t.h>
@@ -26,4 +26,4 @@ struct dirent {
char d_name[1];
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_DIRENT_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_DIRENT_H
diff --git a/libc/include/llvm-libc-types/struct_epoll_data.h b/libc/include/llvm-libc-types/struct_epoll_data.h
index c363171..7200276 100644
--- a/libc/include/llvm-libc-types/struct_epoll_data.h
+++ b/libc/include/llvm-libc-types/struct_epoll_data.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_EPOLL_DATA_H__
-#define __LLVM_LIBC_TYPES_EPOLL_DATA_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_EPOLL_DATA_H
+#define LLVM_LIBC_TYPES_STRUCT_EPOLL_DATA_H
union epoll_data {
void *ptr;
@@ -18,4 +18,4 @@ union epoll_data {
typedef union epoll_data epoll_data_t;
-#endif // __LLVM_LIBC_TYPES_EPOLL_DATA_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_EPOLL_DATA_H
diff --git a/libc/include/llvm-libc-types/struct_epoll_event.h b/libc/include/llvm-libc-types/struct_epoll_event.h
index edfa026..6fc5b41 100644
--- a/libc/include/llvm-libc-types/struct_epoll_event.h
+++ b/libc/include/llvm-libc-types/struct_epoll_event.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_EPOLL_EVENT_H__
-#define __LLVM_LIBC_TYPES_EPOLL_EVENT_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_EPOLL_EVENT_H
+#define LLVM_LIBC_TYPES_STRUCT_EPOLL_EVENT_H
#include <llvm-libc-types/struct_epoll_data.h>
@@ -16,4 +16,4 @@ typedef struct epoll_event {
epoll_data_t data;
} epoll_event;
-#endif // __LLVM_LIBC_TYPES_EPOLL_EVENT_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_EPOLL_EVENT_H
diff --git a/libc/include/llvm-libc-types/struct_hsearch_data.h b/libc/include/llvm-libc-types/struct_hsearch_data.h
index 7e2a7232..cdb1d0c 100644
--- a/libc/include/llvm-libc-types/struct_hsearch_data.h
+++ b/libc/include/llvm-libc-types/struct_hsearch_data.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_HSEARCH_DATA_H__
-#define __LLVM_LIBC_TYPES_STRUCT_HSEARCH_DATA_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_HSEARCH_DATA_H
+#define LLVM_LIBC_TYPES_STRUCT_HSEARCH_DATA_H
struct hsearch_data {
void *__opaque;
unsigned int __unused[2];
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_HSEARCH_DATA_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_HSEARCH_DATA_H
diff --git a/libc/include/llvm-libc-types/struct_rlimit.h b/libc/include/llvm-libc-types/struct_rlimit.h
index 4fe0aa6..e093d9f 100644
--- a/libc/include/llvm-libc-types/struct_rlimit.h
+++ b/libc/include/llvm-libc-types/struct_rlimit.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_RLIMIT_H__
-#define __LLVM_LIBC_TYPES_STRUCT_RLIMIT_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_RLIMIT_H
+#define LLVM_LIBC_TYPES_STRUCT_RLIMIT_H
#include <llvm-libc-types/rlim_t.h>
@@ -16,4 +16,4 @@ struct rlimit {
rlim_t rlim_max;
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_RLIMIT_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_RLIMIT_H
diff --git a/libc/include/llvm-libc-types/struct_rusage.h b/libc/include/llvm-libc-types/struct_rusage.h
index 43f3457..21ea8b1 100644
--- a/libc/include/llvm-libc-types/struct_rusage.h
+++ b/libc/include/llvm-libc-types/struct_rusage.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_RUSAGE_H__
-#define __LLVM_LIBC_TYPES_STRUCT_RUSAGE_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_RUSAGE_H
+#define LLVM_LIBC_TYPES_STRUCT_RUSAGE_H
#include <llvm-libc-types/struct_timeval.h>
@@ -34,4 +34,4 @@ struct rusage {
#endif
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_RUSAGE_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_RUSAGE_H
diff --git a/libc/include/llvm-libc-types/struct_sched_param.h b/libc/include/llvm-libc-types/struct_sched_param.h
index 4f31881..0521a4d 100644
--- a/libc/include/llvm-libc-types/struct_sched_param.h
+++ b/libc/include/llvm-libc-types/struct_sched_param.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_SCHED_PARAM_H__
-#define __LLVM_LIBC_TYPES_STRUCT_SCHED_PARAM_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_SCHED_PARAM_H
+#define LLVM_LIBC_TYPES_STRUCT_SCHED_PARAM_H
#include <llvm-libc-types/pid_t.h>
#include <llvm-libc-types/struct_timespec.h>
@@ -18,4 +18,4 @@ struct sched_param {
int sched_priority;
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_SCHED_PARAM_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_SCHED_PARAM_H
diff --git a/libc/include/llvm-libc-types/struct_sigaction.h b/libc/include/llvm-libc-types/struct_sigaction.h
index 3940f14..54d2995f 100644
--- a/libc/include/llvm-libc-types/struct_sigaction.h
+++ b/libc/include/llvm-libc-types/struct_sigaction.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SIGACTION_H__
-#define __LLVM_LIBC_TYPES_SIGACTION_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_SIGACTION_H
+#define LLVM_LIBC_TYPES_STRUCT_SIGACTION_H
#include <llvm-libc-types/siginfo_t.h>
#include <llvm-libc-types/sigset_t.h>
@@ -27,4 +27,4 @@ struct sigaction {
typedef void (*__sighandler_t)(int);
-#endif // __LLVM_LIBC_TYPES_SIGACTION_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_SIGACTION_H
diff --git a/libc/include/llvm-libc-types/struct_sockaddr.h b/libc/include/llvm-libc-types/struct_sockaddr.h
index 9a6214c..074b1ae 100644
--- a/libc/include/llvm-libc-types/struct_sockaddr.h
+++ b/libc/include/llvm-libc-types/struct_sockaddr.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_SOCKADDR_H__
-#define __LLVM_LIBC_TYPES_STRUCT_SOCKADDR_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_SOCKADDR_H
+#define LLVM_LIBC_TYPES_STRUCT_SOCKADDR_H
#include <llvm-libc-types/sa_family_t.h>
@@ -18,4 +18,4 @@ struct sockaddr {
char sa_data[];
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_SOCKADDR_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_SOCKADDR_H
diff --git a/libc/include/llvm-libc-types/struct_sockaddr_un.h b/libc/include/llvm-libc-types/struct_sockaddr_un.h
index 9c3efea..4332419 100644
--- a/libc/include/llvm-libc-types/struct_sockaddr_un.h
+++ b/libc/include/llvm-libc-types/struct_sockaddr_un.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_SOCKADDR_UN_H__
-#define __LLVM_LIBC_TYPES_STRUCT_SOCKADDR_UN_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_SOCKADDR_UN_H
+#define LLVM_LIBC_TYPES_STRUCT_SOCKADDR_UN_H
#include <llvm-libc-types/sa_family_t.h>
@@ -19,4 +19,4 @@ struct sockaddr_un {
char sun_path[108]; /* Pathname */
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_SOCKADDR_UN_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_SOCKADDR_UN_H
diff --git a/libc/include/llvm-libc-types/struct_stat.h b/libc/include/llvm-libc-types/struct_stat.h
index baaef15..3539fb5 100644
--- a/libc/include/llvm-libc-types/struct_stat.h
+++ b/libc/include/llvm-libc-types/struct_stat.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_STAT_H__
-#define __LLVM_LIBC_TYPES_STRUCT_STAT_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_STAT_H
+#define LLVM_LIBC_TYPES_STRUCT_STAT_H
#include <llvm-libc-types/blkcnt_t.h>
#include <llvm-libc-types/blksize_t.h>
@@ -36,4 +36,4 @@ struct stat {
blkcnt_t st_blocks;
};
-#endif // __LLVM_LIBC_TYPES_STRUCT_STAT_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_STAT_H
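For illustration, the pattern these renames converge on, shown as a hedged sketch for a hypothetical header libc/include/llvm-libc-types/foo_t.h (foo_t is not a header touched by this patch): the guard is the uppercased include path with punctuation mapped to underscores, and the old "__..._H__" spelling is dropped because identifiers containing double underscores are reserved in C and C++.

// Hypothetical header libc/include/llvm-libc-types/foo_t.h (sketch only)
#ifndef LLVM_LIBC_TYPES_FOO_T_H
#define LLVM_LIBC_TYPES_FOO_T_H

typedef __INT32_TYPE__ foo_t; // placeholder definition

#endif // LLVM_LIBC_TYPES_FOO_T_H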
diff --git a/libc/include/llvm-libc-types/struct_timespec.h b/libc/include/llvm-libc-types/struct_timespec.h
index 1fa6272..5d56d9c 100644
--- a/libc/include/llvm-libc-types/struct_timespec.h
+++ b/libc/include/llvm-libc-types/struct_timespec.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TIMESPEC_H__
-#define __LLVM_LIBC_TYPES_TIMESPEC_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_TIMESPEC_H
+#define LLVM_LIBC_TYPES_STRUCT_TIMESPEC_H
#include <llvm-libc-types/time_t.h>
@@ -17,4 +17,4 @@ struct timespec {
long tv_nsec; /* Nanoseconds. */
};
-#endif // __LLVM_LIBC_TYPES_TIMESPEC_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_TIMESPEC_H
diff --git a/libc/include/llvm-libc-types/struct_timeval.h b/libc/include/llvm-libc-types/struct_timeval.h
index 756feca..6a0b7bb 100644
--- a/libc/include/llvm-libc-types/struct_timeval.h
+++ b/libc/include/llvm-libc-types/struct_timeval.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TIMEVAL_H__
-#define __LLVM_LIBC_TYPES_TIMEVAL_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_TIMEVAL_H
+#define LLVM_LIBC_TYPES_STRUCT_TIMEVAL_H
#include <llvm-libc-types/suseconds_t.h>
#include <llvm-libc-types/time_t.h>
@@ -17,4 +17,4 @@ struct timeval {
suseconds_t tv_usec; // Micro seconds
};
-#endif // __LLVM_LIBC_TYPES_TIMEVAL_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_TIMEVAL_H
diff --git a/libc/include/llvm-libc-types/struct_tm.h b/libc/include/llvm-libc-types/struct_tm.h
index 953e12e..9fef7c5 100644
--- a/libc/include/llvm-libc-types/struct_tm.h
+++ b/libc/include/llvm-libc-types/struct_tm.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TM_H__
-#define __LLVM_LIBC_TYPES_TM_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_TM_H
+#define LLVM_LIBC_TYPES_STRUCT_TM_H
struct tm {
int tm_sec; // seconds after the minute
@@ -21,4 +21,4 @@ struct tm {
int tm_isdst; // Daylight Saving Time flag
};
-#endif // __LLVM_LIBC_TYPES_TM_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_TM_H
diff --git a/libc/include/llvm-libc-types/struct_utsname.h b/libc/include/llvm-libc-types/struct_utsname.h
index bfd1ad9..e474171 100644
--- a/libc/include/llvm-libc-types/struct_utsname.h
+++ b/libc/include/llvm-libc-types/struct_utsname.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_STRUCT_UTSNAME_H__
-#define __LLVM_LIBC_TYPES_STRUCT_UTSNAME_H__
+#ifndef LLVM_LIBC_TYPES_STRUCT_UTSNAME_H
+#define LLVM_LIBC_TYPES_STRUCT_UTSNAME_H
#if defined(__linux__)
#define __UTS_NAME_LENGTH 65
@@ -31,4 +31,4 @@ struct utsname {
#undef __UTS_NAME_LENGTH
-#endif // __LLVM_LIBC_TYPES_STRUCT_UTSNAME_H__
+#endif // LLVM_LIBC_TYPES_STRUCT_UTSNAME_H
diff --git a/libc/include/llvm-libc-types/suseconds_t.h b/libc/include/llvm-libc-types/suseconds_t.h
index d7298ed..32ecc9f 100644
--- a/libc/include/llvm-libc-types/suseconds_t.h
+++ b/libc/include/llvm-libc-types/suseconds_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_SUSECONDS_T_H__
-#define __LLVM_LIBC_TYPES_SUSECONDS_T_H__
+#ifndef LLVM_LIBC_TYPES_SUSECONDS_T_H
+#define LLVM_LIBC_TYPES_SUSECONDS_T_H
typedef __INT32_TYPE__ suseconds_t;
-#endif // __LLVM_LIBC_TYPES_SUSECONDS_T_H__
+#endif // LLVM_LIBC_TYPES_SUSECONDS_T_H
diff --git a/libc/include/llvm-libc-types/tcflag_t.h b/libc/include/llvm-libc-types/tcflag_t.h
index 7c2ce21..2978487 100644
--- a/libc/include/llvm-libc-types/tcflag_t.h
+++ b/libc/include/llvm-libc-types/tcflag_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TCFLAG_T_H__
-#define __LLVM_LIBC_TYPES_TCFLAG_T_H__
+#ifndef LLVM_LIBC_TYPES_TCFLAG_T_H
+#define LLVM_LIBC_TYPES_TCFLAG_T_H
typedef unsigned int tcflag_t;
-#endif // __LLVM_LIBC_TYPES_TCFLAG_T_H__
+#endif // LLVM_LIBC_TYPES_TCFLAG_T_H
diff --git a/libc/include/llvm-libc-types/test_rpc_opcodes_t.h b/libc/include/llvm-libc-types/test_rpc_opcodes_t.h
index ec4eb26..7129768 100644
--- a/libc/include/llvm-libc-types/test_rpc_opcodes_t.h
+++ b/libc/include/llvm-libc-types/test_rpc_opcodes_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TEST_RPC_OPCODE_H__
-#define __LLVM_LIBC_TYPES_TEST_RPC_OPCODE_H__
+#ifndef LLVM_LIBC_TYPES_TEST_RPC_OPCODES_T_H
+#define LLVM_LIBC_TYPES_TEST_RPC_OPCODES_T_H
// We consider the first 32768 opcodes as reserved for libc purposes. We allow
// extensions to use any other number without conflicting with anything else.
@@ -18,4 +18,4 @@ typedef enum : unsigned short {
RPC_TEST_STREAM,
} rpc_test_opcode_t;
-#endif // __LLVM_LIBC_TYPES_TEST_RPC_OPCODE_H__
+#endif // LLVM_LIBC_TYPES_TEST_RPC_OPCODES_T_H
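The preserved comment above reserves the first 32768 opcodes for libc itself; as a hedged sketch (hypothetical extension, not part of this patch), an out-of-tree user would pick values at or above 1 << 15, using the same fixed-underlying-type enum syntax as the header:

// Hypothetical out-of-tree extension opcodes (sketch only)
typedef enum : unsigned short {
  MY_EXTENSION_PING = (1 << 15) + 1, // stays clear of the libc-reserved range
} my_extension_opcode_t;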
diff --git a/libc/include/llvm-libc-types/thrd_start_t.h b/libc/include/llvm-libc-types/thrd_start_t.h
index 83fc32c..1fb21bc 100644
--- a/libc/include/llvm-libc-types/thrd_start_t.h
+++ b/libc/include/llvm-libc-types/thrd_start_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_THRD_START_T_H__
-#define __LLVM_LIBC_TYPES_THRD_START_T_H__
+#ifndef LLVM_LIBC_TYPES_THRD_START_T_H
+#define LLVM_LIBC_TYPES_THRD_START_T_H
typedef int (*thrd_start_t)(void *);
-#endif // __LLVM_LIBC_TYPES_THRD_START_T_H__
+#endif // LLVM_LIBC_TYPES_THRD_START_T_H
diff --git a/libc/include/llvm-libc-types/thrd_t.h b/libc/include/llvm-libc-types/thrd_t.h
index 0743106..2e0f9a0 100644
--- a/libc/include/llvm-libc-types/thrd_t.h
+++ b/libc/include/llvm-libc-types/thrd_t.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_THRD_T_H__
-#define __LLVM_LIBC_TYPES_THRD_T_H__
+#ifndef LLVM_LIBC_TYPES_THRD_T_H
+#define LLVM_LIBC_TYPES_THRD_T_H
#include <llvm-libc-types/__thread_type.h>
typedef __thread_type thrd_t;
-#endif // __LLVM_LIBC_TYPES_THRD_T_H__
+#endif // LLVM_LIBC_TYPES_THRD_T_H
diff --git a/libc/include/llvm-libc-types/time_t.h b/libc/include/llvm-libc-types/time_t.h
index 2b3ccd4..59953b3 100644
--- a/libc/include/llvm-libc-types/time_t.h
+++ b/libc/include/llvm-libc-types/time_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TIME_T_H__
-#define __LLVM_LIBC_TYPES_TIME_T_H__
+#ifndef LLVM_LIBC_TYPES_TIME_T_H
+#define LLVM_LIBC_TYPES_TIME_T_H
#if (defined(__arm__) || defined(_M_ARM))
typedef __INTPTR_TYPE__ time_t;
@@ -15,4 +15,4 @@ typedef __INTPTR_TYPE__ time_t;
typedef __INT64_TYPE__ time_t;
#endif
-#endif // __LLVM_LIBC_TYPES_TIME_T_H__
+#endif // LLVM_LIBC_TYPES_TIME_T_H
diff --git a/libc/include/llvm-libc-types/tss_dtor_t.h b/libc/include/llvm-libc-types/tss_dtor_t.h
index f80661b..c54b34e 100644
--- a/libc/include/llvm-libc-types/tss_dtor_t.h
+++ b/libc/include/llvm-libc-types/tss_dtor_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TSS_DTOR_T_H__
-#define __LLVM_LIBC_TYPES_TSS_DTOR_T_H__
+#ifndef LLVM_LIBC_TYPES_TSS_DTOR_T_H
+#define LLVM_LIBC_TYPES_TSS_DTOR_T_H
typedef void (*tss_dtor_t)(void *);
-#endif // __LLVM_LIBC_TYPES_TSS_DTOR_T_H__
+#endif // LLVM_LIBC_TYPES_TSS_DTOR_T_H
diff --git a/libc/include/llvm-libc-types/tss_t.h b/libc/include/llvm-libc-types/tss_t.h
index 868ec1a..92bc7ef 100644
--- a/libc/include/llvm-libc-types/tss_t.h
+++ b/libc/include/llvm-libc-types/tss_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_TSS_T_H__
-#define __LLVM_LIBC_TYPES_TSS_T_H__
+#ifndef LLVM_LIBC_TYPES_TSS_T_H
+#define LLVM_LIBC_TYPES_TSS_T_H
typedef unsigned int tss_t;
-#endif // __LLVM_LIBC_TYPES_TSS_T_H__
+#endif // LLVM_LIBC_TYPES_TSS_T_H
diff --git a/libc/include/llvm-libc-types/uid_t.h b/libc/include/llvm-libc-types/uid_t.h
index ae9fac2..4f6c647 100644
--- a/libc/include/llvm-libc-types/uid_t.h
+++ b/libc/include/llvm-libc-types/uid_t.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_UID_T_H__
-#define __LLVM_LIBC_TYPES_UID_T_H__
+#ifndef LLVM_LIBC_TYPES_UID_T_H
+#define LLVM_LIBC_TYPES_UID_T_H
typedef __UINT32_TYPE__ uid_t;
-#endif // __LLVM_LIBC_TYPES_UID_T_H__
+#endif // LLVM_LIBC_TYPES_UID_T_H
diff --git a/libc/include/llvm-libc-types/union_sigval.h b/libc/include/llvm-libc-types/union_sigval.h
index ccc9f2e..5f83cd2 100644
--- a/libc/include/llvm-libc-types/union_sigval.h
+++ b/libc/include/llvm-libc-types/union_sigval.h
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_UNION_SIGVAL_H__
-#define __LLVM_LIBC_TYPES_UNION_SIGVAL_H__
+#ifndef LLVM_LIBC_TYPES_UNION_SIGVAL_H
+#define LLVM_LIBC_TYPES_UNION_SIGVAL_H
union sigval {
int sival_int;
void *sival_ptr;
};
-#endif // __LLVM_LIBC_TYPES_UNION_SIGVAL_H__
+#endif // LLVM_LIBC_TYPES_UNION_SIGVAL_H
diff --git a/libc/include/llvm-libc-types/wchar_t.h b/libc/include/llvm-libc-types/wchar_t.h
index 9efb5cd..3e9a70b 100644
--- a/libc/include/llvm-libc-types/wchar_t.h
+++ b/libc/include/llvm-libc-types/wchar_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_WCHAR_T_H__
-#define __LLVM_LIBC_TYPES_WCHAR_T_H__
+#ifndef LLVM_LIBC_TYPES_WCHAR_T_H
+#define LLVM_LIBC_TYPES_WCHAR_T_H
// Since __need_wchar_t is defined, we get the definition of wchar_t from the
// standalone C header stddef.h. Also, because __need_wchar_t is defined,
@@ -16,4 +16,4 @@
#include <stddef.h>
#undef __need_wchar_t
-#endif // __LLVM_LIBC_TYPES_WCHAR_T_H__
+#endif // LLVM_LIBC_TYPES_WCHAR_T_H
diff --git a/libc/include/llvm-libc-types/wint_t.h b/libc/include/llvm-libc-types/wint_t.h
index cf6ccd7..2758685 100644
--- a/libc/include/llvm-libc-types/wint_t.h
+++ b/libc/include/llvm-libc-types/wint_t.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef __LLVM_LIBC_TYPES_WINT_T_H__
-#define __LLVM_LIBC_TYPES_WINT_T_H__
+#ifndef LLVM_LIBC_TYPES_WINT_T_H
+#define LLVM_LIBC_TYPES_WINT_T_H
// Since __need_wint_t is defined, we get the definition of wint_t from the
// standalone C header stddef.h. Also, because __need_wint_t is defined,
@@ -16,4 +16,4 @@
#include <stddef.h>
#undef __need_wint_t
-#endif // __LLVM_LIBC_TYPES_WINT_T_H__
+#endif // LLVM_LIBC_TYPES_WINT_T_H
diff --git a/libc/include/sys/queue.h b/libc/include/sys/queue.h
index 2a4dc37..1cde35e 100644
--- a/libc/include/sys/queue.h
+++ b/libc/include/sys/queue.h
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SYS_QUEUE_H
-#define LLVM_LIBC_SYS_QUEUE_H
+#ifndef SYS_QUEUE_H
+#define SYS_QUEUE_H
#include <llvm-libc-macros/sys-queue-macros.h>
-#endif // LLVM_LIBC_SYS_QUEUE_H
+#endif // SYS_QUEUE_H
diff --git a/libc/src/__support/CPP/CMakeLists.txt b/libc/src/__support/CPP/CMakeLists.txt
index d7474127..6c35bc7 100644
--- a/libc/src/__support/CPP/CMakeLists.txt
+++ b/libc/src/__support/CPP/CMakeLists.txt
@@ -153,10 +153,10 @@ add_header_library(
type_traits/type_identity.h
type_traits/void_t.h
DEPENDS
+ libc.include.llvm-libc-macros.stdfix_macros
libc.src.__support.macros.attributes
libc.src.__support.macros.config
- libc.src.__support.macros.properties.float
- libc.include.llvm-libc-macros.stdfix_macros
+ libc.src.__support.macros.properties.types
)
add_header_library(
diff --git a/libc/src/__support/CPP/type_traits/is_fixed_point.h b/libc/src/__support/CPP/type_traits/is_fixed_point.h
index e139e64..09dba8b 100644
--- a/libc/src/__support/CPP/type_traits/is_fixed_point.h
+++ b/libc/src/__support/CPP/type_traits/is_fixed_point.h
@@ -43,4 +43,4 @@ LIBC_INLINE_VAR constexpr bool is_fixed_point_v = is_fixed_point<T>::value;
} // namespace LIBC_NAMESPACE::cpp
-#endif // LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_INTEGRAL_H
+#endif // LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_FIXED_POINT_H
diff --git a/libc/src/__support/CPP/type_traits/is_floating_point.h b/libc/src/__support/CPP/type_traits/is_floating_point.h
index 3a5260b..7f01cc4 100644
--- a/libc/src/__support/CPP/type_traits/is_floating_point.h
+++ b/libc/src/__support/CPP/type_traits/is_floating_point.h
@@ -11,7 +11,7 @@
#include "src/__support/CPP/type_traits/is_same.h"
#include "src/__support/CPP/type_traits/remove_cv.h"
#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE::cpp {
diff --git a/libc/src/__support/FPUtil/CMakeLists.txt b/libc/src/__support/FPUtil/CMakeLists.txt
index 0c932e8..f1c6fba 100644
--- a/libc/src/__support/FPUtil/CMakeLists.txt
+++ b/libc/src/__support/FPUtil/CMakeLists.txt
@@ -33,7 +33,7 @@ add_header_library(
libc.src.__support.CPP.type_traits
libc.src.__support.libc_assert
libc.src.__support.macros.attributes
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.math_extras
libc.src.__support.uint128
)
diff --git a/libc/src/__support/FPUtil/FPBits.h b/libc/src/__support/FPUtil/FPBits.h
index b3179a2..1703e9a 100644
--- a/libc/src/__support/FPUtil/FPBits.h
+++ b/libc/src/__support/FPUtil/FPBits.h
@@ -15,7 +15,7 @@
#include "src/__support/common.h"
#include "src/__support/libc_assert.h" // LIBC_ASSERT
#include "src/__support/macros/attributes.h" // LIBC_INLINE, LIBC_INLINE_VAR
-#include "src/__support/macros/properties/float.h" // LIBC_COMPILER_HAS_FLOAT128
+#include "src/__support/macros/properties/types.h" // LIBC_COMPILER_HAS_FLOAT128
#include "src/__support/math_extras.h" // mask_trailing_ones
#include <stdint.h>
@@ -239,23 +239,23 @@ protected:
// An opaque type to store a floating point exponent.
// We define special values but it is valid to create arbitrary values as long
- // as they are in the range [MIN, MAX].
+ // as they are in the range [min, max].
struct Exponent : public TypedInt<int32_t> {
using UP = TypedInt<int32_t>;
using UP::UP;
- LIBC_INLINE static constexpr auto SUBNORMAL() {
+ LIBC_INLINE static constexpr auto subnormal() {
return Exponent(-EXP_BIAS);
}
- LIBC_INLINE static constexpr auto MIN() { return Exponent(1 - EXP_BIAS); }
- LIBC_INLINE static constexpr auto ZERO() { return Exponent(0); }
- LIBC_INLINE static constexpr auto MAX() { return Exponent(EXP_BIAS); }
- LIBC_INLINE static constexpr auto INF() { return Exponent(EXP_BIAS + 1); }
+ LIBC_INLINE static constexpr auto min() { return Exponent(1 - EXP_BIAS); }
+ LIBC_INLINE static constexpr auto zero() { return Exponent(0); }
+ LIBC_INLINE static constexpr auto max() { return Exponent(EXP_BIAS); }
+ LIBC_INLINE static constexpr auto inf() { return Exponent(EXP_BIAS + 1); }
};
// An opaque type to store a floating point biased exponent.
// We define special values but it is valid to create arbitrary values as long
- // as they are in the range [BITS_ALL_ZEROES, BITS_ALL_ONES].
- // Values greater than BITS_ALL_ONES are truncated.
+ // as they are in the range [zero, bits_all_ones].
+ // Values greater than bits_all_ones are truncated.
struct BiasedExponent : public TypedInt<uint32_t> {
using UP = TypedInt<uint32_t>;
using UP::UP;
@@ -269,13 +269,13 @@ protected:
}
LIBC_INLINE constexpr BiasedExponent &operator++() {
- LIBC_ASSERT(*this != BiasedExponent(Exponent::INF()));
+ LIBC_ASSERT(*this != BiasedExponent(Exponent::inf()));
++UP::value;
return *this;
}
LIBC_INLINE constexpr BiasedExponent &operator--() {
- LIBC_ASSERT(*this != BiasedExponent(Exponent::SUBNORMAL()));
+ LIBC_ASSERT(*this != BiasedExponent(Exponent::subnormal()));
--UP::value;
return *this;
}
@@ -283,9 +283,9 @@ protected:
// An opaque type to store a floating point significand.
// We define special values but it is valid to create arbitrary values as long
- // as they are in the range [ZERO, BITS_ALL_ONES].
+ // as they are in the range [zero, bits_all_ones].
// Note that the semantics of the Significand are implementation dependent.
- // Values greater than BITS_ALL_ONES are truncated.
+ // Values greater than bits_all_ones are truncated.
struct Significand : public TypedInt<StorageType> {
using UP = TypedInt<StorageType>;
using UP::UP;
@@ -305,16 +305,16 @@ protected:
return Significand(StorageType(a.to_storage_type() >> shift));
}
- LIBC_INLINE static constexpr auto ZERO() {
+ LIBC_INLINE static constexpr auto zero() {
return Significand(StorageType(0));
}
- LIBC_INLINE static constexpr auto LSB() {
+ LIBC_INLINE static constexpr auto lsb() {
return Significand(StorageType(1));
}
- LIBC_INLINE static constexpr auto MSB() {
+ LIBC_INLINE static constexpr auto msb() {
return Significand(StorageType(1) << (SIG_LEN - 1));
}
- LIBC_INLINE static constexpr auto BITS_ALL_ONES() {
+ LIBC_INLINE static constexpr auto bits_all_ones() {
return Significand(SIG_MASK);
}
};
@@ -393,58 +393,58 @@ protected:
public:
// Builders
LIBC_INLINE static constexpr RetT zero(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::SUBNORMAL(), Significand::ZERO()));
+ return RetT(encode(sign, Exponent::subnormal(), Significand::zero()));
}
LIBC_INLINE static constexpr RetT one(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::ZERO(), Significand::ZERO()));
+ return RetT(encode(sign, Exponent::zero(), Significand::zero()));
}
LIBC_INLINE static constexpr RetT min_subnormal(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::SUBNORMAL(), Significand::LSB()));
+ return RetT(encode(sign, Exponent::subnormal(), Significand::lsb()));
}
LIBC_INLINE static constexpr RetT max_subnormal(Sign sign = Sign::POS) {
return RetT(
- encode(sign, Exponent::SUBNORMAL(), Significand::BITS_ALL_ONES()));
+ encode(sign, Exponent::subnormal(), Significand::bits_all_ones()));
}
LIBC_INLINE static constexpr RetT min_normal(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::MIN(), Significand::ZERO()));
+ return RetT(encode(sign, Exponent::min(), Significand::zero()));
}
LIBC_INLINE static constexpr RetT max_normal(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::MAX(), Significand::BITS_ALL_ONES()));
+ return RetT(encode(sign, Exponent::max(), Significand::bits_all_ones()));
}
LIBC_INLINE static constexpr RetT inf(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::INF(), Significand::ZERO()));
+ return RetT(encode(sign, Exponent::inf(), Significand::zero()));
}
LIBC_INLINE static constexpr RetT signaling_nan(Sign sign = Sign::POS,
StorageType v = 0) {
- return RetT(encode(sign, Exponent::INF(),
- (v ? Significand(v) : (Significand::MSB() >> 1))));
+ return RetT(encode(sign, Exponent::inf(),
+ (v ? Significand(v) : (Significand::msb() >> 1))));
}
LIBC_INLINE static constexpr RetT quiet_nan(Sign sign = Sign::POS,
StorageType v = 0) {
return RetT(
- encode(sign, Exponent::INF(), Significand::MSB() | Significand(v)));
+ encode(sign, Exponent::inf(), Significand::msb() | Significand(v)));
}
// Observers
LIBC_INLINE constexpr bool is_zero() const { return exp_sig_bits() == 0; }
LIBC_INLINE constexpr bool is_nan() const {
- return exp_sig_bits() > encode(Exponent::INF(), Significand::ZERO());
+ return exp_sig_bits() > encode(Exponent::inf(), Significand::zero());
}
LIBC_INLINE constexpr bool is_quiet_nan() const {
- return exp_sig_bits() >= encode(Exponent::INF(), Significand::MSB());
+ return exp_sig_bits() >= encode(Exponent::inf(), Significand::msb());
}
LIBC_INLINE constexpr bool is_signaling_nan() const {
return is_nan() && !is_quiet_nan();
}
LIBC_INLINE constexpr bool is_inf() const {
- return exp_sig_bits() == encode(Exponent::INF(), Significand::ZERO());
+ return exp_sig_bits() == encode(Exponent::inf(), Significand::zero());
}
LIBC_INLINE constexpr bool is_finite() const {
- return exp_bits() != encode(Exponent::INF());
+ return exp_bits() != encode(Exponent::inf());
}
LIBC_INLINE
constexpr bool is_subnormal() const {
- return exp_bits() == encode(Exponent::SUBNORMAL());
+ return exp_bits() == encode(Exponent::subnormal());
}
LIBC_INLINE constexpr bool is_normal() const {
return is_finite() && !is_subnormal();
@@ -493,37 +493,37 @@ protected:
public:
// Builders
LIBC_INLINE static constexpr RetT zero(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::SUBNORMAL(), Significand::ZERO()));
+ return RetT(encode(sign, Exponent::subnormal(), Significand::zero()));
}
LIBC_INLINE static constexpr RetT one(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::ZERO(), Significand::MSB()));
+ return RetT(encode(sign, Exponent::zero(), Significand::msb()));
}
LIBC_INLINE static constexpr RetT min_subnormal(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::SUBNORMAL(), Significand::LSB()));
+ return RetT(encode(sign, Exponent::subnormal(), Significand::lsb()));
}
LIBC_INLINE static constexpr RetT max_subnormal(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::SUBNORMAL(),
- Significand::BITS_ALL_ONES() ^ Significand::MSB()));
+ return RetT(encode(sign, Exponent::subnormal(),
+ Significand::bits_all_ones() ^ Significand::msb()));
}
LIBC_INLINE static constexpr RetT min_normal(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::MIN(), Significand::MSB()));
+ return RetT(encode(sign, Exponent::min(), Significand::msb()));
}
LIBC_INLINE static constexpr RetT max_normal(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::MAX(), Significand::BITS_ALL_ONES()));
+ return RetT(encode(sign, Exponent::max(), Significand::bits_all_ones()));
}
LIBC_INLINE static constexpr RetT inf(Sign sign = Sign::POS) {
- return RetT(encode(sign, Exponent::INF(), Significand::MSB()));
+ return RetT(encode(sign, Exponent::inf(), Significand::msb()));
}
LIBC_INLINE static constexpr RetT signaling_nan(Sign sign = Sign::POS,
StorageType v = 0) {
- return RetT(encode(sign, Exponent::INF(),
- Significand::MSB() |
- (v ? Significand(v) : (Significand::MSB() >> 2))));
+ return RetT(encode(sign, Exponent::inf(),
+ Significand::msb() |
+ (v ? Significand(v) : (Significand::msb() >> 2))));
}
LIBC_INLINE static constexpr RetT quiet_nan(Sign sign = Sign::POS,
StorageType v = 0) {
- return RetT(encode(sign, Exponent::INF(),
- Significand::MSB() | (Significand::MSB() >> 1) |
+ return RetT(encode(sign, Exponent::inf(),
+ Significand::msb() | (Significand::msb() >> 1) |
Significand(v)));
}
@@ -541,33 +541,33 @@ public:
// - Quiet Not a Number
// - Unnormal
// This can be reduced to the following logic:
- if (exp_bits() == encode(Exponent::INF()))
+ if (exp_bits() == encode(Exponent::inf()))
return !is_inf();
- if (exp_bits() != encode(Exponent::SUBNORMAL()))
- return (sig_bits() & encode(Significand::MSB())) == 0;
+ if (exp_bits() != encode(Exponent::subnormal()))
+ return (sig_bits() & encode(Significand::msb())) == 0;
return false;
}
LIBC_INLINE constexpr bool is_quiet_nan() const {
return exp_sig_bits() >=
- encode(Exponent::INF(),
- Significand::MSB() | (Significand::MSB() >> 1));
+ encode(Exponent::inf(),
+ Significand::msb() | (Significand::msb() >> 1));
}
LIBC_INLINE constexpr bool is_signaling_nan() const {
return is_nan() && !is_quiet_nan();
}
LIBC_INLINE constexpr bool is_inf() const {
- return exp_sig_bits() == encode(Exponent::INF(), Significand::MSB());
+ return exp_sig_bits() == encode(Exponent::inf(), Significand::msb());
}
LIBC_INLINE constexpr bool is_finite() const {
return !is_inf() && !is_nan();
}
LIBC_INLINE
constexpr bool is_subnormal() const {
- return exp_bits() == encode(Exponent::SUBNORMAL());
+ return exp_bits() == encode(Exponent::subnormal());
}
LIBC_INLINE constexpr bool is_normal() const {
const auto exp = exp_bits();
- if (exp == encode(Exponent::SUBNORMAL()) || exp == encode(Exponent::INF()))
+ if (exp == encode(Exponent::subnormal()) || exp == encode(Exponent::inf()))
return false;
return get_implicit_bit();
}
@@ -578,7 +578,7 @@ public:
} else if (exp_sig_bits() == max_subnormal().uintval()) {
return min_normal(sign());
} else if (sig_bits() == SIG_MASK) {
- return RetT(encode(sign(), ++biased_exponent(), Significand::ZERO()));
+ return RetT(encode(sign(), ++biased_exponent(), Significand::zero()));
} else {
return RetT(bits + StorageType(1));
}
@@ -715,9 +715,9 @@ public:
LIBC_INLINE constexpr int get_explicit_exponent() const {
Exponent exponent(UP::biased_exponent());
if (is_zero())
- exponent = Exponent::ZERO();
- if (exponent == Exponent::SUBNORMAL())
- exponent = Exponent::MIN();
+ exponent = Exponent::zero();
+ if (exponent == Exponent::subnormal())
+ exponent = Exponent::min();
return static_cast<int32_t>(exponent);
}
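The FPBits.h hunks above rename the internal special-value factories from SCREAMING_CASE (INF(), MSB(), SUBNORMAL(), ...) to lowercase while keeping them constexpr. A hedged usage sketch, assuming the usual include path and the LIBC_NAMESPACE::fputil namespace (not taken from this patch), showing that only the spelling of the public builders changes:

// Hedged usage sketch (assumptions noted above; not part of this patch)
#include "src/__support/FPUtil/FPBits.h"

using FPB = LIBC_NAMESPACE::fputil::FPBits<double>;
static_assert(FPB::inf().is_inf(), "inf() builds +infinity by default");
static_assert(FPB::zero().is_zero(), "zero() builds +0.0 by default");
static_assert(FPB::quiet_nan().is_quiet_nan(), "quiet_nan() builds a qNaN");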
diff --git a/libc/src/__support/FPUtil/fpbits_str.h b/libc/src/__support/FPUtil/fpbits_str.h
index a1654cd..212265b 100644
--- a/libc/src/__support/FPUtil/fpbits_str.h
+++ b/libc/src/__support/FPUtil/fpbits_str.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_FPUTIL_FP_BITS_STR_H
-#define LLVM_LIBC_SRC___SUPPORT_FPUTIL_FP_BITS_STR_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_FPUTIL_FPBITS_STR_H
+#define LLVM_LIBC_SRC___SUPPORT_FPUTIL_FPBITS_STR_H
#include "src/__support/CPP/string.h"
#include "src/__support/CPP/type_traits.h"
@@ -73,4 +73,4 @@ template <typename T> LIBC_INLINE cpp::string str(fputil::FPBits<T> x) {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_FPUTIL_FP_BITS_STR_H
+#endif // LLVM_LIBC_SRC___SUPPORT_FPUTIL_FPBITS_STR_H
diff --git a/libc/src/__support/GPU/generic/utils.h b/libc/src/__support/GPU/generic/utils.h
index 58db88d..c6c3c01 100644
--- a/libc/src/__support/GPU/generic/utils.h
+++ b/libc/src/__support/GPU/generic/utils.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_GPU_GENERIC_IO_H
-#define LLVM_LIBC_SRC___SUPPORT_GPU_GENERIC_IO_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_GPU_GENERIC_UTILS_H
+#define LLVM_LIBC_SRC___SUPPORT_GPU_GENERIC_UTILS_H
#include "src/__support/common.h"
@@ -78,4 +78,4 @@ LIBC_INLINE uint32_t get_cluster_id() { return 0; }
} // namespace gpu
} // namespace LIBC_NAMESPACE
-#endif
+#endif // LLVM_LIBC_SRC___SUPPORT_GPU_GENERIC_UTILS_H
diff --git a/libc/src/__support/GPU/utils.h b/libc/src/__support/GPU/utils.h
index 6505b18..0f9167c 100644
--- a/libc/src/__support/GPU/utils.h
+++ b/libc/src/__support/GPU/utils.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_GPU_UTIL_H
-#define LLVM_LIBC_SRC___SUPPORT_GPU_UTIL_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_GPU_UTILS_H
+#define LLVM_LIBC_SRC___SUPPORT_GPU_UTILS_H
#include "src/__support/macros/properties/architectures.h"
@@ -34,4 +34,4 @@ LIBC_INLINE bool is_first_lane(uint64_t lane_mask) {
} // namespace gpu
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_OSUTIL_IO_H
+#endif // LLVM_LIBC_SRC___SUPPORT_GPU_UTILS_H
diff --git a/libc/src/__support/HashTable/table.h b/libc/src/__support/HashTable/table.h
index e2a26d0..07fcd42 100644
--- a/libc/src/__support/HashTable/table.h
+++ b/libc/src/__support/HashTable/table.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_HASHTABLE_table_H
-#define LLVM_LIBC_SRC___SUPPORT_HASHTABLE_table_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_HASHTABLE_TABLE_H
+#define LLVM_LIBC_SRC___SUPPORT_HASHTABLE_TABLE_H
#include "llvm-libc-types/ENTRY.h"
#include "src/__support/CPP/bit.h" // bit_ceil
@@ -351,4 +351,4 @@ public:
} // namespace internal
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_HASHTABLE_table_H
+#endif // LLVM_LIBC_SRC___SUPPORT_HASHTABLE_TABLE_H
diff --git a/libc/src/__support/OSUtil/gpu/io.h b/libc/src/__support/OSUtil/gpu/io.h
index d6c89cf..e5562eb 100644
--- a/libc/src/__support/OSUtil/gpu/io.h
+++ b/libc/src/__support/OSUtil/gpu/io.h
@@ -18,4 +18,4 @@ void write_to_stderr(cpp::string_view msg);
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_OSUTIL_LINUX_IO_H
+#endif // LLVM_LIBC_SRC___SUPPORT_OSUTIL_GPU_IO_H
diff --git a/libc/src/__support/RPC/rpc_util.h b/libc/src/__support/RPC/rpc_util.h
index 11d2f75..7a9901a 100644
--- a/libc/src/__support/RPC/rpc_util.h
+++ b/libc/src/__support/RPC/rpc_util.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_RPC_RPC_UTILS_H
-#define LLVM_LIBC_SRC___SUPPORT_RPC_RPC_UTILS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_RPC_RPC_UTIL_H
+#define LLVM_LIBC_SRC___SUPPORT_RPC_RPC_UTIL_H
#include "src/__support/CPP/type_traits.h"
#include "src/__support/GPU/utils.h"
@@ -69,4 +69,4 @@ LIBC_INLINE void rpc_memcpy(void *dst, const void *src, size_t count) {
} // namespace rpc
} // namespace LIBC_NAMESPACE
-#endif
+#endif // LLVM_LIBC_SRC___SUPPORT_RPC_RPC_UTIL_H
diff --git a/libc/src/__support/StringUtil/message_mapper.h b/libc/src/__support/StringUtil/message_mapper.h
index c93a57c..dd91839 100644
--- a/libc/src/__support/StringUtil/message_mapper.h
+++ b/libc/src/__support/StringUtil/message_mapper.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_MESSAGE_MAPPER_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_MESSAGE_MAPPER_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_MESSAGE_MAPPER_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_MESSAGE_MAPPER_H
#include "src/__support/CPP/array.h"
#include "src/__support/CPP/optional.h"
@@ -100,4 +100,4 @@ constexpr MsgTable<N1 + N2> operator+(const MsgTable<N1> &t1,
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_MESSAGE_MAPPER_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_MESSAGE_MAPPER_H
diff --git a/libc/src/__support/StringUtil/platform_errors.h b/libc/src/__support/StringUtil/platform_errors.h
index dfa841c..32e8414 100644
--- a/libc/src/__support/StringUtil/platform_errors.h
+++ b/libc/src/__support/StringUtil/platform_errors.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_PLATFORM_ERROR_TABLE_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_PLATFORM_ERROR_TABLE_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_PLATFORM_ERRORS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_PLATFORM_ERRORS_H
#if defined(__linux__) || defined(__Fuchsia__)
#include "tables/linux_platform_errors.h"
@@ -15,4 +15,4 @@
#include "tables/minimal_platform_errors.h"
#endif
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_PLATFORM_ERROR_TABLE_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_PLATFORM_ERRORS_H
diff --git a/libc/src/__support/StringUtil/platform_signals.h b/libc/src/__support/StringUtil/platform_signals.h
index 0a1c3f6..52da082 100644
--- a/libc/src/__support/StringUtil/platform_signals.h
+++ b/libc/src/__support/StringUtil/platform_signals.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_PLATFORM_SIGNAL_TABLE_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_PLATFORM_SIGNAL_TABLE_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_PLATFORM_SIGNALS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_PLATFORM_SIGNALS_H
#if defined(__linux__) || defined(__Fuchsia__)
#include "tables/linux_platform_signals.h"
@@ -15,4 +15,4 @@
#include "tables/minimal_platform_signals.h"
#endif
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_PLATFORM_SIGNAL_TABLE_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_PLATFORM_SIGNALS_H
diff --git a/libc/src/__support/StringUtil/tables/linux_extension_errors.h b/libc/src/__support/StringUtil/tables/linux_extension_errors.h
index 4964fa4..f489688 100644
--- a/libc/src/__support/StringUtil/tables/linux_extension_errors.h
+++ b/libc/src/__support/StringUtil/tables/linux_extension_errors.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_EXTENSION_ERRORS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_EXTENSION_ERRORS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_EXTENSION_ERRORS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_EXTENSION_ERRORS_H
#include "src/__support/StringUtil/message_mapper.h"
@@ -72,4 +72,4 @@ constexpr MsgTable<52> LINUX_ERRORS = {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_EXTENSION_ERRORS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_EXTENSION_ERRORS_H
diff --git a/libc/src/__support/StringUtil/tables/linux_extension_signals.h b/libc/src/__support/StringUtil/tables/linux_extension_signals.h
index 633d0e2..3f9f0c6 100644
--- a/libc/src/__support/StringUtil/tables/linux_extension_signals.h
+++ b/libc/src/__support/StringUtil/tables/linux_extension_signals.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_EXTENSION_SIGNALS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_EXTENSION_SIGNALS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_EXTENSION_SIGNALS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_EXTENSION_SIGNALS_H
#include "src/__support/StringUtil/message_mapper.h"
@@ -30,4 +30,4 @@ LIBC_INLINE_VAR constexpr const MsgTable<3> LINUX_SIGNALS = {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_EXTENSION_SIGNALS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_EXTENSION_SIGNALS_H
diff --git a/libc/src/__support/StringUtil/tables/linux_platform_errors.h b/libc/src/__support/StringUtil/tables/linux_platform_errors.h
index a9ae2e8..a7bb545d 100644
--- a/libc/src/__support/StringUtil/tables/linux_platform_errors.h
+++ b/libc/src/__support/StringUtil/tables/linux_platform_errors.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_PLATFORM_ERRORS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_PLATFORM_ERRORS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_PLATFORM_ERRORS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_PLATFORM_ERRORS_H
#include "linux_extension_errors.h"
#include "posix_errors.h"
@@ -20,4 +20,4 @@ LIBC_INLINE_VAR constexpr auto PLATFORM_ERRORS =
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_PLATFORM_ERRORS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_PLATFORM_ERRORS_H
diff --git a/libc/src/__support/StringUtil/tables/linux_platform_signals.h b/libc/src/__support/StringUtil/tables/linux_platform_signals.h
index 1daaa9c..f12d31f 100644
--- a/libc/src/__support/StringUtil/tables/linux_platform_signals.h
+++ b/libc/src/__support/StringUtil/tables/linux_platform_signals.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_PLATFORM_SIGNALS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_PLATFORM_SIGNALS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_PLATFORM_SIGNALS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_PLATFORM_SIGNALS_H
#include "linux_extension_signals.h"
#include "posix_signals.h"
@@ -20,4 +20,4 @@ LIBC_INLINE_VAR constexpr auto PLATFORM_SIGNALS =
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_PLATFORM_SIGNALS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_LINUX_PLATFORM_SIGNALS_H
diff --git a/libc/src/__support/StringUtil/tables/minimal_platform_errors.h b/libc/src/__support/StringUtil/tables/minimal_platform_errors.h
index 1cfd9e2..c5672c4 100644
--- a/libc/src/__support/StringUtil/tables/minimal_platform_errors.h
+++ b/libc/src/__support/StringUtil/tables/minimal_platform_errors.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_MINIMAL_PLATFORM_ERRORS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_MINIMAL_PLATFORM_ERRORS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_MINIMAL_PLATFORM_ERRORS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_MINIMAL_PLATFORM_ERRORS_H
#include "stdc_errors.h"
@@ -17,4 +17,4 @@ LIBC_INLINE_VAR constexpr auto PLATFORM_ERRORS = STDC_ERRORS;
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_MINIMAL_PLATFORM_ERRORS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_MINIMAL_PLATFORM_ERRORS_H
diff --git a/libc/src/__support/StringUtil/tables/minimal_platform_signals.h b/libc/src/__support/StringUtil/tables/minimal_platform_signals.h
index 7fcf91b..7fe0dccf 100644
--- a/libc/src/__support/StringUtil/tables/minimal_platform_signals.h
+++ b/libc/src/__support/StringUtil/tables/minimal_platform_signals.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_MINIMAL_PLATFORM_SIGNALS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_MINIMAL_PLATFORM_SIGNALS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_MINIMAL_PLATFORM_SIGNALS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_MINIMAL_PLATFORM_SIGNALS_H
#include "stdc_signals.h"
@@ -17,4 +17,4 @@ LIBC_INLINE_VAR constexpr auto PLATFORM_SIGNALS = STDC_SIGNALS;
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_MINIMAL_PLATFORM_SIGNALS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_MINIMAL_PLATFORM_SIGNALS_H
diff --git a/libc/src/__support/StringUtil/tables/posix_errors.h b/libc/src/__support/StringUtil/tables/posix_errors.h
index 3ade7aa..3cb6de3 100644
--- a/libc/src/__support/StringUtil/tables/posix_errors.h
+++ b/libc/src/__support/StringUtil/tables/posix_errors.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_POSIX_ERRORS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_POSIX_ERRORS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_POSIX_ERRORS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_POSIX_ERRORS_H
#include "src/__support/StringUtil/message_mapper.h"
@@ -96,4 +96,4 @@ LIBC_INLINE_VAR constexpr MsgTable<76> POSIX_ERRORS = {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_POSIX_ERRORS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_POSIX_ERRORS_H
diff --git a/libc/src/__support/StringUtil/tables/posix_signals.h b/libc/src/__support/StringUtil/tables/posix_signals.h
index 2fba2d9..b9535cb 100644
--- a/libc/src/__support/StringUtil/tables/posix_signals.h
+++ b/libc/src/__support/StringUtil/tables/posix_signals.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_POSIX_SIGNALS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_POSIX_SIGNALS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_POSIX_SIGNALS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_POSIX_SIGNALS_H
#include "src/__support/CPP/array.h"
#include "src/__support/StringUtil/message_mapper.h"
@@ -43,4 +43,4 @@ LIBC_INLINE_VAR constexpr MsgTable<22> POSIX_SIGNALS = {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_POSIX_SIGNALS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_POSIX_SIGNALS_H
diff --git a/libc/src/__support/StringUtil/tables/signal_table.h b/libc/src/__support/StringUtil/tables/signal_table.h
index 5035c54..d7ffbc6 100644
--- a/libc/src/__support/StringUtil/tables/signal_table.h
+++ b/libc/src/__support/StringUtil/tables/signal_table.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_SIGNAL_TABLE_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_SIGNAL_TABLE_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_SIGNAL_TABLE_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_SIGNAL_TABLE_H
#include "src/__support/StringUtil/message_mapper.h"
@@ -36,4 +36,4 @@ LIBC_INLINE_VAR constexpr auto PLATFORM_SIGNALS = []() {
} // namespace LIBC_NAMESPACE::internal
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_SIGNAL_TABLE_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_SIGNAL_TABLE_H
diff --git a/libc/src/__support/StringUtil/tables/stdc_errors.h b/libc/src/__support/StringUtil/tables/stdc_errors.h
index f0fc787..a9c1527 100644
--- a/libc/src/__support/StringUtil/tables/stdc_errors.h
+++ b/libc/src/__support/StringUtil/tables/stdc_errors.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_STDC_ERRORS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_STDC_ERRORS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_STDC_ERRORS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_STDC_ERRORS_H
#include "src/__support/StringUtil/message_mapper.h"
@@ -24,4 +24,4 @@ LIBC_INLINE_VAR constexpr const MsgTable<4> STDC_ERRORS = {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_ERRORS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_STDC_ERRORS_H
diff --git a/libc/src/__support/StringUtil/tables/stdc_signals.h b/libc/src/__support/StringUtil/tables/stdc_signals.h
index 773f182..7c93b45 100644
--- a/libc/src/__support/StringUtil/tables/stdc_signals.h
+++ b/libc/src/__support/StringUtil/tables/stdc_signals.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_STDC_SIGNALS_H
-#define LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_STDC_SIGNALS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_STDC_SIGNALS_H
+#define LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_STDC_SIGNALS_H
#include <signal.h> // For signal numbers
@@ -26,4 +26,4 @@ LIBC_INLINE_VAR constexpr const MsgTable<6> STDC_SIGNALS = {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_STRING_UTIL_TABLES_LINUX_SIGNALS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_STRINGUTIL_TABLES_STDC_SIGNALS_H
diff --git a/libc/src/__support/fixed_point/fx_bits.h b/libc/src/__support/fixed_point/fx_bits.h
index 0c8d03b..6fdbc6f 100644
--- a/libc/src/__support/fixed_point/fx_bits.h
+++ b/libc/src/__support/fixed_point/fx_bits.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXBITS_H
-#define LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXBITS_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_FIXED_POINT_FX_BITS_H
+#define LLVM_LIBC_SRC___SUPPORT_FIXED_POINT_FX_BITS_H
#include "llvm-libc-macros/stdfix-macros.h"
#include "src/__support/CPP/bit.h"
@@ -165,4 +165,4 @@ template <typename T> LIBC_INLINE constexpr T round(T x, int n) {
#endif // LIBC_COMPILER_HAS_FIXED_POINT
-#endif // LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXBITS_H
+#endif // LLVM_LIBC_SRC___SUPPORT_FIXED_POINT_FX_BITS_H
diff --git a/libc/src/__support/fixed_point/fx_rep.h b/libc/src/__support/fixed_point/fx_rep.h
index 7d18f14..e1fee62 100644
--- a/libc/src/__support/fixed_point/fx_rep.h
+++ b/libc/src/__support/fixed_point/fx_rep.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXREP_H
-#define LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXREP_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_FIXED_POINT_FX_REP_H
+#define LLVM_LIBC_SRC___SUPPORT_FIXED_POINT_FX_REP_H
#include "llvm-libc-macros/stdfix-macros.h"
#include "src/__support/CPP/type_traits.h"
@@ -273,4 +273,4 @@ struct FXRep<unsigned long sat accum> : FXRep<unsigned long accum> {};
#endif // LIBC_COMPILER_HAS_FIXED_POINT
-#endif // LLVM_LIBC_SRC___SUPPORT_FIXEDPOINT_FXREP_H
+#endif // LLVM_LIBC_SRC___SUPPORT_FIXED_POINT_FX_REP_H
diff --git a/libc/src/__support/macros/properties/CMakeLists.txt b/libc/src/__support/macros/properties/CMakeLists.txt
index 3c492ab..bbc45650 100644
--- a/libc/src/__support/macros/properties/CMakeLists.txt
+++ b/libc/src/__support/macros/properties/CMakeLists.txt
@@ -25,9 +25,9 @@ add_header_library(
)
add_header_library(
- float
+ types
HDRS
- float.h
+ types.h
DEPENDS
.architectures
.compiler
diff --git a/libc/src/__support/macros/properties/float.h b/libc/src/__support/macros/properties/types.h
index 510f392..e812a9d 100644
--- a/libc/src/__support/macros/properties/float.h
+++ b/libc/src/__support/macros/properties/types.h
@@ -1,15 +1,14 @@
-//===-- Float type support --------------------------------------*- C++ -*-===//
+//===-- Types support -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-// Floating point properties are a combination of compiler support, target OS
-// and target architecture.
+// Types detection and support.
-#ifndef LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_FLOAT_H
-#define LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_FLOAT_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_TYPES_H
+#define LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_TYPES_H
#include "llvm-libc-macros/float-macros.h" // LDBL_MANT_DIG
#include "llvm-libc-types/float128.h" // float128
@@ -60,4 +59,4 @@ using float16 = _Float16;
#define LIBC_COMPILER_HAS_FLOAT128
#endif
-#endif // LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_FLOAT_H
+#endif // LLVM_LIBC_SRC___SUPPORT_MACROS_PROPERTIES_TYPES_H
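With properties/float.h renamed to properties/types.h, the optional wide types stay behind feature macros such as LIBC_COMPILER_HAS_FLOAT128 (visible in the hunk context above). A minimal hedged sketch, assuming a consumer translation unit inside the libc build, of testing the macro instead of assuming float128 exists on every target:

// Hedged sketch (illustrative only; not part of this patch)
#include "src/__support/macros/properties/types.h"

#ifdef LIBC_COMPILER_HAS_FLOAT128
static_assert(sizeof(float128) == 16, "float128 is a 128-bit type");
#endif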
diff --git a/libc/src/__support/memory_size.h b/libc/src/__support/memory_size.h
index 94aee25..7bd16a1 100644
--- a/libc/src/__support/memory_size.h
+++ b/libc/src/__support/memory_size.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_SRC___SUPPORT_MEMORY_SIZE_H
+#define LLVM_LIBC_SRC___SUPPORT_MEMORY_SIZE_H
+
#include "src/__support/CPP/bit.h" // has_single_bit
#include "src/__support/CPP/limits.h"
#include "src/__support/CPP/type_traits.h"
@@ -83,3 +86,5 @@ public:
};
} // namespace internal
} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC___SUPPORT_MEMORY_SIZE_H
diff --git a/libc/src/__support/threads/gpu/mutex.h b/libc/src/__support/threads/gpu/mutex.h
index 7a23604..71d0ef0 100644
--- a/libc/src/__support/threads/gpu/mutex.h
+++ b/libc/src/__support/threads/gpu/mutex.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_THREAD_GPU_MUTEX_H
-#define LLVM_LIBC_SRC___SUPPORT_THREAD_GPU_MUTEX_H
+#ifndef LLVM_LIBC_SRC___SUPPORT_THREADS_GPU_MUTEX_H
+#define LLVM_LIBC_SRC___SUPPORT_THREADS_GPU_MUTEX_H
#include "src/__support/macros/attributes.h"
#include "src/__support/threads/mutex_common.h"
@@ -28,4 +28,4 @@ struct Mutex {
} // namespace LIBC_NAMESPACE
-#endif
+#endif // LLVM_LIBC_SRC___SUPPORT_THREADS_GPU_MUTEX_H
diff --git a/libc/src/assert/assert.h b/libc/src/assert/assert.h
index 0318a93..6f352af 100644
--- a/libc/src/assert/assert.h
+++ b/libc/src/assert/assert.h
@@ -1,3 +1,4 @@
+// NOLINT(llvm-header-guard) https://github.com/llvm/llvm-project/issues/83339
//===-- Internal header for assert ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
@@ -21,4 +22,4 @@
((e) ? (void)0 \
: LIBC_NAMESPACE::__assert_fail(#e, __FILE__, __LINE__, \
__PRETTY_FUNCTION__))
-#endif
+#endif // NDEBUG
diff --git a/libc/src/gpu/rpc_host_call.h b/libc/src/gpu/rpc_host_call.h
index 14393ab..473d90b 100644
--- a/libc/src/gpu/rpc_host_call.h
+++ b/libc/src/gpu/rpc_host_call.h
@@ -17,4 +17,4 @@ void rpc_host_call(void *fn, void *buffer, size_t size);
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_GPU_RPC_H_HOST_CALL
+#endif // LLVM_LIBC_SRC_GPU_RPC_HOST_CALL_H
diff --git a/libc/src/math/amdgpu/declarations.h b/libc/src/math/amdgpu/declarations.h
index 780d5f0..5d7f3c9 100644
--- a/libc/src/math/amdgpu/declarations.h
+++ b/libc/src/math/amdgpu/declarations.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC_MATH_GPU_AMDGPU_DECLARATIONS_H
-#define LLVM_LIBC_SRC_MATH_GPU_AMDGPU_DECLARATIONS_H
+#ifndef LLVM_LIBC_SRC_MATH_AMDGPU_DECLARATIONS_H
+#define LLVM_LIBC_SRC_MATH_AMDGPU_DECLARATIONS_H
#include "platform.h"
@@ -83,4 +83,4 @@ float __ocml_tgamma_f32(float);
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_MATH_GPU_AMDGPU_DECLARATIONS_H
+#endif // LLVM_LIBC_SRC_MATH_AMDGPU_DECLARATIONS_H
diff --git a/libc/src/math/amdgpu/fmax.cpp b/libc/src/math/amdgpu/fmax.cpp
index 09624cc..09f0f94 100644
--- a/libc/src/math/amdgpu/fmax.cpp
+++ b/libc/src/math/amdgpu/fmax.cpp
@@ -15,10 +15,6 @@
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(double, fmax, (double x, double y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<double>(cpp::bit_cast<uint64_t>(x) &
- cpp::bit_cast<uint64_t>(y));
return __builtin_fmax(x, y);
}
diff --git a/libc/src/math/amdgpu/fmaxf.cpp b/libc/src/math/amdgpu/fmaxf.cpp
index f6ed466..5913a85 100644
--- a/libc/src/math/amdgpu/fmaxf.cpp
+++ b/libc/src/math/amdgpu/fmaxf.cpp
@@ -8,17 +8,11 @@
#include "src/math/fmaxf.h"
-#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
-#include "src/__support/macros/optimization.h"
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(float, fmaxf, (float x, float y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<float>(cpp::bit_cast<uint32_t>(x) &
- cpp::bit_cast<uint32_t>(y));
return __builtin_fmaxf(x, y);
}
diff --git a/libc/src/math/amdgpu/fmin.cpp b/libc/src/math/amdgpu/fmin.cpp
index 8977ff7..0d6f352 100644
--- a/libc/src/math/amdgpu/fmin.cpp
+++ b/libc/src/math/amdgpu/fmin.cpp
@@ -8,17 +8,11 @@
#include "src/math/fmin.h"
-#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
-#include "src/__support/macros/optimization.h"
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(double, fmin, (double x, double y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<double>(cpp::bit_cast<uint64_t>(x) |
- cpp::bit_cast<uint64_t>(y));
return __builtin_fmin(x, y);
}
diff --git a/libc/src/math/amdgpu/fminf.cpp b/libc/src/math/amdgpu/fminf.cpp
index 3be5525..42744abf 100644
--- a/libc/src/math/amdgpu/fminf.cpp
+++ b/libc/src/math/amdgpu/fminf.cpp
@@ -8,17 +8,11 @@
#include "src/math/fminf.h"
-#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
-#include "src/__support/macros/optimization.h"
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(float, fminf, (float x, float y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<float>(cpp::bit_cast<uint32_t>(x) |
- cpp::bit_cast<uint32_t>(y));
return __builtin_fminf(x, y);
}
diff --git a/libc/src/math/amdgpu/platform.h b/libc/src/math/amdgpu/platform.h
index e5a9f81..29d6cac 100644
--- a/libc/src/math/amdgpu/platform.h
+++ b/libc/src/math/amdgpu/platform.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC_MATH_GPU_AMDGPU_PLATFORM_H
-#define LLVM_LIBC_SRC_MATH_GPU_AMDGPU_PLATFORM_H
+#ifndef LLVM_LIBC_SRC_MATH_AMDGPU_PLATFORM_H
+#define LLVM_LIBC_SRC_MATH_AMDGPU_PLATFORM_H
#include "src/__support/macros/attributes.h"
@@ -51,4 +51,4 @@ extern const LIBC_INLINE_VAR uint32_t __oclc_ISA_version = 9000;
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_MATH_GPU_AMDGPU_PLATFORM_H
+#endif // LLVM_LIBC_SRC_MATH_AMDGPU_PLATFORM_H
diff --git a/libc/src/math/ceilf128.h b/libc/src/math/ceilf128.h
index db8feff..b0c4020 100644
--- a/libc/src/math/ceilf128.h
+++ b/libc/src/math/ceilf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_CEILF128_H
#define LLVM_LIBC_SRC_MATH_CEILF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/copysignf128.h b/libc/src/math/copysignf128.h
index 5e40657..06c1949 100644
--- a/libc/src/math/copysignf128.h
+++ b/libc/src/math/copysignf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_COPYSIGNF128_H
#define LLVM_LIBC_SRC_MATH_COPYSIGNF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
@@ -17,4 +17,4 @@ float128 copysignf128(float128 x, float128 y);
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_MATH_COPYSIGN_H
+#endif // LLVM_LIBC_SRC_MATH_COPYSIGNF128_H
diff --git a/libc/src/math/fabsf128.h b/libc/src/math/fabsf128.h
index 5999757..0a27502 100644
--- a/libc/src/math/fabsf128.h
+++ b/libc/src/math/fabsf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_FABSF128_H
#define LLVM_LIBC_SRC_MATH_FABSF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/fdimf128.h b/libc/src/math/fdimf128.h
index c6f488a..f0485ab 100644
--- a/libc/src/math/fdimf128.h
+++ b/libc/src/math/fdimf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_FDIMF128_H
#define LLVM_LIBC_SRC_MATH_FDIMF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/floorf128.h b/libc/src/math/floorf128.h
index 86b9a8e..b97c4b6 100644
--- a/libc/src/math/floorf128.h
+++ b/libc/src/math/floorf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_FLOORF128_H
#define LLVM_LIBC_SRC_MATH_FLOORF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/fmaxf128.h b/libc/src/math/fmaxf128.h
index 39eaaf6..a4407d9 100644
--- a/libc/src/math/fmaxf128.h
+++ b/libc/src/math/fmaxf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_FMAXF128_H
#define LLVM_LIBC_SRC_MATH_FMAXF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/fminf128.h b/libc/src/math/fminf128.h
index b3d1bec..d2ed593 100644
--- a/libc/src/math/fminf128.h
+++ b/libc/src/math/fminf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_FMINF128_H
#define LLVM_LIBC_SRC_MATH_FMINF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/frexpf128.h b/libc/src/math/frexpf128.h
index 5d70860..55c4a47 100644
--- a/libc/src/math/frexpf128.h
+++ b/libc/src/math/frexpf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_FREXPF128_H
#define LLVM_LIBC_SRC_MATH_FREXPF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index 120ada8..82d2a5e 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -43,7 +43,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.nearest_integer_operations
)
@@ -216,7 +216,7 @@ add_entrypoint_object(
HDRS
../fabsf128.h
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.basic_operations
COMPILE_OPTIONS
-O3
@@ -267,7 +267,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.nearest_integer_operations
)
@@ -316,7 +316,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.nearest_integer_operations
)
@@ -365,7 +365,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.nearest_integer_operations
)
@@ -908,7 +908,7 @@ add_entrypoint_object(
HDRS
../copysignf128.h
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.manipulation_functions
COMPILE_OPTIONS
-O3
@@ -959,7 +959,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.manipulation_functions
)
@@ -1008,7 +1008,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.manipulation_functions
)
@@ -1057,7 +1057,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.manipulation_functions
)
@@ -1106,7 +1106,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.manipulation_functions
)
@@ -1412,7 +1412,7 @@ add_entrypoint_object(
HDRS
../fminf128.h
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.basic_operations
COMPILE_OPTIONS
-O3
@@ -1461,7 +1461,7 @@ add_entrypoint_object(
HDRS
../fmaxf128.h
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.basic_operations
COMPILE_OPTIONS
-O3
@@ -1510,7 +1510,7 @@ add_entrypoint_object(
HDRS
../sqrtf128.h
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.sqrt
COMPILE_OPTIONS
-O3
@@ -1647,7 +1647,7 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
DEPENDS
- libc.src.__support.macros.properties.float
+ libc.src.__support.macros.properties.types
libc.src.__support.FPUtil.basic_operations
)
diff --git a/libc/src/math/generic/exp_utils.h b/libc/src/math/generic/exp_utils.h
index 49d9a81..405678c 100644
--- a/libc/src/math/generic/exp_utils.h
+++ b/libc/src/math/generic/exp_utils.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC_MATH_EXP_UTILS_H
-#define LLVM_LIBC_SRC_MATH_EXP_UTILS_H
+#ifndef LLVM_LIBC_SRC_MATH_GENERIC_EXP_UTILS_H
+#define LLVM_LIBC_SRC_MATH_GENERIC_EXP_UTILS_H
#include <stdint.h>
@@ -30,4 +30,4 @@ extern const Exp2fDataTable exp2f_data;
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_MATH_EXP_UTILS_H
+#endif // LLVM_LIBC_SRC_MATH_GENERIC_EXP_UTILS_H
diff --git a/libc/src/math/ilogbf128.h b/libc/src/math/ilogbf128.h
index df1145f..d8fe3b9 100644
--- a/libc/src/math/ilogbf128.h
+++ b/libc/src/math/ilogbf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_ILOGBF128_H
#define LLVM_LIBC_SRC_MATH_ILOGBF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/ldexpf128.h b/libc/src/math/ldexpf128.h
index adf9d8f..7aa6ded 100644
--- a/libc/src/math/ldexpf128.h
+++ b/libc/src/math/ldexpf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_LDEXPF128_H
#define LLVM_LIBC_SRC_MATH_LDEXPF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/llogb.h b/libc/src/math/llogb.h
index 2d95877..b51f89f 100644
--- a/libc/src/math/llogb.h
+++ b/libc/src/math/llogb.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_LLOGB_H
#define LLVM_LIBC_SRC_MATH_LLOGB_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/llogbf.h b/libc/src/math/llogbf.h
index 512e174..af4aa8a 100644
--- a/libc/src/math/llogbf.h
+++ b/libc/src/math/llogbf.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_LLOGBF_H
#define LLVM_LIBC_SRC_MATH_LLOGBF_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/llogbf128.h b/libc/src/math/llogbf128.h
index 7fb74d4..ce7c872a 100644
--- a/libc/src/math/llogbf128.h
+++ b/libc/src/math/llogbf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_LLOGBF128_H
#define LLVM_LIBC_SRC_MATH_LLOGBF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/llogbl.h b/libc/src/math/llogbl.h
index 4033100..3c323a3 100644
--- a/libc/src/math/llogbl.h
+++ b/libc/src/math/llogbl.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_LLOGBL_H
#define LLVM_LIBC_SRC_MATH_LLOGBL_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/logbf128.h b/libc/src/math/logbf128.h
index 8baa076a..7823bbd 100644
--- a/libc/src/math/logbf128.h
+++ b/libc/src/math/logbf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_LOGBF128_H
#define LLVM_LIBC_SRC_MATH_LOGBF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/nvptx/declarations.h b/libc/src/math/nvptx/declarations.h
index 9cb2be6..d41b16c 100644
--- a/libc/src/math/nvptx/declarations.h
+++ b/libc/src/math/nvptx/declarations.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC_MATH_GPU_NVPTX_DECLARATIONS_H
-#define LLVM_LIBC_SRC_MATH_GPU_NVPTX_DECLARATIONS_H
+#ifndef LLVM_LIBC_SRC_MATH_NVPTX_DECLARATIONS_H
+#define LLVM_LIBC_SRC_MATH_NVPTX_DECLARATIONS_H
namespace LIBC_NAMESPACE {
@@ -86,4 +86,4 @@ float __nv_tgammaf(float);
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_MATH_GPU_NVPTX_DECLARATIONS_H
+#endif // LLVM_LIBC_SRC_MATH_NVPTX_DECLARATIONS_H
diff --git a/libc/src/math/nvptx/fmax.cpp b/libc/src/math/nvptx/fmax.cpp
index 09624cc..3ba65d7 100644
--- a/libc/src/math/nvptx/fmax.cpp
+++ b/libc/src/math/nvptx/fmax.cpp
@@ -8,17 +8,11 @@
#include "src/math/fmax.h"
-#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
-#include "src/__support/macros/optimization.h"
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(double, fmax, (double x, double y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<double>(cpp::bit_cast<uint64_t>(x) &
- cpp::bit_cast<uint64_t>(y));
return __builtin_fmax(x, y);
}
diff --git a/libc/src/math/nvptx/fmaxf.cpp b/libc/src/math/nvptx/fmaxf.cpp
index f6ed466..e977082 100644
--- a/libc/src/math/nvptx/fmaxf.cpp
+++ b/libc/src/math/nvptx/fmaxf.cpp
@@ -15,10 +15,6 @@
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(float, fmaxf, (float x, float y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<float>(cpp::bit_cast<uint32_t>(x) &
- cpp::bit_cast<uint32_t>(y));
return __builtin_fmaxf(x, y);
}
diff --git a/libc/src/math/nvptx/fmin.cpp b/libc/src/math/nvptx/fmin.cpp
index 8977ff7..0d6f352 100644
--- a/libc/src/math/nvptx/fmin.cpp
+++ b/libc/src/math/nvptx/fmin.cpp
@@ -8,17 +8,11 @@
#include "src/math/fmin.h"
-#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
-#include "src/__support/macros/optimization.h"
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(double, fmin, (double x, double y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<double>(cpp::bit_cast<uint64_t>(x) |
- cpp::bit_cast<uint64_t>(y));
return __builtin_fmin(x, y);
}
diff --git a/libc/src/math/nvptx/fminf.cpp b/libc/src/math/nvptx/fminf.cpp
index 3be5525..42744abf 100644
--- a/libc/src/math/nvptx/fminf.cpp
+++ b/libc/src/math/nvptx/fminf.cpp
@@ -8,17 +8,11 @@
#include "src/math/fminf.h"
-#include "src/__support/CPP/bit.h"
#include "src/__support/common.h"
-#include "src/__support/macros/optimization.h"
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(float, fminf, (float x, float y)) {
- // FIXME: The builtin function does not correctly handle the +/-0.0 case.
- if (LIBC_UNLIKELY(x == y))
- return cpp::bit_cast<float>(cpp::bit_cast<uint32_t>(x) |
- cpp::bit_cast<uint32_t>(y));
return __builtin_fminf(x, y);
}
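
(Editor's note: the four nvptx fmax/fmin hunks above all delete the same signed-zero workaround. Below is a standalone sketch of the deleted trick, rewritten with std::bit_cast so it compiles outside the libc tree; the function names are illustrative, not libc entrypoints, and __builtin_fmax/__builtin_fmin assume a GCC/Clang-style compiler.)

// fmax(+0.0, -0.0) must return +0.0 and fmin(+0.0, -0.0) must return -0.0,
// but +0.0 == -0.0, so equal operands were disambiguated via bit patterns:
// AND clears the sign bit unless both inputs are -0.0 (the fmax answer),
// OR sets it if either input is -0.0 (the fmin answer).
#include <bit>
#include <cstdint>

double fmax_with_zero_fix(double x, double y) {
  if (x == y) // identical values or +/-0.0 reach this branch; NaNs never do
    return std::bit_cast<double>(std::bit_cast<std::uint64_t>(x) &
                                 std::bit_cast<std::uint64_t>(y));
  return __builtin_fmax(x, y);
}

double fmin_with_zero_fix(double x, double y) {
  if (x == y)
    return std::bit_cast<double>(std::bit_cast<std::uint64_t>(x) |
                                 std::bit_cast<std::uint64_t>(y));
  return __builtin_fmin(x, y);
}
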
diff --git a/libc/src/math/nvptx/nvptx.h b/libc/src/math/nvptx/nvptx.h
index 110d570..5f9b32f 100644
--- a/libc/src/math/nvptx/nvptx.h
+++ b/libc/src/math/nvptx/nvptx.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC_MATH_GPU_NVPTX_H
-#define LLVM_LIBC_SRC_MATH_GPU_NVPTX_H
+#ifndef LLVM_LIBC_SRC_MATH_NVPTX_NVPTX_H
+#define LLVM_LIBC_SRC_MATH_NVPTX_NVPTX_H
#include "declarations.h"
@@ -99,4 +99,4 @@ LIBC_INLINE float tgammaf(float x) { return __nv_tgammaf(x); }
} // namespace internal
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_MATH_GPU_NVPTX_H
+#endif // LLVM_LIBC_SRC_MATH_NVPTX_NVPTX_H
diff --git a/libc/src/math/roundf128.h b/libc/src/math/roundf128.h
index c67c946..e4aca17 100644
--- a/libc/src/math/roundf128.h
+++ b/libc/src/math/roundf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_ROUNDF128_H
#define LLVM_LIBC_SRC_MATH_ROUNDF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/sqrtf128.h b/libc/src/math/sqrtf128.h
index bccb6bb..9da9eb6 100644
--- a/libc/src/math/sqrtf128.h
+++ b/libc/src/math/sqrtf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_SQRTF128_H
#define LLVM_LIBC_SRC_MATH_SQRTF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/math/truncf128.h b/libc/src/math/truncf128.h
index c92c820..5eb6116 100644
--- a/libc/src/math/truncf128.h
+++ b/libc/src/math/truncf128.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SRC_MATH_TRUNCF128_H
#define LLVM_LIBC_SRC_MATH_TRUNCF128_H
-#include "src/__support/macros/properties/float.h"
+#include "src/__support/macros/properties/types.h"
namespace LIBC_NAMESPACE {
diff --git a/libc/src/search/hsearch/global.h b/libc/src/search/hsearch/global.h
index 292008c..9579195 100644
--- a/libc/src/search/hsearch/global.h
+++ b/libc/src/search/hsearch/global.h
@@ -6,8 +6,13 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_SRC_SEARCH_HSEARCH_GLOBAL_H
+#define LLVM_LIBC_SRC_SEARCH_HSEARCH_GLOBAL_H
+
namespace LIBC_NAMESPACE {
namespace internal {
extern struct HashTable *global_hash_table;
}
} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_SEARCH_HSEARCH_GLOBAL_H
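
(Editor's note: this file gains guards rather than renaming them, but it lands on the same convention as every guard rename in this patch: the macro mirrors the header's path from the repository root, upper-cased, with separators turned into underscores. A tiny illustration with a hypothetical path:)

// Hypothetical header libc/src/foo/bar.h -> guard LLVM_LIBC_SRC_FOO_BAR_H
#ifndef LLVM_LIBC_SRC_FOO_BAR_H
#define LLVM_LIBC_SRC_FOO_BAR_H

// ... declarations ...

#endif // LLVM_LIBC_SRC_FOO_BAR_H
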
diff --git a/libc/src/string/memory_utils/aarch64/inline_bcmp.h b/libc/src/string/memory_utils/aarch64/inline_bcmp.h
index 8e0827f..b80b578 100644
--- a/libc/src/string/memory_utils/aarch64/inline_bcmp.h
+++ b/libc/src/string/memory_utils/aarch64/inline_bcmp.h
@@ -27,7 +27,7 @@ namespace LIBC_NAMESPACE {
}
switch (count) {
case 0:
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
case 1:
return generic::Bcmp<uint8_t>::block(p1, p2);
case 2:
diff --git a/libc/src/string/memory_utils/aarch64/inline_memcmp.h b/libc/src/string/memory_utils/aarch64/inline_memcmp.h
index 839c8ec..d0e0bd7 100644
--- a/libc/src/string/memory_utils/aarch64/inline_memcmp.h
+++ b/libc/src/string/memory_utils/aarch64/inline_memcmp.h
@@ -50,7 +50,7 @@ inline_memcmp_aarch64_neon_gt16(CPtr p1, CPtr p2, size_t count) {
LIBC_INLINE MemcmpReturnType inline_memcmp_aarch64(CPtr p1, CPtr p2,
size_t count) {
if (count == 0)
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
if (count == 1)
return generic::Memcmp<uint8_t>::block(p1, p2);
if (count == 2)
diff --git a/libc/src/string/memory_utils/aarch64/inline_memcpy.h b/libc/src/string/memory_utils/aarch64/inline_memcpy.h
index 0a159f4..ea1a03f4 100644
--- a/libc/src/string/memory_utils/aarch64/inline_memcpy.h
+++ b/libc/src/string/memory_utils/aarch64/inline_memcpy.h
@@ -5,8 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-#ifndef LIBC_SRC_STRING_MEMORY_UTILS_AARCH64_INLINE_MEMCPY_H
-#define LIBC_SRC_STRING_MEMORY_UTILS_AARCH64_INLINE_MEMCPY_H
+#ifndef LLVM_LIBC_SRC_STRING_MEMORY_UTILS_AARCH64_INLINE_MEMCPY_H
+#define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_AARCH64_INLINE_MEMCPY_H
#include "src/__support/macros/config.h" // LIBC_INLINE
#include "src/string/memory_utils/op_builtin.h"
@@ -45,4 +45,4 @@ inline_memcpy_aarch64(Ptr __restrict dst, CPtr __restrict src, size_t count) {
} // namespace LIBC_NAMESPACE
-#endif // LIBC_SRC_STRING_MEMORY_UTILS_AARCH64_INLINE_MEMCPY_H
+#endif // LLVM_LIBC_SRC_STRING_MEMORY_UTILS_AARCH64_INLINE_MEMCPY_H
diff --git a/libc/src/string/memory_utils/generic/aligned_access.h b/libc/src/string/memory_utils/generic/aligned_access.h
index 65bc63f..b6ece81 100644
--- a/libc/src/string/memory_utils/generic/aligned_access.h
+++ b/libc/src/string/memory_utils/generic/aligned_access.h
@@ -135,7 +135,7 @@ inline_bcmp_aligned_access_32bit(CPtr p1, CPtr p2, size_t count) {
uint32_t a = load32_aligned<uint32_t>(p1, offset);
uint32_t b = load32_aligned(p2, offset, p2_alignment);
if (a != b)
- return BcmpReturnType::NONZERO();
+ return BcmpReturnType::nonzero();
}
return inline_bcmp_byte_per_byte(p1, p2, count, offset);
}
@@ -154,7 +154,7 @@ inline_bcmp_aligned_access_64bit(CPtr p1, CPtr p2, size_t count) {
uint64_t a = load64_aligned<uint64_t>(p1, offset);
uint64_t b = load64_aligned(p2, offset, p2_alignment);
if (a != b)
- return BcmpReturnType::NONZERO();
+ return BcmpReturnType::nonzero();
}
return inline_bcmp_byte_per_byte(p1, p2, count, offset);
}
diff --git a/libc/src/string/memory_utils/generic/byte_per_byte.h b/libc/src/string/memory_utils/generic/byte_per_byte.h
index a666c5d..9515398 100644
--- a/libc/src/string/memory_utils/generic/byte_per_byte.h
+++ b/libc/src/string/memory_utils/generic/byte_per_byte.h
@@ -56,8 +56,8 @@ inline_bcmp_byte_per_byte(CPtr p1, CPtr p2, size_t count, size_t offset = 0) {
LIBC_LOOP_NOUNROLL
for (; offset < count; ++offset)
if (p1[offset] != p2[offset])
- return BcmpReturnType::NONZERO();
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::nonzero();
+ return BcmpReturnType::zero();
}
[[maybe_unused]] LIBC_INLINE MemcmpReturnType
@@ -70,7 +70,7 @@ inline_memcmp_byte_per_byte(CPtr p1, CPtr p2, size_t count, size_t offset = 0) {
if (diff)
return diff;
}
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/string/memory_utils/op_aarch64.h b/libc/src/string/memory_utils/op_aarch64.h
index 3aae3289..6a2013b 100644
--- a/libc/src/string/memory_utils/op_aarch64.h
+++ b/libc/src/string/memory_utils/op_aarch64.h
@@ -108,7 +108,7 @@ template <size_t Size> struct Bcmp {
} else {
static_assert(cpp::always_false<decltype(Size)>, "SIZE not implemented");
}
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
LIBC_INLINE static BcmpReturnType tail(CPtr p1, CPtr p2, size_t count) {
@@ -154,7 +154,7 @@ template <size_t Size> struct Bcmp {
} else {
static_assert(cpp::always_false<decltype(Size)>, "SIZE not implemented");
}
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
LIBC_INLINE static BcmpReturnType loop_and_tail(CPtr p1, CPtr p2,
@@ -217,7 +217,7 @@ LIBC_INLINE MemcmpReturnType cmp<uint64_t>(CPtr p1, CPtr p2, size_t offset) {
const auto b = load_be<uint64_t>(p2, offset);
if (a != b)
return a > b ? 1 : -1;
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
///////////////////////////////////////////////////////////////////////////////
@@ -245,7 +245,7 @@ LIBC_INLINE MemcmpReturnType cmp<uint8x16_t>(CPtr p1, CPtr p2, size_t offset) {
return cmp_neq_uint64_t(a, b);
offset += sizeof(uint64_t);
}
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
///////////////////////////////////////////////////////////////////////////////
@@ -262,7 +262,7 @@ LIBC_INLINE MemcmpReturnType cmp<uint8x16x2_t>(CPtr p1, CPtr p2,
return cmp_neq_uint64_t(a, b);
offset += sizeof(uint64_t);
}
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
} // namespace LIBC_NAMESPACE::generic
diff --git a/libc/src/string/memory_utils/op_builtin.h b/libc/src/string/memory_utils/op_builtin.h
index 3c17eef..75dd4de 100644
--- a/libc/src/string/memory_utils/op_builtin.h
+++ b/libc/src/string/memory_utils/op_builtin.h
@@ -105,22 +105,22 @@ template <size_t Size> struct Bcmp {
LIBC_INLINE static BcmpReturnType block(CPtr, CPtr) {
static_assert(cpp::always_false<decltype(Size)>,
"Missing __builtin_memcmp_inline");
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
LIBC_INLINE static BcmpReturnType tail(CPtr, CPtr, size_t) {
static_assert(cpp::always_false<decltype(Size)>, "Not implemented");
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
LIBC_INLINE static BcmpReturnType head_tail(CPtr, CPtr, size_t) {
static_assert(cpp::always_false<decltype(Size)>, "Not implemented");
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
LIBC_INLINE static BcmpReturnType loop_and_tail(CPtr, CPtr, size_t) {
static_assert(cpp::always_false<decltype(Size)>, "Not implemented");
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
};
@@ -132,22 +132,22 @@ template <size_t Size> struct Memcmp {
LIBC_INLINE static MemcmpReturnType block(CPtr, CPtr) {
static_assert(cpp::always_false<decltype(Size)>,
"Missing __builtin_memcmp_inline");
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
LIBC_INLINE static MemcmpReturnType tail(CPtr, CPtr, size_t) {
static_assert(cpp::always_false<decltype(Size)>, "Not implemented");
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
LIBC_INLINE static MemcmpReturnType head_tail(CPtr, CPtr, size_t) {
static_assert(cpp::always_false<decltype(Size)>, "Not implemented");
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
LIBC_INLINE static MemcmpReturnType loop_and_tail(CPtr, CPtr, size_t) {
static_assert(cpp::always_false<decltype(Size)>, "Not implemented");
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
};
diff --git a/libc/src/string/memory_utils/op_generic.h b/libc/src/string/memory_utils/op_generic.h
index db218f8..c7dbd5d 100644
--- a/libc/src/string/memory_utils/op_generic.h
+++ b/libc/src/string/memory_utils/op_generic.h
@@ -390,7 +390,7 @@ private:
if constexpr (cmp_is_expensive<T>::value) {
if (!eq<T>(p1, p2, offset))
return cmp_neq<T>(p1, p2, offset);
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
} else {
return cmp<T>(p1, p2, offset);
}
@@ -443,7 +443,7 @@ public:
for (; offset < count; offset += SIZE)
if (auto value = cmp<T>(p1, p2, offset))
return value;
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
}
@@ -475,7 +475,7 @@ template <typename T, typename... TS> struct MemcmpSequence {
if constexpr (sizeof...(TS) > 0)
return MemcmpSequence<TS...>::block(p1 + sizeof(T), p2 + sizeof(T));
else
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
}
};
@@ -521,7 +521,7 @@ template <typename T> struct Bcmp {
for (; offset < count; offset += SIZE)
if (const auto value = neq<T>(p1, p2, offset))
return value;
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
}
@@ -547,7 +547,7 @@ template <typename T, typename... TS> struct BcmpSequence {
if constexpr (sizeof...(TS) > 0)
return BcmpSequence<TS...>::block(p1 + sizeof(T), p2 + sizeof(T));
else
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
}
};
diff --git a/libc/src/string/memory_utils/riscv/inline_memmove.h b/libc/src/string/memory_utils/riscv/inline_memmove.h
index 1c26917..1a95a8e 100644
--- a/libc/src/string/memory_utils/riscv/inline_memmove.h
+++ b/libc/src/string/memory_utils/riscv/inline_memmove.h
@@ -5,8 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-#ifndef LIBC_SRC_STRING_MEMORY_UTILS_RISCV_INLINE_MEMMOVE_H
-#define LIBC_SRC_STRING_MEMORY_UTILS_RISCV_INLINE_MEMMOVE_H
+#ifndef LLVM_LIBC_SRC_STRING_MEMORY_UTILS_RISCV_INLINE_MEMMOVE_H
+#define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_RISCV_INLINE_MEMMOVE_H
#include "src/__support/macros/attributes.h" // LIBC_INLINE
#include "src/__support/macros/properties/architectures.h" // LIBC_TARGET_ARCH_IS_RISCV64
@@ -24,4 +24,4 @@ inline_memmove_riscv(Ptr __restrict dst, CPtr __restrict src, size_t count) {
} // namespace LIBC_NAMESPACE
-#endif // LIBC_SRC_STRING_MEMORY_UTILS_RISCV_INLINE_MEMMOVE_H
+#endif // LLVM_LIBC_SRC_STRING_MEMORY_UTILS_RISCV_INLINE_MEMMOVE_H
diff --git a/libc/src/string/memory_utils/utils.h b/libc/src/string/memory_utils/utils.h
index 543d45b..701a843 100644
--- a/libc/src/string/memory_utils/utils.h
+++ b/libc/src/string/memory_utils/utils.h
@@ -130,8 +130,8 @@ template <typename T> struct StrictIntegralType {
}
// Helper to get the zero value.
- LIBC_INLINE static constexpr StrictIntegralType ZERO() { return {T(0)}; }
- LIBC_INLINE static constexpr StrictIntegralType NONZERO() { return {T(1)}; }
+ LIBC_INLINE static constexpr StrictIntegralType zero() { return {T(0)}; }
+ LIBC_INLINE static constexpr StrictIntegralType nonzero() { return {T(1)}; }
private:
T value;
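
(Editor's note: the ZERO()/NONZERO() call sites updated throughout this patch all resolve to these two factory functions. Below is a trimmed-down, self-contained stand-in; identifiers suffixed with Sketch are invented for illustration, and the real class keeps its value member private.)

#include <cstdint>

template <typename T> struct StrictIntegralTypeSketch {
  static constexpr StrictIntegralTypeSketch zero() { return {T(0)}; }
  static constexpr StrictIntegralTypeSketch nonzero() { return {T(1)}; }
  constexpr operator T() const { return value; } // lets callers test the result
  T value;
};

using BcmpReturnTypeSketch = StrictIntegralTypeSketch<std::uint32_t>;

// Typical call-site shape after the rename, mirroring inline_bcmp_byte_per_byte:
constexpr BcmpReturnTypeSketch compare_bytes(const unsigned char *p1,
                                             const unsigned char *p2,
                                             unsigned count) {
  for (unsigned i = 0; i < count; ++i)
    if (p1[i] != p2[i])
      return BcmpReturnTypeSketch::nonzero();
  return BcmpReturnTypeSketch::zero();
}
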
diff --git a/libc/src/string/memory_utils/x86_64/inline_bcmp.h b/libc/src/string/memory_utils/x86_64/inline_bcmp.h
index 31aff86..58eaedb 100644
--- a/libc/src/string/memory_utils/x86_64/inline_bcmp.h
+++ b/libc/src/string/memory_utils/x86_64/inline_bcmp.h
@@ -58,7 +58,7 @@ inline_bcmp_x86_avx512bw_gt16(CPtr p1, CPtr p2, size_t count) {
[[maybe_unused]] LIBC_INLINE BcmpReturnType inline_bcmp_x86(CPtr p1, CPtr p2,
size_t count) {
if (count == 0)
- return BcmpReturnType::ZERO();
+ return BcmpReturnType::zero();
if (count == 1)
return generic::Bcmp<uint8_t>::block(p1, p2);
if (count == 2)
diff --git a/libc/src/string/memory_utils/x86_64/inline_memcmp.h b/libc/src/string/memory_utils/x86_64/inline_memcmp.h
index d5fa77c..6a315ad 100644
--- a/libc/src/string/memory_utils/x86_64/inline_memcmp.h
+++ b/libc/src/string/memory_utils/x86_64/inline_memcmp.h
@@ -59,7 +59,7 @@ inline_memcmp_x86_avx512bw_gt16(CPtr p1, CPtr p2, size_t count) {
LIBC_INLINE MemcmpReturnType inline_memcmp_x86(CPtr p1, CPtr p2, size_t count) {
if (count == 0)
- return MemcmpReturnType::ZERO();
+ return MemcmpReturnType::zero();
if (count == 1)
return generic::Memcmp<uint8_t>::block(p1, p2);
if (count == 2)
diff --git a/libc/test/UnitTest/ExecuteFunction.h b/libc/test/UnitTest/ExecuteFunction.h
index 2129e63..9595056 100644
--- a/libc/test/UnitTest/ExecuteFunction.h
+++ b/libc/test/UnitTest/ExecuteFunction.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_TESTUTILS_EXECUTEFUNCTION_H
-#define LLVM_LIBC_UTILS_TESTUTILS_EXECUTEFUNCTION_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_EXECUTEFUNCTION_H
+#define LLVM_LIBC_TEST_UNITTEST_EXECUTEFUNCTION_H
#include <stdint.h>
@@ -49,4 +49,4 @@ const char *signal_as_string(int signum);
} // namespace testutils
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_UTILS_TESTUTILS_EXECUTEFUNCTION_H
+#endif // LLVM_LIBC_TEST_UNITTEST_EXECUTEFUNCTION_H
diff --git a/libc/test/UnitTest/FPExceptMatcher.h b/libc/test/UnitTest/FPExceptMatcher.h
index 98c4f73..d36e98d 100644
--- a/libc/test/UnitTest/FPExceptMatcher.h
+++ b/libc/test/UnitTest/FPExceptMatcher.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_UNITTEST_FPEXCEPTMATCHER_H
-#define LLVM_LIBC_UTILS_UNITTEST_FPEXCEPTMATCHER_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_FPEXCEPTMATCHER_H
+#define LLVM_LIBC_TEST_UNITTEST_FPEXCEPTMATCHER_H
#ifndef LIBC_COPT_TEST_USE_FUCHSIA
@@ -61,4 +61,4 @@ public:
#define ASSERT_RAISES_FP_EXCEPT(func) ASSERT_DEATH(func, WITH_SIGNAL(SIGFPE))
#endif // LIBC_COPT_TEST_USE_FUCHSIA
-#endif // LLVM_LIBC_UTILS_UNITTEST_FPEXCEPTMATCHER_H
+#endif // LLVM_LIBC_TEST_UNITTEST_FPEXCEPTMATCHER_H
diff --git a/libc/test/UnitTest/FPMatcher.h b/libc/test/UnitTest/FPMatcher.h
index c4a1cfa..4525b9e 100644
--- a/libc/test/UnitTest/FPMatcher.h
+++ b/libc/test/UnitTest/FPMatcher.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_UNITTEST_FPMATCHER_H
-#define LLVM_LIBC_UTILS_UNITTEST_FPMATCHER_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_FPMATCHER_H
+#define LLVM_LIBC_TEST_UNITTEST_FPMATCHER_H
#include "src/__support/CPP/type_traits.h"
#include "src/__support/FPUtil/FEnvImpl.h"
@@ -210,4 +210,4 @@ template <typename T> struct FPTest : public Test {
} \
} while (0)
-#endif // LLVM_LIBC_UTILS_UNITTEST_FPMATCHER_H
+#endif // LLVM_LIBC_TEST_UNITTEST_FPMATCHER_H
diff --git a/libc/test/UnitTest/LibcTest.h b/libc/test/UnitTest/LibcTest.h
index 00e34a4..639f600 100644
--- a/libc/test/UnitTest/LibcTest.h
+++ b/libc/test/UnitTest/LibcTest.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_UNITTEST_LIBCTEST_H
-#define LLVM_LIBC_UTILS_UNITTEST_LIBCTEST_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_LIBCTEST_H
+#define LLVM_LIBC_TEST_UNITTEST_LIBCTEST_H
// This is defined as a simple macro in test.h so that it exists for platforms
// that don't use our test infrastructure. It's defined as a proper function
@@ -493,4 +493,4 @@ CString libc_make_test_file_path_func(const char *file_name);
#define WITH_SIGNAL(X) X
-#endif // LLVM_LIBC_UTILS_UNITTEST_LIBCTEST_H
+#endif // LLVM_LIBC_TEST_UNITTEST_LIBCTEST_H
diff --git a/libc/test/UnitTest/MemoryMatcher.h b/libc/test/UnitTest/MemoryMatcher.h
index cf861a6..c548baf 100644
--- a/libc/test/UnitTest/MemoryMatcher.h
+++ b/libc/test/UnitTest/MemoryMatcher.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_UNITTEST_MEMORY_MATCHER_H
-#define LLVM_LIBC_UTILS_UNITTEST_MEMORY_MATCHER_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_MEMORYMATCHER_H
+#define LLVM_LIBC_TEST_UNITTEST_MEMORYMATCHER_H
#include "src/__support/CPP/span.h"
@@ -66,4 +66,4 @@ public:
#endif
-#endif // LLVM_LIBC_UTILS_UNITTEST_MEMORY_MATCHER_H
+#endif // LLVM_LIBC_TEST_UNITTEST_MEMORYMATCHER_H
diff --git a/libc/test/UnitTest/PlatformDefs.h b/libc/test/UnitTest/PlatformDefs.h
index 40472f4..f9911b1 100644
--- a/libc/test/UnitTest/PlatformDefs.h
+++ b/libc/test/UnitTest/PlatformDefs.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_UNITTEST_PLATFORMDEFS_H
-#define LLVM_LIBC_UTILS_UNITTEST_PLATFORMDEFS_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_PLATFORMDEFS_H
+#define LLVM_LIBC_TEST_UNITTEST_PLATFORMDEFS_H
#if !defined(_WIN32)
#define ENABLE_SUBPROCESS_TESTS
#endif
-#endif // LLVM_LIBC_UTILS_UNITTEST_PLATFORMDEFS_H
+#endif // LLVM_LIBC_TEST_UNITTEST_PLATFORMDEFS_H
diff --git a/libc/test/UnitTest/RoundingModeUtils.h b/libc/test/UnitTest/RoundingModeUtils.h
index d1c3c6f..b986c98 100644
--- a/libc/test/UnitTest/RoundingModeUtils.h
+++ b/libc/test/UnitTest/RoundingModeUtils.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_TESTUTILS_ROUNDINGMODEUTILS_H
-#define LLVM_LIBC_UTILS_TESTUTILS_ROUNDINGMODEUTILS_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_ROUNDINGMODEUTILS_H
+#define LLVM_LIBC_TEST_UNITTEST_ROUNDINGMODEUTILS_H
#include <stdint.h>
@@ -34,4 +34,4 @@ template <RoundingMode R> struct ForceRoundingModeTest : ForceRoundingMode {
} // namespace fputil
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_UTILS_TESTUTILS_ROUNDINGMODEUTILS_H
+#endif // LLVM_LIBC_TEST_UNITTEST_ROUNDINGMODEUTILS_H
diff --git a/libc/test/UnitTest/StringUtils.h b/libc/test/UnitTest/StringUtils.h
index ac28926..54cff97 100644
--- a/libc/test/UnitTest/StringUtils.h
+++ b/libc/test/UnitTest/StringUtils.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_UNITTEST_SIMPLE_STRING_CONV_H
-#define LLVM_LIBC_UTILS_UNITTEST_SIMPLE_STRING_CONV_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_STRINGUTILS_H
+#define LLVM_LIBC_TEST_UNITTEST_STRINGUTILS_H
#include "src/__support/CPP/string.h"
#include "src/__support/CPP/type_traits.h"
@@ -33,4 +33,4 @@ int_to_hex(T value, size_t length = sizeof(T) * 2) {
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_UTILS_UNITTEST_SIMPLE_STRING_CONV_H
+#endif // LLVM_LIBC_TEST_UNITTEST_STRINGUTILS_H
diff --git a/libc/test/UnitTest/Test.h b/libc/test/UnitTest/Test.h
index 61021b9..f7ce3cf 100644
--- a/libc/test/UnitTest/Test.h
+++ b/libc/test/UnitTest/Test.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_UNITTEST_TEST_H
-#define LLVM_LIBC_UTILS_UNITTEST_TEST_H
+#ifndef LLVM_LIBC_TEST_UNITTEST_TEST_H
+#define LLVM_LIBC_TEST_UNITTEST_TEST_H
// This macro takes a file name and returns a value implicitly castable to
// a const char*. That const char* is the path to a file with the provided name
@@ -24,4 +24,4 @@
#include "LibcTest.h"
#endif
-#endif // LLVM_LIBC_UTILS_UNITTEST_TEST_H
+#endif // LLVM_LIBC_TEST_UNITTEST_TEST_H
diff --git a/libc/test/integration/src/spawn/test_binary_properties.h b/libc/test/integration/src/spawn/test_binary_properties.h
index f1521c2..8e6a1fe 100644
--- a/libc/test/integration/src/spawn/test_binary_properties.h
+++ b/libc/test/integration/src/spawn/test_binary_properties.h
@@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LIBC_TEST_INTEGRATION_SRC_SPAWN_TEST_BINARY_PROPERTIES_H
-#define LIBC_TEST_INTEGRATION_SRC_SPAWN_TEST_BINARY_PROPERTIES_H
+#ifndef LLVM_LIBC_TEST_INTEGRATION_SRC_SPAWN_TEST_BINARY_PROPERTIES_H
+#define LLVM_LIBC_TEST_INTEGRATION_SRC_SPAWN_TEST_BINARY_PROPERTIES_H
constexpr int CHILD_FD = 10;
constexpr char TEXT[] = "Hello, posix_spawn";
-#endif // LIBC_TEST_INTEGRATION_SRC_SPAWN_TEST_BINARY_PROPERTIES_H
+#endif // LLVM_LIBC_TEST_INTEGRATION_SRC_SPAWN_TEST_BINARY_PROPERTIES_H
diff --git a/libc/test/src/math/FAbsTest.h b/libc/test/src/math/FAbsTest.h
index bf3052a..54f5f87 100644
--- a/libc/test/src/math/FAbsTest.h
+++ b/libc/test/src/math/FAbsTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_FABSTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_FABSTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
#include "utils/MPFRWrapper/MPFRUtils.h"
@@ -47,3 +50,5 @@ public:
using LlvmLibcFAbsTest = FAbsTest<T>; \
TEST_F(LlvmLibcFAbsTest, SpecialNumbers) { testSpecialNumbers(&func); } \
TEST_F(LlvmLibcFAbsTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_FABSTEST_H
diff --git a/libc/test/src/math/FMaxTest.h b/libc/test/src/math/FMaxTest.h
index edc46ae..f8046f3 100644
--- a/libc/test/src/math/FMaxTest.h
+++ b/libc/test/src/math/FMaxTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_FMAXTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_FMAXTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
#include "utils/MPFRWrapper/MPFRUtils.h"
@@ -83,3 +86,5 @@ public:
TEST_F(LlvmLibcFMaxTest, NegInfArg) { testNegInfArg(&func); } \
TEST_F(LlvmLibcFMaxTest, BothZero) { testBothZero(&func); } \
TEST_F(LlvmLibcFMaxTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_FMAXTEST_H
diff --git a/libc/test/src/math/FMinTest.h b/libc/test/src/math/FMinTest.h
index 5ff5836..7a6534f 100644
--- a/libc/test/src/math/FMinTest.h
+++ b/libc/test/src/math/FMinTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_FMINTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_FMINTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
#include "utils/MPFRWrapper/MPFRUtils.h"
@@ -83,3 +86,5 @@ public:
TEST_F(LlvmLibcFMinTest, NegInfArg) { testNegInfArg(&func); } \
TEST_F(LlvmLibcFMinTest, BothZero) { testBothZero(&func); } \
TEST_F(LlvmLibcFMinTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_FMINTEST_H
diff --git a/libc/test/src/math/FloorTest.h b/libc/test/src/math/FloorTest.h
index 5e459eb..66b37d6 100644
--- a/libc/test/src/math/FloorTest.h
+++ b/libc/test/src/math/FloorTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_FLOORTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_FLOORTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
#include "utils/MPFRWrapper/MPFRUtils.h"
@@ -82,3 +85,5 @@ public:
TEST_F(LlvmLibcFloorTest, RoundedNubmers) { testRoundedNumbers(&func); } \
TEST_F(LlvmLibcFloorTest, Fractions) { testFractions(&func); } \
TEST_F(LlvmLibcFloorTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_FLOORTEST_H
diff --git a/libc/test/src/math/RandUtils.h b/libc/test/src/math/RandUtils.h
index 05236ea..fecbd8e 100644
--- a/libc/test/src/math/RandUtils.h
+++ b/libc/test/src/math/RandUtils.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_RANDUTILS_H
+#define LLVM_LIBC_TEST_SRC_MATH_RANDUTILS_H
+
namespace LIBC_NAMESPACE {
namespace testutils {
@@ -14,3 +17,5 @@ int rand();
} // namespace testutils
} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_RANDUTILS_H
diff --git a/libc/test/src/math/RoundTest.h b/libc/test/src/math/RoundTest.h
index 4860464..b255ecc 100644
--- a/libc/test/src/math/RoundTest.h
+++ b/libc/test/src/math/RoundTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_ROUNDTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_ROUNDTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
#include "utils/MPFRWrapper/MPFRUtils.h"
@@ -82,3 +85,5 @@ public:
TEST_F(LlvmLibcRoundTest, RoundedNubmers) { testRoundedNumbers(&func); } \
TEST_F(LlvmLibcRoundTest, Fractions) { testFractions(&func); } \
TEST_F(LlvmLibcRoundTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_ROUNDTEST_H
diff --git a/libc/test/src/math/TruncTest.h b/libc/test/src/math/TruncTest.h
index 0d99363..6d0ea11 100644
--- a/libc/test/src/math/TruncTest.h
+++ b/libc/test/src/math/TruncTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_TRUNCTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_TRUNCTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
#include "utils/MPFRWrapper/MPFRUtils.h"
@@ -82,3 +85,5 @@ public:
TEST_F(LlvmLibcTruncTest, RoundedNubmers) { testRoundedNumbers(&func); } \
TEST_F(LlvmLibcTruncTest, Fractions) { testFractions(&func); } \
TEST_F(LlvmLibcTruncTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_TRUNCTEST_H
diff --git a/libc/test/src/math/differential_testing/Timer.h b/libc/test/src/math/differential_testing/Timer.h
index d4acff7..0d9518c 100644
--- a/libc/test/src/math/differential_testing/Timer.h
+++ b/libc/test/src/math/differential_testing/Timer.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_TESTUTILS_TIMER_H
-#define LLVM_LIBC_UTILS_TESTUTILS_TIMER_H
+#ifndef LLVM_LIBC_TEST_SRC_MATH_DIFFERENTIAL_TESTING_TIMER_H
+#define LLVM_LIBC_TEST_SRC_MATH_DIFFERENTIAL_TESTING_TIMER_H
#include <stdint.h>
@@ -30,4 +30,4 @@ public:
} // namespace testing
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_UTILS_TESTUTILS_TIMER_H
+#endif // LLVM_LIBC_TEST_SRC_MATH_DIFFERENTIAL_TESTING_TIMER_H
diff --git a/libc/test/src/math/in_float_range_test_helper.h b/libc/test/src/math/in_float_range_test_helper.h
index 5f345c0..35e039e 100644
--- a/libc/test/src/math/in_float_range_test_helper.h
+++ b/libc/test/src/math/in_float_range_test_helper.h
@@ -2,8 +2,8 @@
// Created by kirill on 8/30/22.
//
-#ifndef LLVM_IN_FLOAT_RANGE_TEST_HELPER_H
-#define LLVM_IN_FLOAT_RANGE_TEST_HELPER_H
+#ifndef LLVM_LIBC_TEST_SRC_MATH_IN_FLOAT_RANGE_TEST_HELPER_H
+#define LLVM_LIBC_TEST_SRC_MATH_IN_FLOAT_RANGE_TEST_HELPER_H
#include <stdint.h>
@@ -23,4 +23,4 @@
} \
}
-#endif // LLVM_IN_FLOAT_RANGE_TEST_HELPER_H
+#endif // LLVM_LIBC_TEST_SRC_MATH_IN_FLOAT_RANGE_TEST_HELPER_H
diff --git a/libc/test/src/math/smoke/CeilTest.h b/libc/test/src/math/smoke/CeilTest.h
index c10fd28..5248dbc 100644
--- a/libc/test/src/math/smoke/CeilTest.h
+++ b/libc/test/src/math/smoke/CeilTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_CEILTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_CEILTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -66,3 +69,5 @@ public:
TEST_F(LlvmLibcCeilTest, SpecialNumbers) { testSpecialNumbers(&func); } \
TEST_F(LlvmLibcCeilTest, RoundedNubmers) { testRoundedNumbers(&func); } \
TEST_F(LlvmLibcCeilTest, Fractions) { testFractions(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_CEILTEST_H
diff --git a/libc/test/src/math/smoke/CopySignTest.h b/libc/test/src/math/smoke/CopySignTest.h
index 1108a45..9ee3433 100644
--- a/libc/test/src/math/smoke/CopySignTest.h
+++ b/libc/test/src/math/smoke/CopySignTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_COPYSIGNTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_COPYSIGNTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -52,3 +55,5 @@ public:
using LlvmLibcCopySignTest = CopySignTest<T>; \
TEST_F(LlvmLibcCopySignTest, SpecialNumbers) { testSpecialNumbers(&func); } \
TEST_F(LlvmLibcCopySignTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_COPYSIGNTEST_H
diff --git a/libc/test/src/math/smoke/FAbsTest.h b/libc/test/src/math/smoke/FAbsTest.h
index 7d905bae..cf05882 100644
--- a/libc/test/src/math/smoke/FAbsTest.h
+++ b/libc/test/src/math/smoke/FAbsTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FABSTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FABSTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -35,3 +38,5 @@ public:
#define LIST_FABS_TESTS(T, func) \
using LlvmLibcFAbsTest = FAbsTest<T>; \
TEST_F(LlvmLibcFAbsTest, SpecialNumbers) { testSpecialNumbers(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FABSTEST_H
diff --git a/libc/test/src/math/smoke/FMaxTest.h b/libc/test/src/math/smoke/FMaxTest.h
index 1a376af..98fae06 100644
--- a/libc/test/src/math/smoke/FMaxTest.h
+++ b/libc/test/src/math/smoke/FMaxTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -80,3 +83,5 @@ public:
TEST_F(LlvmLibcFMaxTest, NegInfArg) { testNegInfArg(&func); } \
TEST_F(LlvmLibcFMaxTest, BothZero) { testBothZero(&func); } \
TEST_F(LlvmLibcFMaxTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXTEST_H
diff --git a/libc/test/src/math/smoke/FMinTest.h b/libc/test/src/math/smoke/FMinTest.h
index add2544..b1ffe38 100644
--- a/libc/test/src/math/smoke/FMinTest.h
+++ b/libc/test/src/math/smoke/FMinTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -80,3 +83,5 @@ public:
TEST_F(LlvmLibcFMinTest, NegInfArg) { testNegInfArg(&func); } \
TEST_F(LlvmLibcFMinTest, BothZero) { testBothZero(&func); } \
TEST_F(LlvmLibcFMinTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINTEST_H
diff --git a/libc/test/src/math/smoke/FloorTest.h b/libc/test/src/math/smoke/FloorTest.h
index 1c1b62c..610f5c2 100644
--- a/libc/test/src/math/smoke/FloorTest.h
+++ b/libc/test/src/math/smoke/FloorTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FLOORTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FLOORTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -66,3 +69,5 @@ public:
TEST_F(LlvmLibcFloorTest, SpecialNumbers) { testSpecialNumbers(&func); } \
TEST_F(LlvmLibcFloorTest, RoundedNubmers) { testRoundedNumbers(&func); } \
TEST_F(LlvmLibcFloorTest, Fractions) { testFractions(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FLOORTEST_H
diff --git a/libc/test/src/math/smoke/RIntTest.h b/libc/test/src/math/smoke/RIntTest.h
index 233164b..4c90dff 100644
--- a/libc/test/src/math/smoke/RIntTest.h
+++ b/libc/test/src/math/smoke/RIntTest.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_TEST_SRC_MATH_RINTTEST_H
-#define LLVM_LIBC_TEST_SRC_MATH_RINTTEST_H
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_RINTTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_RINTTEST_H
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
@@ -54,4 +54,4 @@ public:
using LlvmLibcRIntTest = RIntTestTemplate<F>; \
TEST_F(LlvmLibcRIntTest, specialNumbers) { testSpecialNumbers(&func); }
-#endif // LLVM_LIBC_TEST_SRC_MATH_RINTTEST_H
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_RINTTEST_H
diff --git a/libc/test/src/math/smoke/RoundTest.h b/libc/test/src/math/smoke/RoundTest.h
index 2e95f18..d2a5906 100644
--- a/libc/test/src/math/smoke/RoundTest.h
+++ b/libc/test/src/math/smoke/RoundTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_ROUNDTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_ROUNDTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -66,3 +69,5 @@ public:
TEST_F(LlvmLibcRoundTest, SpecialNumbers) { testSpecialNumbers(&func); } \
TEST_F(LlvmLibcRoundTest, RoundedNubmers) { testRoundedNumbers(&func); } \
TEST_F(LlvmLibcRoundTest, Fractions) { testFractions(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_ROUNDTEST_H
diff --git a/libc/test/src/math/smoke/RoundToIntegerTest.h b/libc/test/src/math/smoke/RoundToIntegerTest.h
index 5969413..e86533c 100644
--- a/libc/test/src/math/smoke/RoundToIntegerTest.h
+++ b/libc/test/src/math/smoke/RoundToIntegerTest.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_TEST_SRC_MATH_ROUNDTOINTEGERTEST_H
-#define LLVM_LIBC_TEST_SRC_MATH_ROUNDTOINTEGERTEST_H
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_ROUNDTOINTEGERTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_ROUNDTOINTEGERTEST_H
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
@@ -169,4 +169,4 @@ public:
#define LIST_ROUND_TO_INTEGER_TESTS_WITH_MODES(F, I, func) \
LIST_ROUND_TO_INTEGER_TESTS_HELPER(F, I, func, true)
-#endif // LLVM_LIBC_TEST_SRC_MATH_ROUNDTOINTEGERTEST_H
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_ROUNDTOINTEGERTEST_H
diff --git a/libc/test/src/math/smoke/TruncTest.h b/libc/test/src/math/smoke/TruncTest.h
index 8334a7b..71b1ab9 100644
--- a/libc/test/src/math/smoke/TruncTest.h
+++ b/libc/test/src/math/smoke/TruncTest.h
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_TRUNCTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_TRUNCTEST_H
+
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
@@ -66,3 +69,5 @@ public:
TEST_F(LlvmLibcTruncTest, SpecialNumbers) { testSpecialNumbers(&func); } \
TEST_F(LlvmLibcTruncTest, RoundedNubmers) { testRoundedNumbers(&func); } \
TEST_F(LlvmLibcTruncTest, Fractions) { testFractions(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_TRUNCTEST_H
diff --git a/libc/test/src/time/TmHelper.h b/libc/test/src/time/TmHelper.h
index d8e638d..1621094 100644
--- a/libc/test/src/time/TmHelper.h
+++ b/libc/test/src/time/TmHelper.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_TEST_SRC_TIME_TM_HELPER_H
-#define LLVM_LIBC_TEST_SRC_TIME_TM_HELPER_H
+#ifndef LLVM_LIBC_TEST_SRC_TIME_TMHELPER_H
+#define LLVM_LIBC_TEST_SRC_TIME_TMHELPER_H
#include <time.h>
@@ -40,4 +40,4 @@ static inline void initialize_tm_data(struct tm *tm_data, int year, int month,
} // namespace tmhelper
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_TEST_SRC_TIME_TM_HELPER_H
+#endif // LLVM_LIBC_TEST_SRC_TIME_TMHELPER_H
diff --git a/libc/utils/MPFRWrapper/MPFRUtils.h b/libc/utils/MPFRWrapper/MPFRUtils.h
index 25e6b0b..6164d78 100644
--- a/libc/utils/MPFRWrapper/MPFRUtils.h
+++ b/libc/utils/MPFRWrapper/MPFRUtils.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_UTILS_TESTUTILS_MPFRUTILS_H
-#define LLVM_LIBC_UTILS_TESTUTILS_MPFRUTILS_H
+#ifndef LLVM_LIBC_UTILS_MPFRWRAPPER_MPFRUTILS_H
+#define LLVM_LIBC_UTILS_MPFRWRAPPER_MPFRUTILS_H
#include "src/__support/CPP/type_traits.h"
#include "test/UnitTest/RoundingModeUtils.h"
@@ -426,4 +426,4 @@ template <typename T> bool round_to_long(T x, RoundingMode mode, long &result);
} \
}
-#endif // LLVM_LIBC_UTILS_TESTUTILS_MPFRUTILS_H
+#endif // LLVM_LIBC_UTILS_MPFRWRAPPER_MPFRUTILS_H
diff --git a/libcxx/cmake/config-ix.cmake b/libcxx/cmake/config-ix.cmake
index 1e8c2f5..7406fba 100644
--- a/libcxx/cmake/config-ix.cmake
+++ b/libcxx/cmake/config-ix.cmake
@@ -1,5 +1,6 @@
include(CMakePushCheckState)
include(CheckLibraryExists)
+include(CheckSymbolExists)
include(LLVMCheckCompilerLinkerFlag)
include(CheckCCompilerFlag)
include(CheckCXXCompilerFlag)
@@ -97,6 +98,8 @@ int main(void) { return 0; }
cmake_pop_check_state()
endif()
+check_symbol_exists(__PICOLIBC__ "string.h" PICOLIBC)
+
# Check libraries
if(WIN32 AND NOT MINGW)
# TODO(compnerd) do we want to support an emulation layer that allows for the
@@ -116,6 +119,10 @@ elseif(ANDROID)
set(LIBCXX_HAS_PTHREAD_LIB NO)
set(LIBCXX_HAS_RT_LIB NO)
set(LIBCXX_HAS_ATOMIC_LIB NO)
+elseif(PICOLIBC)
+ set(LIBCXX_HAS_PTHREAD_LIB NO)
+ set(LIBCXX_HAS_RT_LIB NO)
+ set(LIBCXX_HAS_ATOMIC_LIB NO)
else()
check_library_exists(pthread pthread_create "" LIBCXX_HAS_PTHREAD_LIB)
check_library_exists(rt clock_gettime "" LIBCXX_HAS_RT_LIB)
diff --git a/libcxx/include/__atomic/aliases.h b/libcxx/include/__atomic/aliases.h
index db34f5e..e27e09a 100644
--- a/libcxx/include/__atomic/aliases.h
+++ b/libcxx/include/__atomic/aliases.h
@@ -18,7 +18,6 @@
#include <__type_traits/make_unsigned.h>
#include <cstddef>
#include <cstdint>
-#include <cstdlib>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
diff --git a/libcxx/include/__config b/libcxx/include/__config
index 942bbe7..53ff113 100644
--- a/libcxx/include/__config
+++ b/libcxx/include/__config
@@ -460,6 +460,11 @@ _LIBCPP_HARDENING_MODE_DEBUG
# define __has_constexpr_builtin(x) 0
# endif
+// This checks whether a Clang module is built
+# ifndef __building_module
+# define __building_module(...) 0
+# endif
+
// '__is_identifier' returns '0' if '__x' is a reserved identifier provided by
// the compiler and '1' otherwise.
# ifndef __is_identifier
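
(Editor's note: a short, self-contained illustration of what the new fallback buys; the same pattern appears in the pthread.h hunk below.)

// Clang predefines __building_module(...); other compilers may not, so the
// __config hunk above supplies a fallback that always expands to 0.
#ifndef __building_module
#  define __building_module(...) 0
#endif

// With the fallback in place, a header can gate work on "is the named Clang
// module currently being built?" and still compile cleanly everywhere else:
#if __building_module(std)
#  include <math.h> // only absorbed while the 'std' module itself is built
#endif
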
diff --git a/libcxx/include/__thread/support/pthread.h b/libcxx/include/__thread/support/pthread.h
index d0b8367..d8e3f93 100644
--- a/libcxx/include/__thread/support/pthread.h
+++ b/libcxx/include/__thread/support/pthread.h
@@ -30,7 +30,10 @@
// so libc++'s <math.h> usually absorbs atomic_wide_counter.h into the
// module with <math.h> and makes atomic_wide_counter.h invisible.
// Include <math.h> here to work around that.
-#include <math.h>
+// This checks whether a Clang module is built
+#if __building_module(std)
+# include <math.h>
+#endif
#ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER
# pragma GCC system_header
diff --git a/libcxx/include/__utility/integer_sequence.h b/libcxx/include/__utility/integer_sequence.h
index e63f3f2..ccce943 100644
--- a/libcxx/include/__utility/integer_sequence.h
+++ b/libcxx/include/__utility/integer_sequence.h
@@ -31,65 +31,16 @@ struct __integer_sequence {
using __to_tuple_indices = __tuple_indices<(_Values + _Sp)...>;
};
-#if !__has_builtin(__make_integer_seq) || defined(_LIBCPP_TESTING_FALLBACK_MAKE_INTEGER_SEQUENCE)
-
-namespace __detail {
-
-template <typename _Tp, size_t... _Extra>
-struct __repeat;
-template <typename _Tp, _Tp... _Np, size_t... _Extra>
-struct __repeat<__integer_sequence<_Tp, _Np...>, _Extra...> {
- typedef _LIBCPP_NODEBUG __integer_sequence<
- _Tp,
- _Np...,
- sizeof...(_Np) + _Np...,
- 2 * sizeof...(_Np) + _Np...,
- 3 * sizeof...(_Np) + _Np...,
- 4 * sizeof...(_Np) + _Np...,
- 5 * sizeof...(_Np) + _Np...,
- 6 * sizeof...(_Np) + _Np...,
- 7 * sizeof...(_Np) + _Np...,
- _Extra...>
- type;
-};
-
-template <size_t _Np>
-struct __parity;
-template <size_t _Np>
-struct __make : __parity<_Np % 8>::template __pmake<_Np> {};
-
-// clang-format off
-template<> struct __make<0> { typedef __integer_sequence<size_t> type; };
-template<> struct __make<1> { typedef __integer_sequence<size_t, 0> type; };
-template<> struct __make<2> { typedef __integer_sequence<size_t, 0, 1> type; };
-template<> struct __make<3> { typedef __integer_sequence<size_t, 0, 1, 2> type; };
-template<> struct __make<4> { typedef __integer_sequence<size_t, 0, 1, 2, 3> type; };
-template<> struct __make<5> { typedef __integer_sequence<size_t, 0, 1, 2, 3, 4> type; };
-template<> struct __make<6> { typedef __integer_sequence<size_t, 0, 1, 2, 3, 4, 5> type; };
-template<> struct __make<7> { typedef __integer_sequence<size_t, 0, 1, 2, 3, 4, 5, 6> type; };
-
-template<> struct __parity<0> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type> {}; };
-template<> struct __parity<1> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type, _Np - 1> {}; };
-template<> struct __parity<2> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type, _Np - 2, _Np - 1> {}; };
-template<> struct __parity<3> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type, _Np - 3, _Np - 2, _Np - 1> {}; };
-template<> struct __parity<4> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type, _Np - 4, _Np - 3, _Np - 2, _Np - 1> {}; };
-template<> struct __parity<5> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type, _Np - 5, _Np - 4, _Np - 3, _Np - 2, _Np - 1> {}; };
-template<> struct __parity<6> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type, _Np - 6, _Np - 5, _Np - 4, _Np - 3, _Np - 2, _Np - 1> {}; };
-template<> struct __parity<7> { template<size_t _Np> struct __pmake : __repeat<typename __make<_Np / 8>::type, _Np - 7, _Np - 6, _Np - 5, _Np - 4, _Np - 3, _Np - 2, _Np - 1> {}; };
-// clang-format on
-
-} // namespace __detail
-
-#endif
-
#if __has_builtin(__make_integer_seq)
template <size_t _Ep, size_t _Sp>
using __make_indices_imp =
typename __make_integer_seq<__integer_sequence, size_t, _Ep - _Sp>::template __to_tuple_indices<_Sp>;
-#else
+#elif __has_builtin(__integer_pack)
template <size_t _Ep, size_t _Sp>
-using __make_indices_imp = typename __detail::__make<_Ep - _Sp>::type::template __to_tuple_indices<_Sp>;
-
+using __make_indices_imp =
+ typename __integer_sequence<size_t, __integer_pack(_Ep - _Sp)...>::template __to_tuple_indices<_Sp>;
+#else
+# error "No known way to get an integer pack from the compiler"
#endif
#if _LIBCPP_STD_VER >= 14
@@ -104,34 +55,20 @@ struct _LIBCPP_TEMPLATE_VIS integer_sequence {
template <size_t... _Ip>
using index_sequence = integer_sequence<size_t, _Ip...>;
-# if __has_builtin(__make_integer_seq) && !defined(_LIBCPP_TESTING_FALLBACK_MAKE_INTEGER_SEQUENCE)
+# if __has_builtin(__make_integer_seq)
template <class _Tp, _Tp _Ep>
-using __make_integer_sequence _LIBCPP_NODEBUG = __make_integer_seq<integer_sequence, _Tp, _Ep>;
-
-# else
+using make_integer_sequence _LIBCPP_NODEBUG = __make_integer_seq<integer_sequence, _Tp, _Ep>;
-template <typename _Tp, _Tp _Np>
-using __make_integer_sequence_unchecked _LIBCPP_NODEBUG =
- typename __detail::__make<_Np>::type::template __convert<integer_sequence, _Tp>;
+# elif __has_builtin(__integer_pack)
-template <class _Tp, _Tp _Ep>
-struct __make_integer_sequence_checked {
- static_assert(is_integral<_Tp>::value, "std::make_integer_sequence can only be instantiated with an integral type");
- static_assert(0 <= _Ep, "std::make_integer_sequence must have a non-negative sequence length");
- // Workaround GCC bug by preventing bad installations when 0 <= _Ep
- // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68929
- typedef _LIBCPP_NODEBUG __make_integer_sequence_unchecked<_Tp, 0 <= _Ep ? _Ep : 0> type;
-};
-
-template <class _Tp, _Tp _Ep>
-using __make_integer_sequence _LIBCPP_NODEBUG = typename __make_integer_sequence_checked<_Tp, _Ep>::type;
+template <class _Tp, _Tp _SequenceSize>
+using make_integer_sequence _LIBCPP_NODEBUG = integer_sequence<_Tp, __integer_pack(_SequenceSize)...>;
+# else
+# error "No known way to get an integer pack from the compiler"
# endif
-template <class _Tp, _Tp _Np>
-using make_integer_sequence = __make_integer_sequence<_Tp, _Np>;
-
template <size_t _Np>
using make_index_sequence = make_integer_sequence<size_t, _Np>;
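
(Editor's note: the rewritten integer_sequence.h drops the long recursive fallback and keys everything off two compiler builtins. Below is a compressed, standalone sketch of that selection logic; identifiers suffixed _sketch are illustrative, it assumes a compiler providing __has_builtin such as Clang or a recent GCC, and the real code additionally handles _LIBCPP_NODEBUG and the internal pre-C++14 path.)

#include <cstddef>

template <class _Tp, _Tp... _Values> struct integer_sequence_sketch {};

#if __has_builtin(__make_integer_seq)
// Clang: __make_integer_seq<Seq, T, N> names Seq<T, 0, 1, ..., N-1> directly.
template <class _Tp, _Tp _Np>
using make_integer_sequence_sketch =
    __make_integer_seq<integer_sequence_sketch, _Tp, _Np>;
#elif __has_builtin(__integer_pack)
// GCC: __integer_pack(N), used inside a pack expansion, expands to 0, 1, ..., N-1.
template <class _Tp, _Tp _Np>
using make_integer_sequence_sketch =
    integer_sequence_sketch<_Tp, __integer_pack(_Np)...>;
#else
#  error "No known way to get an integer pack from the compiler"
#endif

// e.g. make_integer_sequence_sketch<std::size_t, 3>
//        is integer_sequence_sketch<std::size_t, 0, 1, 2>
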
diff --git a/libcxx/include/atomic b/libcxx/include/atomic
index 2e8f5b5..2dac693 100644
--- a/libcxx/include/atomic
+++ b/libcxx/include/atomic
@@ -620,6 +620,7 @@ template <class T>
#if !defined(_LIBCPP_REMOVE_TRANSITIVE_INCLUDES) && _LIBCPP_STD_VER <= 20
# include <cmath>
# include <compare>
+# include <cstdlib>
# include <cstring>
# include <type_traits>
#endif
diff --git a/libcxx/test/libcxx/transitive_includes/cxx23.csv b/libcxx/test/libcxx/transitive_includes/cxx23.csv
index 44b5f78..49b3ac2 100644
--- a/libcxx/test/libcxx/transitive_includes/cxx23.csv
+++ b/libcxx/test/libcxx/transitive_includes/cxx23.csv
@@ -30,7 +30,6 @@ array stdexcept
array version
atomic cstddef
atomic cstdint
-atomic cstdlib
atomic cstring
atomic ctime
atomic limits
diff --git a/libcxx/test/libcxx/transitive_includes/cxx26.csv b/libcxx/test/libcxx/transitive_includes/cxx26.csv
index 44b5f78..49b3ac2 100644
--- a/libcxx/test/libcxx/transitive_includes/cxx26.csv
+++ b/libcxx/test/libcxx/transitive_includes/cxx26.csv
@@ -30,7 +30,6 @@ array stdexcept
array version
atomic cstddef
atomic cstdint
-atomic cstdlib
atomic cstring
atomic ctime
atomic limits
diff --git a/libcxx/test/std/time/time.clock/time.clock.file/to_from_sys.pass.cpp b/libcxx/test/std/time/time.clock/time.clock.file/to_from_sys.pass.cpp
index b1031c8..5b1f465 100644
--- a/libcxx/test/std/time/time.clock/time.clock.file/to_from_sys.pass.cpp
+++ b/libcxx/test/std/time/time.clock/time.clock.file/to_from_sys.pass.cpp
@@ -10,9 +10,6 @@
// UNSUPPORTED: availability-filesystem-missing
-// "unable to find library from dependent library specifier: rt"
-// XFAIL: LIBCXX-PICOLIBC-FIXME
-
// <chrono>
//
// file_clock
diff --git a/libcxx/test/std/time/time.clock/time.clock.hires/now.pass.cpp b/libcxx/test/std/time/time.clock/time.clock.hires/now.pass.cpp
index 8625ac5..db1fb55 100644
--- a/libcxx/test/std/time/time.clock/time.clock.hires/now.pass.cpp
+++ b/libcxx/test/std/time/time.clock/time.clock.hires/now.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// "unable to find library from dependent library specifier: rt"
-// XFAIL: LIBCXX-PICOLIBC-FIXME
-
// <chrono>
// high_resolution_clock
diff --git a/libcxx/test/std/time/time.clock/time.clock.system/from_time_t.pass.cpp b/libcxx/test/std/time/time.clock/time.clock.system/from_time_t.pass.cpp
index 5ff6674..70dd811 100644
--- a/libcxx/test/std/time/time.clock/time.clock.system/from_time_t.pass.cpp
+++ b/libcxx/test/std/time/time.clock/time.clock.system/from_time_t.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// "unable to find library from dependent library specifier: rt"
-// XFAIL: LIBCXX-PICOLIBC-FIXME
-
// <chrono>
// system_clock
diff --git a/libcxx/test/std/time/time.clock/time.clock.system/now.pass.cpp b/libcxx/test/std/time/time.clock/time.clock.system/now.pass.cpp
index 70fbe98..dade6ba 100644
--- a/libcxx/test/std/time/time.clock/time.clock.system/now.pass.cpp
+++ b/libcxx/test/std/time/time.clock/time.clock.system/now.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// "unable to find library from dependent library specifier: rt"
-// XFAIL: LIBCXX-PICOLIBC-FIXME
-
// <chrono>
// system_clock
diff --git a/libcxx/test/std/time/time.clock/time.clock.system/to_time_t.pass.cpp b/libcxx/test/std/time/time.clock/time.clock.system/to_time_t.pass.cpp
index f3238f7..bf4339c 100644
--- a/libcxx/test/std/time/time.clock/time.clock.system/to_time_t.pass.cpp
+++ b/libcxx/test/std/time/time.clock/time.clock.system/to_time_t.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// "unable to find library from dependent library specifier: rt"
-// XFAIL: LIBCXX-PICOLIBC-FIXME
-
// <chrono>
// system_clock
diff --git a/libcxx/test/std/time/time.point/time.point.nonmember/op_-duration.pass.cpp b/libcxx/test/std/time/time.point/time.point.nonmember/op_-duration.pass.cpp
index 199bdec..80e9d04 100644
--- a/libcxx/test/std/time/time.point/time.point.nonmember/op_-duration.pass.cpp
+++ b/libcxx/test/std/time/time.point/time.point.nonmember/op_-duration.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// "unable to find library from dependent library specifier: rt"
-// XFAIL: LIBCXX-PICOLIBC-FIXME
-
// <chrono>
// time_point
diff --git a/libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.pass.cpp b/libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.pass.cpp
deleted file mode 100644
index ceeb4dd..0000000
--- a/libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.pass.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// <utility>
-
-// template<class T, T N>
-// using make_integer_sequence = integer_sequence<T, 0, 1, ..., N-1>;
-
-// UNSUPPORTED: c++03, c++11
-
-#define _LIBCPP_TESTING_FALLBACK_MAKE_INTEGER_SEQUENCE
-#include "make_integer_seq.pass.cpp"
-
-#include "test_macros.h"
diff --git a/libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.verify.cpp b/libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.verify.cpp
deleted file mode 100644
index 32a4a54..0000000
--- a/libcxx/test/std/utilities/intseq/intseq.make/make_integer_seq_fallback.verify.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// <utility>
-
-// template<class T, T N>
-// using make_integer_sequence = integer_sequence<T, 0, 1, ..., N-1>;
-
-// UNSUPPORTED: c++03, c++11
-
-// This test hangs during recursive template instantiation with libstdc++
-// UNSUPPORTED: stdlib=libstdc++
-
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_TESTING_FALLBACK_MAKE_INTEGER_SEQUENCE
-
-#include <utility>
-
-typedef std::make_integer_sequence<int, -3> MakeSeqT;
-MakeSeqT i; // expected-error-re@*:* {{static assertion failed{{.*}}std::make_integer_sequence must have a non-negative sequence length}}
diff --git a/lldb/bindings/CMakeLists.txt b/lldb/bindings/CMakeLists.txt
index b44ed59..296eae1 100644
--- a/lldb/bindings/CMakeLists.txt
+++ b/lldb/bindings/CMakeLists.txt
@@ -23,7 +23,11 @@ endif()
set(SWIG_COMMON_FLAGS
-c++
- -w361,362 # Ignore warnings about ignored operator overloads
+ # Ignored warnings:
+ # 361: operator! ignored.
+ # 362: operator= ignored.
+ # 509: Overloaded method declaration effectively ignored, shadowed by previous declaration.
+ -w361,362,509
-features autodoc
-I${LLDB_SOURCE_DIR}/include
-I${CMAKE_CURRENT_SOURCE_DIR}
diff --git a/lldb/cmake/modules/LLDBConfig.cmake b/lldb/cmake/modules/LLDBConfig.cmake
index a758261..93c8ffe 100644
--- a/lldb/cmake/modules/LLDBConfig.cmake
+++ b/lldb/cmake/modules/LLDBConfig.cmake
@@ -67,6 +67,8 @@ option(LLDB_SKIP_STRIP "Whether to skip stripping of binaries when installing ll
option(LLDB_SKIP_DSYM "Whether to skip generating a dSYM when installing lldb." OFF)
option(LLDB_ENFORCE_STRICT_TEST_REQUIREMENTS
"Fail to configure if certain requirements are not met for testing." OFF)
+option(LLDB_TEST_USE_VENDOR_PACKAGES
+ "Use packages from lldb/third_party/Python/module instead of system deps." ON)
set(LLDB_GLOBAL_INIT_DIRECTORY "" CACHE STRING
"Path to the global lldbinit directory. Relative paths are resolved relative to the
diff --git a/lldb/source/Commands/CommandObjectTarget.cpp b/lldb/source/Commands/CommandObjectTarget.cpp
index 4526557..b2346c2 100644
--- a/lldb/source/Commands/CommandObjectTarget.cpp
+++ b/lldb/source/Commands/CommandObjectTarget.cpp
@@ -3376,15 +3376,19 @@ protected:
case 'r': {
size_t ref_count = 0;
+ char in_shared_cache = 'Y';
+
ModuleSP module_sp(module->shared_from_this());
+ if (!ModuleList::ModuleIsInCache(module))
+ in_shared_cache = 'N';
if (module_sp) {
// Take one away to make sure we don't count our local "module_sp"
ref_count = module_sp.use_count() - 1;
}
if (width)
- strm.Printf("{%*" PRIu64 "}", width, (uint64_t)ref_count);
+ strm.Printf("{%c %*" PRIu64 "}", in_shared_cache, width, (uint64_t)ref_count);
else
- strm.Printf("{%" PRIu64 "}", (uint64_t)ref_count);
+ strm.Printf("{%c %" PRIu64 "}", in_shared_cache, (uint64_t)ref_count);
} break;
case 's':
diff --git a/lldb/source/Commands/Options.td b/lldb/source/Commands/Options.td
index ad4321d9..91d5eea 100644
--- a/lldb/source/Commands/Options.td
+++ b/lldb/source/Commands/Options.td
@@ -936,8 +936,8 @@ let Command = "target modules list" in {
OptionalArg<"Width">, Desc<"Display the modification time with optional "
"width of the module.">;
def target_modules_list_ref_count : Option<"ref-count", "r">, Group<1>,
- OptionalArg<"Width">, Desc<"Display the reference count if the module is "
- "still in the shared module cache.">;
+ OptionalArg<"Width">, Desc<"Display whether the module is still in the "
+ "the shared module cache (Y/N), and its shared pointer use_count.">;
def target_modules_list_pointer : Option<"pointer", "p">, Group<1>,
OptionalArg<"None">, Desc<"Display the module pointer.">;
def target_modules_list_global : Option<"global", "g">, Group<1>,
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
index a1ad3f5..ce52f35 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
@@ -1417,7 +1417,7 @@ bool ScriptInterpreterPythonImpl::GenerateScriptAliasFunction(
sstr.Printf("def %s (debugger, args, exe_ctx, result, internal_dict):",
auto_generated_function_name.c_str());
- if (!GenerateFunction(sstr.GetData(), user_input, /*is_callback=*/true)
+ if (!GenerateFunction(sstr.GetData(), user_input, /*is_callback=*/false)
.Success())
return false;
diff --git a/lldb/source/Symbol/Variable.cpp b/lldb/source/Symbol/Variable.cpp
index 2bb2ff7..a33c343 100644
--- a/lldb/source/Symbol/Variable.cpp
+++ b/lldb/source/Symbol/Variable.cpp
@@ -509,15 +509,17 @@ static void PrivateAutoCompleteMembers(
CompilerType member_compiler_type = compiler_type.GetFieldAtIndex(
i, member_name, nullptr, nullptr, nullptr);
- if (partial_member_name.empty() ||
- llvm::StringRef(member_name).starts_with(partial_member_name)) {
+ if (partial_member_name.empty()) {
+ request.AddCompletion((prefix_path + member_name).str());
+ } else if (llvm::StringRef(member_name)
+ .starts_with(partial_member_name)) {
if (member_name == partial_member_name) {
PrivateAutoComplete(
frame, partial_path,
prefix_path + member_name, // Anything that has been resolved
// already will be in here
member_compiler_type.GetCanonicalType(), request);
- } else {
+ } else if (partial_path.empty()) {
request.AddCompletion((prefix_path + member_name).str());
}
}
diff --git a/lldb/test/API/commands/command/script/TestCommandScript.py b/lldb/test/API/commands/command/script/TestCommandScript.py
index 8505520..fdd5216 100644
--- a/lldb/test/API/commands/command/script/TestCommandScript.py
+++ b/lldb/test/API/commands/command/script/TestCommandScript.py
@@ -216,3 +216,17 @@ class CmdPythonTestCase(TestBase):
# The result object will be replaced by an empty result object (in the
# "Started" state).
self.expect("script str(persistence.result_copy)", substrs=["Started"])
+
+ def test_interactive(self):
+ """
+ Test that we can add multiple lines interactively.
+ """
+ interp = self.dbg.GetCommandInterpreter()
+ cmd_file = self.getSourcePath("cmd_file.lldb")
+ result = lldb.SBCommandReturnObject()
+ interp.HandleCommand(f"command source {cmd_file}", result)
+ self.assertCommandReturn(result, "Sourcing the command should cause no errors.")
+ self.assertTrue(interp.UserCommandExists("my_cmd"), "Command defined.")
+ interp.HandleCommand("my_cmd", result)
+ self.assertCommandReturn(result, "Running the command succeeds")
+ self.assertIn("My Command Result", result.GetOutput(), "Command was correct")
diff --git a/lldb/test/API/commands/command/script/cmd_file.lldb b/lldb/test/API/commands/command/script/cmd_file.lldb
new file mode 100644
index 0000000..1589a7c
--- /dev/null
+++ b/lldb/test/API/commands/command/script/cmd_file.lldb
@@ -0,0 +1,4 @@
+command script add my_cmd
+result.PutCString("My Command Result")
+result.SetStatus(lldb.eReturnStatusSuccessFinishResult)
+DONE
diff --git a/lldb/test/API/functionalities/completion/TestCompletion.py b/lldb/test/API/functionalities/completion/TestCompletion.py
index f71bc73..0d6907e 100644
--- a/lldb/test/API/functionalities/completion/TestCompletion.py
+++ b/lldb/test/API/functionalities/completion/TestCompletion.py
@@ -60,10 +60,12 @@ class CommandLineCompletionTestCase(TestBase):
def do_test_variable_completion(self, command):
self.complete_from_to(f"{command} fo", f"{command} fooo")
- self.complete_from_to(f"{command} fooo.", f"{command} fooo.")
+ self.complete_from_to(f"{command} fooo.", f"{command} fooo.t")
+ self.complete_from_to(f"{command} fooo.t.", f"{command} fooo.t.x")
self.complete_from_to(f"{command} fooo.dd", f"{command} fooo.dd")
- self.complete_from_to(f"{command} ptr_fooo->", f"{command} ptr_fooo->")
+ self.complete_from_to(f"{command} ptr_fooo->", f"{command} ptr_fooo->t")
+ self.complete_from_to(f"{command} ptr_fooo->t.", f"{command} ptr_fooo->t.x")
self.complete_from_to(f"{command} ptr_fooo->dd", f"{command} ptr_fooo->dd")
self.complete_from_to(f"{command} cont", f"{command} container")
diff --git a/lldb/test/API/functionalities/completion/main.cpp b/lldb/test/API/functionalities/completion/main.cpp
index 06ff577..f925c1d 100644
--- a/lldb/test/API/functionalities/completion/main.cpp
+++ b/lldb/test/API/functionalities/completion/main.cpp
@@ -1,12 +1,17 @@
#include <iostream>
+class Baz {
+public:
+ int x;
+};
+
class Foo
{
public:
- int Bar(int x, int y)
- {
- return x + y;
- }
+ Baz t;
+ int temp;
+
+ int Bar(int x, int y) { return x + y; }
};
namespace { int Quux (void) { return 0; } }
diff --git a/lldb/test/API/lit.cfg.py b/lldb/test/API/lit.cfg.py
index 12675ed..f9497b6 100644
--- a/lldb/test/API/lit.cfg.py
+++ b/lldb/test/API/lit.cfg.py
@@ -309,3 +309,6 @@ if "FREEBSD_LEGACY_PLUGIN" in os.environ:
# Propagate XDG_CACHE_HOME
if "XDG_CACHE_HOME" in os.environ:
config.environment["XDG_CACHE_HOME"] = os.environ["XDG_CACHE_HOME"]
+
+if is_configured("use_vendor_packages"):
+ config.environment["LLDB_TEST_USE_VENDOR_PACKAGES"] = "1"
diff --git a/lldb/test/API/lit.site.cfg.py.in b/lldb/test/API/lit.site.cfg.py.in
index 053331d..c2602ac 100644
--- a/lldb/test/API/lit.site.cfg.py.in
+++ b/lldb/test/API/lit.site.cfg.py.in
@@ -38,6 +38,7 @@ config.libcxx_include_target_dir = "@LIBCXX_GENERATED_INCLUDE_TARGET_DIR@"
# The API tests use their own module caches.
config.lldb_module_cache = os.path.join("@LLDB_TEST_MODULE_CACHE_LLDB@", "lldb-api")
config.clang_module_cache = os.path.join("@LLDB_TEST_MODULE_CACHE_CLANG@", "lldb-api")
+config.use_vendor_packages = @LLDB_TEST_USE_VENDOR_PACKAGES@
# Plugins
lldb_build_intel_pt = '@LLDB_BUILD_INTEL_PT@'
diff --git a/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py b/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py
index 17cdad8..52c0bbf 100644
--- a/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py
+++ b/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py
@@ -14,6 +14,51 @@ class TestDAP_setDataBreakpoints(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
@skipIfRemote
+ def test_duplicate_start_addresses(self):
+ """Test setDataBreakpoints with multiple watchpoints starting at the same addresses."""
+ program = self.getBuildArtifact("a.out")
+ self.build_and_launch(program)
+ source = "main.cpp"
+ first_loop_break_line = line_number(source, "// first loop breakpoint")
+ self.set_source_breakpoints(source, [first_loop_break_line])
+ self.continue_to_next_stop()
+ self.dap_server.get_stackFrame()
+ # Test setting write watchpoint using expressions: &x, arr+2
+ response_x = self.dap_server.request_dataBreakpointInfo(0, "&x")
+ response_arr_2 = self.dap_server.request_dataBreakpointInfo(0, "arr+2")
+ # Test response from dataBreakpointInfo request.
+ self.assertEquals(response_x["body"]["dataId"].split("/")[1], "4")
+ self.assertEquals(response_x["body"]["accessTypes"], self.accessTypes)
+ self.assertEquals(response_arr_2["body"]["dataId"].split("/")[1], "4")
+ self.assertEquals(response_arr_2["body"]["accessTypes"], self.accessTypes)
+ # The first one should be overwritten by the third one as they start at
+ # the same address. This is indicated by returning {verified: False} for
+ # the first one.
+ dataBreakpoints = [
+ {"dataId": response_x["body"]["dataId"], "accessType": "read"},
+ {"dataId": response_arr_2["body"]["dataId"], "accessType": "write"},
+ {"dataId": response_x["body"]["dataId"], "accessType": "write"},
+ ]
+ set_response = self.dap_server.request_setDataBreakpoint(dataBreakpoints)
+ self.assertEquals(
+ set_response["body"]["breakpoints"],
+ [{"verified": False}, {"verified": True}, {"verified": True}],
+ )
+
+ self.continue_to_next_stop()
+ x_val = self.dap_server.get_local_variable_value("x")
+ i_val = self.dap_server.get_local_variable_value("i")
+ self.assertEquals(x_val, "2")
+ self.assertEquals(i_val, "1")
+
+ self.continue_to_next_stop()
+ arr_2 = self.dap_server.get_local_variable_child("arr", "[2]")
+ i_val = self.dap_server.get_local_variable_value("i")
+ self.assertEquals(arr_2["value"], "42")
+ self.assertEquals(i_val, "2")
+
+ @skipIfWindows
+ @skipIfRemote
def test_expression(self):
"""Tests setting data breakpoints on expression."""
program = self.getBuildArtifact("a.out")
diff --git a/lldb/test/CMakeLists.txt b/lldb/test/CMakeLists.txt
index 1aa8843..7c31fd4 100644
--- a/lldb/test/CMakeLists.txt
+++ b/lldb/test/CMakeLists.txt
@@ -26,6 +26,21 @@ if(LLDB_ENFORCE_STRICT_TEST_REQUIREMENTS)
endforeach()
endif()
+# The "pexpect" package should come from the system environment, not from the
+# LLDB tree. However, we delay the deletion of it from the tree in case
+# users/buildbots don't have the package yet and need some time to install it.
+if (NOT LLDB_TEST_USE_VENDOR_PACKAGES)
+ unset(PY_pexpect_FOUND CACHE)
+ lldb_find_python_module(pexpect)
+ if (NOT PY_pexpect_FOUND)
+ message(FATAL_ERROR
+ "Python module 'pexpect' not found. Please install it via pip or via "
+ "your operating system's package manager. For a temporary workaround, "
+ "use a version from the LLDB tree with "
+ "`LLDB_TEST_USE_VENDOR_PACKAGES=ON`")
+ endif()
+endif()
+
if(LLDB_BUILT_STANDALONE)
# In order to run check-lldb-* we need the correct map_config directives in
# llvm-lit. Because this is a standalone build, LLVM doesn't know about LLDB,
@@ -240,7 +255,8 @@ llvm_canonicalize_cmake_booleans(
LLDB_HAS_LIBCXX
LLDB_TOOL_LLDB_SERVER_BUILD
LLDB_USE_SYSTEM_DEBUGSERVER
- LLDB_IS_64_BITS)
+ LLDB_IS_64_BITS
+ LLDB_TEST_USE_VENDOR_PACKAGES)
# Configure the individual test suites.
add_subdirectory(API)
diff --git a/lldb/test/Shell/Driver/TestHelp.test b/lldb/test/Shell/Driver/TestHelp.test
index 0f73fdf..2521b31 100644
--- a/lldb/test/Shell/Driver/TestHelp.test
+++ b/lldb/test/Shell/Driver/TestHelp.test
@@ -37,8 +37,6 @@ CHECK: --arch
CHECK: -a
CHECK: --core
CHECK: -c
-CHECK: --debug
-CHECK: -d
CHECK: --editor
CHECK: -e
CHECK: --file
diff --git a/lldb/tools/driver/Driver.cpp b/lldb/tools/driver/Driver.cpp
index c63ff0f..9286abb 100644
--- a/lldb/tools/driver/Driver.cpp
+++ b/lldb/tools/driver/Driver.cpp
@@ -188,7 +188,6 @@ SBError Driver::ProcessArgs(const opt::InputArgList &args, bool &exiting) {
if (args.hasArg(OPT_no_use_colors)) {
m_debugger.SetUseColor(false);
WithColor::setAutoDetectFunction(disable_color);
- m_option_data.m_debug_mode = true;
}
if (args.hasArg(OPT_version)) {
@@ -455,16 +454,7 @@ int Driver::MainLoop() {
// Process lldbinit files before handling any options from the command line.
SBCommandReturnObject result;
sb_interpreter.SourceInitFileInGlobalDirectory(result);
- if (m_option_data.m_debug_mode) {
- result.PutError(m_debugger.GetErrorFile());
- result.PutOutput(m_debugger.GetOutputFile());
- }
-
sb_interpreter.SourceInitFileInHomeDirectory(result, m_option_data.m_repl);
- if (m_option_data.m_debug_mode) {
- result.PutError(m_debugger.GetErrorFile());
- result.PutOutput(m_debugger.GetOutputFile());
- }
// Source the local .lldbinit file if it exists and we're allowed to source.
// Here we want to always print the return object because it contains the
@@ -536,11 +526,6 @@ int Driver::MainLoop() {
"or -s) are ignored in REPL mode.\n";
}
- if (m_option_data.m_debug_mode) {
- result.PutError(m_debugger.GetErrorFile());
- result.PutOutput(m_debugger.GetOutputFile());
- }
-
const bool handle_events = true;
const bool spawn_thread = false;
diff --git a/lldb/tools/driver/Driver.h b/lldb/tools/driver/Driver.h
index d5779b3..83e0d8a 100644
--- a/lldb/tools/driver/Driver.h
+++ b/lldb/tools/driver/Driver.h
@@ -75,7 +75,6 @@ public:
std::vector<InitialCmdEntry> m_after_file_commands;
std::vector<InitialCmdEntry> m_after_crash_commands;
- bool m_debug_mode = false;
bool m_source_quietly = false;
bool m_print_version = false;
bool m_print_python_path = false;
diff --git a/lldb/tools/lldb-dap/Watchpoint.cpp b/lldb/tools/lldb-dap/Watchpoint.cpp
index 2f176e0..2176550 100644
--- a/lldb/tools/lldb-dap/Watchpoint.cpp
+++ b/lldb/tools/lldb-dap/Watchpoint.cpp
@@ -16,17 +16,11 @@ Watchpoint::Watchpoint(const llvm::json::Object &obj) : BreakpointBase(obj) {
llvm::StringRef dataId = GetString(obj, "dataId");
std::string accessType = GetString(obj, "accessType").str();
auto [addr_str, size_str] = dataId.split('/');
- lldb::addr_t addr;
- size_t size;
llvm::to_integer(addr_str, addr, 16);
llvm::to_integer(size_str, size);
- lldb::SBWatchpointOptions options;
options.SetWatchpointTypeRead(accessType != "write");
if (accessType != "read")
options.SetWatchpointTypeWrite(lldb::eWatchpointWriteTypeOnModify);
- wp = g_dap.target.WatchpointCreateByAddress(addr, size, options, error);
- SetCondition();
- SetHitCondition();
}
void Watchpoint::SetCondition() { wp.SetCondition(condition.c_str()); }
@@ -38,11 +32,20 @@ void Watchpoint::SetHitCondition() {
}
void Watchpoint::CreateJsonObject(llvm::json::Object &object) {
- if (error.Success()) {
- object.try_emplace("verified", true);
- } else {
+ if (!error.IsValid() || error.Fail()) {
object.try_emplace("verified", false);
- EmplaceSafeString(object, "message", error.GetCString());
+ if (error.Fail())
+ EmplaceSafeString(object, "message", error.GetCString());
+ } else {
+ object.try_emplace("verified", true);
}
}
+
+void Watchpoint::SetWatchpoint() {
+ wp = g_dap.target.WatchpointCreateByAddress(addr, size, options, error);
+ if (!condition.empty())
+ SetCondition();
+ if (!hitCondition.empty())
+ SetHitCondition();
+}
} // namespace lldb_dap
diff --git a/lldb/tools/lldb-dap/Watchpoint.h b/lldb/tools/lldb-dap/Watchpoint.h
index 026b07d..4d2e58e 100644
--- a/lldb/tools/lldb-dap/Watchpoint.h
+++ b/lldb/tools/lldb-dap/Watchpoint.h
@@ -17,6 +17,9 @@
namespace lldb_dap {
struct Watchpoint : public BreakpointBase {
+ lldb::addr_t addr;
+ size_t size;
+ lldb::SBWatchpointOptions options;
// The LLDB breakpoint associated with this watchpoint.
lldb::SBWatchpoint wp;
lldb::SBError error;
@@ -28,6 +31,8 @@ struct Watchpoint : public BreakpointBase {
void SetCondition() override;
void SetHitCondition() override;
void CreateJsonObject(llvm::json::Object &object) override;
+
+ void SetWatchpoint();
};
} // namespace lldb_dap
diff --git a/lldb/tools/lldb-dap/lldb-dap.cpp b/lldb/tools/lldb-dap/lldb-dap.cpp
index c6a275b..55f8c92 100644
--- a/lldb/tools/lldb-dap/lldb-dap.cpp
+++ b/lldb/tools/lldb-dap/lldb-dap.cpp
@@ -2880,15 +2880,29 @@ void request_setDataBreakpoints(const llvm::json::Object &request) {
const auto *breakpoints = arguments->getArray("breakpoints");
llvm::json::Array response_breakpoints;
g_dap.target.DeleteAllWatchpoints();
+ std::vector<Watchpoint> watchpoints;
if (breakpoints) {
for (const auto &bp : *breakpoints) {
const auto *bp_obj = bp.getAsObject();
if (bp_obj) {
Watchpoint wp(*bp_obj);
- AppendBreakpoint(&wp, response_breakpoints);
+ watchpoints.push_back(wp);
}
}
}
+ // If two watchpoints start at the same address, the latter overwrites the
+ // former, so we only enable the one at each first-seen address while
+ // iterating backward.
+ std::set<lldb::addr_t> addresses;
+ for (auto iter = watchpoints.rbegin(); iter != watchpoints.rend(); ++iter) {
+ if (addresses.count(iter->addr) == 0) {
+ iter->SetWatchpoint();
+ addresses.insert(iter->addr);
+ }
+ }
+ for (auto wp : watchpoints)
+ AppendBreakpoint(&wp, response_breakpoints);
+
llvm::json::Object body;
body.try_emplace("breakpoints", std::move(response_breakpoints));
response.try_emplace("body", std::move(body));
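A standalone sketch of the dedup policy implemented above (plain integers stand in for watchpoint addresses; the struct and function names are illustrative): only the last request for each start address is materialized, which is why earlier duplicates come back as {verified: false}.

#include <cstdint>
#include <set>
#include <vector>

struct FakeWatchpoint {
  uint64_t addr;
  bool enabled = false;
};

// Walk the requests backwards and keep the first occurrence of each address,
// i.e. the last watchpoint the client asked for at that location.
void enableLastPerAddress(std::vector<FakeWatchpoint> &wps) {
  std::set<uint64_t> seen;
  for (auto it = wps.rbegin(); it != wps.rend(); ++it)
    if (seen.insert(it->addr).second)
      it->enabled = true; // earlier duplicates stay disabled
}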
diff --git a/lldb/use_lldb_suite_root.py b/lldb/use_lldb_suite_root.py
index fd42f63..b8f8acf 100644
--- a/lldb/use_lldb_suite_root.py
+++ b/lldb/use_lldb_suite_root.py
@@ -21,5 +21,7 @@ def add_lldbsuite_packages_dir(lldb_root):
lldb_root = os.path.dirname(inspect.getfile(inspect.currentframe()))
-add_third_party_module_dirs(lldb_root)
+# Use environment variables to avoid plumbing flags, lit configs, etc.
+if os.getenv("LLDB_TEST_USE_VENDOR_PACKAGES"):
+ add_third_party_module_dirs(lldb_root)
add_lldbsuite_packages_dir(lldb_root)
diff --git a/lldb/utils/lldb-dotest/CMakeLists.txt b/lldb/utils/lldb-dotest/CMakeLists.txt
index 09f41db..2ba40f0 100644
--- a/lldb/utils/lldb-dotest/CMakeLists.txt
+++ b/lldb/utils/lldb-dotest/CMakeLists.txt
@@ -10,6 +10,7 @@ set(LLDB_LIBS_DIR "${LLVM_LIBRARY_OUTPUT_INTDIR}")
llvm_canonicalize_cmake_booleans(
LLDB_BUILD_INTEL_PT
LLDB_HAS_LIBCXX
+ LLDB_TEST_USE_VENDOR_PACKAGES
)
if ("libcxx" IN_LIST LLVM_ENABLE_RUNTIMES)
diff --git a/lldb/utils/lldb-dotest/lldb-dotest.in b/lldb/utils/lldb-dotest/lldb-dotest.in
index 5cd49d2..9291f59 100755
--- a/lldb/utils/lldb-dotest/lldb-dotest.in
+++ b/lldb/utils/lldb-dotest/lldb-dotest.in
@@ -1,4 +1,5 @@
#!@Python3_EXECUTABLE@
+import os
import subprocess
import sys
@@ -17,8 +18,12 @@ has_libcxx = @LLDB_HAS_LIBCXX@
libcxx_libs_dir = "@LIBCXX_LIBRARY_DIR@"
libcxx_include_dir = "@LIBCXX_GENERATED_INCLUDE_DIR@"
libcxx_include_target_dir = "@LIBCXX_GENERATED_INCLUDE_TARGET_DIR@"
+use_vendor_packages = @LLDB_TEST_USE_VENDOR_PACKAGES@
if __name__ == '__main__':
+ if use_vendor_packages:
+ os.putenv("LLDB_TEST_USE_VENDOR_PACKAGES", "1")
+
wrapper_args = sys.argv[1:]
dotest_args = []
# split on an empty string will produce [''] and if you
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index f5f7d3f..651f178 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -718,6 +718,8 @@ if(LLVM_INDIVIDUAL_TEST_COVERAGE)
endif()
set(LLVM_LIT_ARGS "${LIT_ARGS_DEFAULT}" CACHE STRING "Default options for lit")
+option(LLVM_PARALLEL_LIT "Enable multiple lit suites to run in parallel" OFF)
+
# On Win32 hosts, provide an option to specify the path to the GnuWin32 tools.
if( WIN32 AND NOT CYGWIN )
set(LLVM_LIT_TOOLS_DIR "" CACHE PATH "Path to GnuWin32 tools")
diff --git a/llvm/cmake/modules/AddLLVM.cmake b/llvm/cmake/modules/AddLLVM.cmake
index 3bc78b0..0f1734a 100644
--- a/llvm/cmake/modules/AddLLVM.cmake
+++ b/llvm/cmake/modules/AddLLVM.cmake
@@ -1947,11 +1947,18 @@ function(add_lit_target target comment)
list(APPEND LIT_COMMAND --param ${param})
endforeach()
if (ARG_UNPARSED_ARGUMENTS)
- add_custom_target(${target}
- COMMAND ${LIT_COMMAND} ${ARG_UNPARSED_ARGUMENTS}
- COMMENT "${comment}"
- USES_TERMINAL
- )
+ if (LLVM_PARALLEL_LIT)
+ add_custom_target(${target}
+ COMMAND ${LIT_COMMAND} ${ARG_UNPARSED_ARGUMENTS}
+ COMMENT "${comment}"
+ )
+ else()
+ add_custom_target(${target}
+ COMMAND ${LIT_COMMAND} ${ARG_UNPARSED_ARGUMENTS}
+ COMMENT "${comment}"
+ USES_TERMINAL
+ )
+ endif()
else()
add_custom_target(${target}
COMMAND ${CMAKE_COMMAND} -E echo "${target} does nothing, no tools built.")
diff --git a/llvm/docs/CMake.rst b/llvm/docs/CMake.rst
index abef4f8..35c4798 100644
--- a/llvm/docs/CMake.rst
+++ b/llvm/docs/CMake.rst
@@ -762,6 +762,12 @@ enabled sub-projects. Nearly all of these variable names begin with
**LLVM_PARALLEL_LINK_JOBS**:STRING
Define the maximum number of concurrent link jobs.
+**LLVM_PARALLEL_LIT**:BOOL
+ Defaults to ``OFF``. If set to ``OFF``, lit testsuites will be configured
+ with CMake's ``USES_TERMINAL`` flag to give direct access to the terminal. If
+ set to ``ON``, that flag will be removed, allowing Ninja to schedule multiple
+ lit testsuites in parallel.
+
**LLVM_RAM_PER_COMPILE_JOB**:STRING
Calculates the amount of Ninja compile jobs according to available resources.
Value has to be in MB, overwrites LLVM_PARALLEL_COMPILE_JOBS. Compile jobs
diff --git a/llvm/docs/GlobalISel/GenericOpcode.rst b/llvm/docs/GlobalISel/GenericOpcode.rst
index 26ff343..33b0152 100644
--- a/llvm/docs/GlobalISel/GenericOpcode.rst
+++ b/llvm/docs/GlobalISel/GenericOpcode.rst
@@ -536,15 +536,15 @@ G_FMINIMUM
^^^^^^^^^^
NaN-propagating minimum that also treats -0.0 as less than 0.0. While
-FMINNUM_IEEE follow IEEE 754-2008 semantics, FMINIMUM follows IEEE 754-2018
-draft semantics.
+FMINNUM_IEEE follows IEEE 754-2008 semantics, FMINIMUM follows IEEE
+754-2019 semantics.
G_FMAXIMUM
^^^^^^^^^^
NaN-propagating maximum that also treats -0.0 as less than 0.0. While
-FMAXNUM_IEEE follow IEEE 754-2008 semantics, FMAXIMUM follows IEEE 754-2018
-draft semantics.
+FMAXNUM_IEEE follows IEEE 754-2008 semantics, FMAXIMUM follows IEEE
+754-2019 semantics.
G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FREM
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 60e682a..f56d4ed 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15581,7 +15581,7 @@ Semantics:
If either operand is a NaN, returns NaN. Otherwise returns the lesser
of the two arguments. -0.0 is considered to be less than +0.0 for this
intrinsic. Note that these are the semantics specified in the draft of
-IEEE 754-2018.
+IEEE 754-2019.
.. _i_maximum:
@@ -15621,7 +15621,7 @@ Semantics:
If either operand is a NaN, returns NaN. Otherwise returns the greater
of the two arguments. -0.0 is considered to be less than +0.0 for this
intrinsic. Note that these are the semantics specified in the draft of
-IEEE 754-2018.
+IEEE 754-2019.
.. _int_copysign:
@@ -26000,7 +26000,7 @@ The third argument specifies the exception behavior as described above.
Semantics:
""""""""""
-This function follows semantics specified in the draft of IEEE 754-2018.
+This function follows semantics specified in the draft of IEEE 754-2019.
'``llvm.experimental.constrained.minimum``' Intrinsic
@@ -26032,7 +26032,7 @@ The third argument specifies the exception behavior as described above.
Semantics:
""""""""""
-This function follows semantics specified in the draft of IEEE 754-2018.
+This function follows semantics specified in the draft of IEEE 754-2019.
'``llvm.experimental.constrained.ceil``' Intrinsic
diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst
index ed44359..8d293b0 100644
--- a/llvm/docs/RISCVUsage.rst
+++ b/llvm/docs/RISCVUsage.rst
@@ -117,6 +117,7 @@ on support follow.
``V`` Supported
``Za128rs`` Supported (`See note <#riscv-profiles-extensions-note>`__)
``Za64rs`` Supported (`See note <#riscv-profiles-extensions-note>`__)
+ ``Zacas`` Supported (`See note <#riscv-zacas-note>`__)
``Zawrs`` Assembly Support
``Zba`` Supported
``Zbb`` Supported
@@ -236,6 +237,11 @@ Supported
``Za128rs``, ``Za64rs``, ``Zic64b``, ``Ziccamoa``, ``Ziccif``, ``Zicclsm``, ``Ziccrse``, ``Shcounterenvw``, ``Shgatpa``, ``Shtvala``, ``Shvsatpa``, ``Shvstvala``, ``Shvstvecd``, ``Ssccptr``, ``Sscounterenw``, ``Ssstateen``, ``Ssstrict``, ``Sstvala``, ``Sstvecd``, ``Ssu64xl``, ``Svade``, ``Svbare``
These extensions are defined as part of the `RISC-V Profiles specification <https://github.com/riscv/riscv-profiles/releases/tag/v1.0>`__. They do not introduce any new features themselves, but instead describe existing hardware features.
+ .. _riscv-zacas-note:
+
+``Zacas``
+ amocas.w will be used for i32 cmpxchg. amocas.d will be used for i64 cmpxchg on RV64. The compiler will not generate amocas.d on RV32 or amocas.q on RV64 due to ABI compatibility. These can only be used in the assembler.
+
Experimental Extensions
=======================
@@ -252,9 +258,6 @@ The primary goal of experimental support is to assist in the process of ratifica
``experimental-zabha``
LLVM implements the `v1.0-rc1 draft specification <https://github.com/riscv/riscv-zabha/tree/v1.0-rc1>`__.
-``experimental-zacas``
- LLVM implements the `1.0-rc1 draft specification <https://github.com/riscv/riscv-zacas/releases/tag/v1.0-rc1>`__.
-
``experimental-zalasr``
LLVM implements the `0.0.5 draft specification <https://github.com/mehnadnerd/riscv-zalasr>`__.
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index 51b6527..8ce6ee5 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -103,6 +103,7 @@ Changes to the RISC-V Backend
* Codegen support was added for the Zimop (May-Be-Operations) extension.
* The experimental Ssnpm, Smnpm, Smmpm, Sspm, and Supm 0.8.1 Pointer Masking extensions are supported.
* The experimental Ssqosid extension is supported.
+* Zacas is no longer experimental.
Changes to the WebAssembly Backend
----------------------------------
diff --git a/llvm/include/llvm/ADT/APFloat.h b/llvm/include/llvm/ADT/APFloat.h
index 8c247bb..deb74cb 100644
--- a/llvm/include/llvm/ADT/APFloat.h
+++ b/llvm/include/llvm/ADT/APFloat.h
@@ -1389,29 +1389,35 @@ inline APFloat neg(APFloat X) {
return X;
}
-/// Implements IEEE minNum semantics. Returns the smaller of the 2 arguments if
-/// both are not NaN. If either argument is a NaN, returns the other argument.
+/// Implements IEEE-754 2019 minimumNumber semantics. Returns the smaller of the
+/// 2 arguments if both are not NaN. If either argument is a NaN, returns the
+/// other argument. -0 is treated as ordered less than +0.
LLVM_READONLY
inline APFloat minnum(const APFloat &A, const APFloat &B) {
if (A.isNaN())
return B;
if (B.isNaN())
return A;
+ if (A.isZero() && B.isZero() && (A.isNegative() != B.isNegative()))
+ return A.isNegative() ? A : B;
return B < A ? B : A;
}
-/// Implements IEEE maxNum semantics. Returns the larger of the 2 arguments if
-/// both are not NaN. If either argument is a NaN, returns the other argument.
+/// Implements IEEE-754 2019 maximumNumber semantics. Returns the larger of the
+/// 2 arguments if both are not NaN. If either argument is a NaN, returns the
+/// other argument. +0 is treated as ordered greater than -0.
LLVM_READONLY
inline APFloat maxnum(const APFloat &A, const APFloat &B) {
if (A.isNaN())
return B;
if (B.isNaN())
return A;
+ if (A.isZero() && B.isZero() && (A.isNegative() != B.isNegative()))
+ return A.isNegative() ? B : A;
return A < B ? B : A;
}
-/// Implements IEEE 754-2018 minimum semantics. Returns the smaller of 2
+/// Implements IEEE 754-2019 minimum semantics. Returns the smaller of 2
/// arguments, propagating NaNs and treating -0 as less than +0.
LLVM_READONLY
inline APFloat minimum(const APFloat &A, const APFloat &B) {
@@ -1424,7 +1430,7 @@ inline APFloat minimum(const APFloat &A, const APFloat &B) {
return B < A ? B : A;
}
-/// Implements IEEE 754-2018 maximum semantics. Returns the larger of 2
+/// Implements IEEE 754-2019 maximum semantics. Returns the larger of 2
/// arguments, propagating NaNs and treating -0 as less than +0.
LLVM_READONLY
inline APFloat maximum(const APFloat &A, const APFloat &B) {
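A hedged illustration of the behavioural change to llvm::minnum/llvm::maxnum above (the demo function is ours; only the APFloat API visible in this header is used): with two zero operands of opposite sign, the negative zero now orders below the positive one.

#include "llvm/ADT/APFloat.h"
#include <cassert>

void signedZeroDemo() {
  using llvm::APFloat;
  const APFloat NegZero = APFloat::getZero(APFloat::IEEEdouble(), /*Negative=*/true);
  const APFloat PosZero = APFloat::getZero(APFloat::IEEEdouble(), /*Negative=*/false);

  // New with this patch: minnum prefers -0.0, maxnum prefers +0.0.
  assert(llvm::minnum(NegZero, PosZero).isNegative());
  assert(!llvm::maxnum(NegZero, PosZero).isNegative());
}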
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 8cb0bc9..ad876c5 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -978,7 +978,7 @@ enum NodeType {
/// FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0
/// as less than 0.0. While FMINNUM_IEEE/FMAXNUM_IEEE follow IEEE 754-2008
- /// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2018 draft semantics.
+ /// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2019 semantics.
FMINIMUM,
FMAXIMUM,
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index 2fc1cea..25e6c52 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -1488,9 +1488,6 @@ public:
SDValue Chain, SDValue Ptr, SDValue Stride,
SDValue Mask, SDValue EVL, EVT MemVT,
MachineMemOperand *MMO, bool IsExpanding = false);
- SDValue getIndexedStridedLoadVP(SDValue OrigLoad, const SDLoc &DL,
- SDValue Base, SDValue Offset,
- ISD::MemIndexedMode AM);
SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val,
SDValue Ptr, SDValue Offset, SDValue Stride,
SDValue Mask, SDValue EVL, EVT MemVT,
@@ -1501,9 +1498,6 @@ public:
SDValue Ptr, SDValue Stride, SDValue Mask,
SDValue EVL, EVT SVT, MachineMemOperand *MMO,
bool IsCompressing = false);
- SDValue getIndexedStridedStoreVP(SDValue OrigStore, const SDLoc &DL,
- SDValue Base, SDValue Offset,
- ISD::MemIndexedMode AM);
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
diff --git a/llvm/include/llvm/DebugInfo/DIContext.h b/llvm/include/llvm/DebugInfo/DIContext.h
index 288ddf7..b75dc8d 100644
--- a/llvm/include/llvm/DebugInfo/DIContext.h
+++ b/llvm/include/llvm/DebugInfo/DIContext.h
@@ -206,6 +206,7 @@ struct DIDumpOptions {
bool IsEH = false;
bool DumpNonSkeleton = false;
bool ShowAggregateErrors = false;
+ std::string JsonErrSummaryFile;
std::function<llvm::StringRef(uint64_t DwarfRegNum, bool IsEH)>
GetNameForDWARFReg;
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index 4ee51cd..0e81d3b 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -468,9 +468,7 @@ public:
static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
BasicBlock::iterator InsertBefore);
static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
- Instruction *InsertBefore = nullptr);
- static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
- BasicBlock *InsertAtEnd);
+ BasicBlock *InsertAtEnd = nullptr);
static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
BasicBlock::iterator InsertBefore);
static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
@@ -1538,10 +1536,19 @@ public:
OperandBundleDef OB,
Instruction *InsertPt = nullptr);
+ /// Create a clone of \p CB with operand bundle \p OB added.
+ static CallBase *addOperandBundle(CallBase *CB, uint32_t ID,
+ OperandBundleDef OB,
+ BasicBlock::iterator InsertPt);
+
/// Create a clone of \p CB with operand bundle \p ID removed.
static CallBase *removeOperandBundle(CallBase *CB, uint32_t ID,
Instruction *InsertPt = nullptr);
+ /// Create a clone of \p CB with operand bundle \p ID removed.
+ static CallBase *removeOperandBundle(CallBase *CB, uint32_t ID,
+ BasicBlock::iterator InsertPt);
+
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Call ||
I->getOpcode() == Instruction::Invoke ||
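A minimal sketch of calling the new iterator-taking overload declared above (the wrapper, its name, and the choice of the funclet bundle are assumptions for illustration): the clone is inserted at the position given by the iterator, mirroring the existing Instruction* overload.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

// Clone CB with a "funclet" operand bundle attached, positioning the clone
// via a BasicBlock::iterator rather than a raw Instruction pointer.
CallBase *tagCall(CallBase *CB, Value *Token) {
  OperandBundleDef OB("funclet", ArrayRef<Value *>(Token));
  return CallBase::addOperandBundle(CB, LLVMContext::OB_funclet, OB,
                                    CB->getIterator());
}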
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 19197f5..d2036e4 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -815,7 +815,7 @@ def G_FMAXNUM_IEEE : GenericInstruction {
// FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0
// as less than 0.0. While FMINNUM_IEEE/FMAXNUM_IEEE follow IEEE 754-2008
-// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2018 draft semantics.
+// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2019 semantics.
def G_FMINIMUM : GenericInstruction {
let OutOperandList = (outs type0:$dst);
let InOperandList = (ins type0:$src1, type0:$src2);
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 17757ca..18db7a8 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -952,6 +952,37 @@ def redundant_binop_in_equality : GICombineRule<
[{ return Helper.matchRedundantBinOpInEquality(*${root}, ${info}); }]),
(apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+// Transform: (X == 0 & Y == 0) -> (X | Y) == 0
+def double_icmp_zero_and_combine: GICombineRule<
+ (defs root:$root),
+ (match (G_ICMP $d1, $p, $s1, 0),
+ (G_ICMP $d2, $p, $s2, 0),
+ (G_AND $root, $d1, $d2),
+ [{ return ${p}.getPredicate() == CmpInst::ICMP_EQ &&
+ !MRI.getType(${s1}.getReg()).getScalarType().isPointer() &&
+ (MRI.getType(${s1}.getReg()) ==
+ MRI.getType(${s2}.getReg())); }]),
+ (apply (G_OR $ordst, $s1, $s2),
+ (G_ICMP $root, $p, $ordst, 0))
+>;
+
+// Transform: (X != 0 | Y != 0) -> (X | Y) != 0
+def double_icmp_zero_or_combine: GICombineRule<
+ (defs root:$root),
+ (match (G_ICMP $d1, $p, $s1, 0),
+ (G_ICMP $d2, $p, $s2, 0),
+ (G_OR $root, $d1, $d2),
+ [{ return ${p}.getPredicate() == CmpInst::ICMP_NE &&
+ !MRI.getType(${s1}.getReg()).getScalarType().isPointer() &&
+ (MRI.getType(${s1}.getReg()) ==
+ MRI.getType(${s2}.getReg())); }]),
+ (apply (G_OR $ordst, $s1, $s2),
+ (G_ICMP $root, $p, $ordst, 0))
+>;
+
+def double_icmp_zero_and_or_combine : GICombineGroup<[double_icmp_zero_and_combine,
+ double_icmp_zero_or_combine]>;
+
def and_or_disjoint_mask : GICombineRule<
(defs root:$root, build_fn_matchinfo:$info),
(match (wip_match_opcode G_AND):$root,
@@ -1343,7 +1374,7 @@ def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
and_or_disjoint_mask, fma_combines, fold_binop_into_select,
sub_add_reg, select_to_minmax, redundant_binop_in_equality,
fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
- combine_concat_vector]>;
+ combine_concat_vector, double_icmp_zero_and_or_combine]>;
// A combine group used for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
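In C++ terms (a stand-in for the MIR the two new rules actually match; both compares must be integer-typed, non-pointer, and of the same type), the rewrite looks like this:

// double_icmp_zero_and_combine: (x == 0 && y == 0)  ==>  (x | y) == 0
bool bothZero(unsigned x, unsigned y) {
  return x == 0 && y == 0;
}

// double_icmp_zero_or_combine: (x != 0 || y != 0)  ==>  (x | y) != 0
bool eitherNonZero(unsigned x, unsigned y) {
  return x != 0 || y != 0;
}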
diff --git a/llvm/include/llvm/TextAPI/RecordsSlice.h b/llvm/include/llvm/TextAPI/RecordsSlice.h
index 5b214d0..57b23e5 100644
--- a/llvm/include/llvm/TextAPI/RecordsSlice.h
+++ b/llvm/include/llvm/TextAPI/RecordsSlice.h
@@ -50,9 +50,9 @@ public:
/// Add non-ObjC global record.
///
/// \param Name The name of symbol.
- /// \param Flags The flags that describe attributes of the symbol.
- /// \param GV The kind of global.
/// \param Linkage The linkage of symbol.
+ /// \param GV The kind of global.
+ /// \param Flags The flags that describe attributes of the symbol.
/// \return The non-owning pointer to added record in slice.
GlobalRecord *addGlobal(StringRef Name, RecordLinkage Linkage,
GlobalRecord::Kind GV,
@@ -69,6 +69,7 @@ public:
/// Add ObjC IVar record.
///
+ /// \param Container Owning pointer for instance variable.
/// \param Name The name of ivar, not symbol.
/// \param Linkage The linkage of symbol.
/// \return The non-owning pointer to added record in slice.
@@ -93,7 +94,7 @@ public:
/// Find ObjC Category.
///
/// \param ClassToExtend The name of class, not full symbol name.
- /// \param Categories The name of category.
+ /// \param Category The name of the category.
/// \return The non-owning pointer to record in slice.
ObjCCategoryRecord *findObjCCategory(StringRef ClassToExtend,
StringRef Category) const;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index e150f27..5b1b7c7c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3645,32 +3645,42 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
}
}
}
- } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
- // If this is a ZEXTLoad and we are looking at the loaded value.
- EVT VT = LD->getMemoryVT();
- unsigned MemBits = VT.getScalarSizeInBits();
- Known.Zero.setBitsFrom(MemBits);
- } else if (const MDNode *Ranges = LD->getRanges()) {
- EVT VT = LD->getValueType(0);
-
- // TODO: Handle for extending loads
- if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
+ } else if (Op.getResNo() == 0) {
+ KnownBits Known0(!LD->getMemoryVT().isScalableVT()
+ ? LD->getMemoryVT().getFixedSizeInBits()
+ : BitWidth);
+ EVT VT = Op.getValueType();
+ // Fill in any known bits from range information. There are 3 types being
+ // used: the result VT (same vector elt size as BitWidth), the loaded
+ // MemoryVT (which may or may not be a vector) and the range metadata's
+ // original type. The range metadata needs the full range (i.e.
+ // MemoryVT().getSizeInBits()), which is truncated to the correct elt size
+ // if it is known. These are then extended to the original VT sizes below.
+ if (const MDNode *MD = LD->getRanges()) {
+ computeKnownBitsFromRangeMetadata(*MD, Known0);
if (VT.isVector()) {
// Handle truncation to the first demanded element.
// TODO: Figure out which demanded elements are covered
if (DemandedElts != 1 || !getDataLayout().isLittleEndian())
break;
+ Known0 = Known0.trunc(BitWidth);
+ }
+ }
- // Handle the case where a load has a vector type, but scalar memory
- // with an attached range.
- EVT MemVT = LD->getMemoryVT();
- KnownBits KnownFull(MemVT.getSizeInBits());
+ if (LD->getMemoryVT().isVector())
+ Known0 = Known0.trunc(LD->getMemoryVT().getScalarSizeInBits());
- computeKnownBitsFromRangeMetadata(*Ranges, KnownFull);
- Known = KnownFull.trunc(BitWidth);
- } else
- computeKnownBitsFromRangeMetadata(*Ranges, Known);
- }
+ // Extend the Known bits from memory to the size of the result.
+ if (ISD::isZEXTLoad(Op.getNode()))
+ Known = Known0.zext(BitWidth);
+ else if (ISD::isSEXTLoad(Op.getNode()))
+ Known = Known0.sext(BitWidth);
+ else if (ISD::isEXTLoad(Op.getNode()))
+ Known = Known0.anyext(BitWidth);
+ else
+ Known = Known0;
+ assert(Known.getBitWidth() == BitWidth);
+ return Known;
}
break;
}
@@ -9106,26 +9116,6 @@ SDValue SelectionDAG::getExtStridedLoadVP(
Stride, Mask, EVL, MemVT, MMO, IsExpanding);
}
-SDValue SelectionDAG::getIndexedStridedLoadVP(SDValue OrigLoad, const SDLoc &DL,
- SDValue Base, SDValue Offset,
- ISD::MemIndexedMode AM) {
- auto *SLD = cast<VPStridedLoadSDNode>(OrigLoad);
- assert(SLD->getOffset().isUndef() &&
- "Strided load is already a indexed load!");
- // Don't propagate the invariant or dereferenceable flags.
- auto MMOFlags =
- SLD->getMemOperand()->getFlags() &
- ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
- MachineFunction &MF = getMachineFunction();
- MachineMemOperand *MMO = MF.getMachineMemOperand(
- SLD->getPointerInfo(), MMOFlags, SLD->getMemOperand()->getSize(),
- SLD->getOriginalAlign(), SLD->getAAInfo());
- return getStridedLoadVP(AM, SLD->getExtensionType(), OrigLoad.getValueType(),
- DL, SLD->getChain(), Base, Offset, SLD->getStride(),
- SLD->getMask(), SLD->getVectorLength(),
- SLD->getMemoryVT(), MMO, SLD->isExpandingLoad());
-}
-
SDValue SelectionDAG::getStridedStoreVP(SDValue Chain, const SDLoc &DL,
SDValue Val, SDValue Ptr,
SDValue Offset, SDValue Stride,
@@ -9211,38 +9201,6 @@ SDValue SelectionDAG::getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL,
return V;
}
-SDValue SelectionDAG::getIndexedStridedStoreVP(SDValue OrigStore,
- const SDLoc &DL, SDValue Base,
- SDValue Offset,
- ISD::MemIndexedMode AM) {
- auto *SST = cast<VPStridedStoreSDNode>(OrigStore);
- assert(SST->getOffset().isUndef() &&
- "Strided store is already an indexed store!");
- SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
- SDValue Ops[] = {
- SST->getChain(), SST->getValue(), Base, Offset, SST->getStride(),
- SST->getMask(), SST->getVectorLength()};
- FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_STORE, VTs, Ops);
- ID.AddInteger(SST->getMemoryVT().getRawBits());
- ID.AddInteger(SST->getRawSubclassData());
- ID.AddInteger(SST->getPointerInfo().getAddrSpace());
- void *IP = nullptr;
- if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
- return SDValue(E, 0);
-
- auto *N = newSDNode<VPStridedStoreSDNode>(
- DL.getIROrder(), DL.getDebugLoc(), VTs, AM, SST->isTruncatingStore(),
- SST->isCompressingStore(), SST->getMemoryVT(), SST->getMemOperand());
- createOperands(N, Ops);
-
- CSEMap.InsertNode(N, IP);
- InsertNode(N);
- SDValue V(N, 0);
- NewSDValueDbgMsg(V, "Creating new node: ", this);
- return V;
-}
-
SDValue SelectionDAG::getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
ISD::MemIndexType IndexType) {
diff --git a/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp b/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp
index 20ef59e..520debe 100644
--- a/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp
+++ b/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp
@@ -29,7 +29,9 @@
#include "llvm/Support/DJB.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/JSON.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
@@ -2026,12 +2028,37 @@ void OutputCategoryAggregator::EnumerateResults(
}
void DWARFVerifier::summarize() {
- if (ErrorCategory.GetNumCategories() && DumpOpts.ShowAggregateErrors) {
+ if (DumpOpts.ShowAggregateErrors && ErrorCategory.GetNumCategories()) {
error() << "Aggregated error counts:\n";
ErrorCategory.EnumerateResults([&](StringRef s, unsigned count) {
error() << s << " occurred " << count << " time(s).\n";
});
}
+ if (!DumpOpts.JsonErrSummaryFile.empty()) {
+ std::error_code EC;
+ raw_fd_ostream JsonStream(DumpOpts.JsonErrSummaryFile, EC,
+ sys::fs::OF_Text);
+ if (EC) {
+ error() << "unable to open json summary file '"
+ << DumpOpts.JsonErrSummaryFile
+ << "' for writing: " << EC.message() << '\n';
+ return;
+ }
+
+ llvm::json::Object Categories;
+ uint64_t ErrorCount = 0;
+ ErrorCategory.EnumerateResults([&](StringRef Category, unsigned Count) {
+ llvm::json::Object Val;
+ Val.try_emplace("count", Count);
+ Categories.try_emplace(Category, std::move(Val));
+ ErrorCount += Count;
+ });
+ llvm::json::Object RootNode;
+ RootNode.try_emplace("error-categories", std::move(Categories));
+ RootNode.try_emplace("error-count", ErrorCount);
+
+ JsonStream << llvm::json::Value(std::move(RootNode));
+ }
}
raw_ostream &DWARFVerifier::error() const { return WithColor::error(OS); }
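A hedged sketch of enabling the new JSON summary through DIDumpOptions (the function name and output path are made up for the example; only the two fields used above are relied on):

#include "llvm/DebugInfo/DIContext.h"

// Build dump options that both print aggregate counts and write the JSON
// summary produced by DWARFVerifier::summarize().
llvm::DIDumpOptions makeVerifyOpts() {
  llvm::DIDumpOptions Opts;
  Opts.ShowAggregateErrors = true;
  Opts.JsonErrSummaryFile = "verify-summary.json";
  // The emitted file has the shape:
  //   {"error-categories": {"<category>": {"count": N}, ...}, "error-count": M}
  return Opts;
}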
diff --git a/llvm/lib/IR/DebugInfo.cpp b/llvm/lib/IR/DebugInfo.cpp
index e044ab3..1f3ff22 100644
--- a/llvm/lib/IR/DebugInfo.cpp
+++ b/llvm/lib/IR/DebugInfo.cpp
@@ -99,8 +99,8 @@ static void findDbgIntrinsics(SmallVectorImpl<IntrinsicT *> &Result, Value *V,
SmallPtrSet<DPValue *, 4> EncounteredDPValues;
/// Append IntrinsicT users of MetadataAsValue(MD).
- auto AppendUsers = [&Ctx, &EncounteredIntrinsics, &Result,
- DPValues](Metadata *MD) {
+ auto AppendUsers = [&Ctx, &EncounteredIntrinsics, &EncounteredDPValues,
+ &Result, DPValues](Metadata *MD) {
if (auto *MDV = MetadataAsValue::getIfExists(Ctx, MD)) {
for (User *U : MDV->users())
if (IntrinsicT *DVI = dyn_cast<IntrinsicT>(U))
@@ -113,7 +113,8 @@ static void findDbgIntrinsics(SmallVectorImpl<IntrinsicT *> &Result, Value *V,
if (LocalAsMetadata *L = dyn_cast<LocalAsMetadata>(MD)) {
for (DPValue *DPV : L->getAllDPValueUsers()) {
if (Type == DPValue::LocationType::Any || DPV->getType() == Type)
- DPValues->push_back(DPV);
+ if (EncounteredDPValues.insert(DPV).second)
+ DPValues->push_back(DPV);
}
}
};
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index c54f8d7..ce22175 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -46,11 +46,11 @@ Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
BasicBlock *InsertAtEnd)
- : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
+ : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
- // append this instruction into the basic block
- assert(InsertAtEnd && "Basic block to append to may not be NULL!");
- insertInto(InsertAtEnd, InsertAtEnd->end());
+ // If requested, append this instruction into the basic block.
+ if (InsertAtEnd)
+ insertInto(InsertAtEnd, InsertAtEnd->end());
}
Instruction::~Instruction() {
@@ -73,7 +73,6 @@ Instruction::~Instruction() {
setMetadata(LLVMContext::MD_DIAssignID, nullptr);
}
-
void Instruction::setParent(BasicBlock *P) {
Parent = P;
}
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 2577857..42cdcad 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -304,6 +304,20 @@ void LandingPadInst::addClause(Constant *Val) {
//===----------------------------------------------------------------------===//
CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
+ BasicBlock::iterator InsertPt) {
+ switch (CB->getOpcode()) {
+ case Instruction::Call:
+ return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
+ case Instruction::Invoke:
+ return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
+ case Instruction::CallBr:
+ return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
+ default:
+ llvm_unreachable("Unknown CallBase sub-class!");
+ }
+}
+
+CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
Instruction *InsertPt) {
switch (CB->getOpcode()) {
case Instruction::Call:
@@ -559,6 +573,18 @@ CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
OperandBundleDef OB,
+ BasicBlock::iterator InsertPt) {
+ if (CB->getOperandBundle(ID))
+ return CB;
+
+ SmallVector<OperandBundleDef, 1> Bundles;
+ CB->getOperandBundlesAsDefs(Bundles);
+ Bundles.push_back(OB);
+ return Create(CB, Bundles, InsertPt);
+}
+
+CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
+ OperandBundleDef OB,
Instruction *InsertPt) {
if (CB->getOperandBundle(ID))
return CB;
@@ -570,6 +596,23 @@ CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
}
CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
+ BasicBlock::iterator InsertPt) {
+ SmallVector<OperandBundleDef, 1> Bundles;
+ bool CreateNew = false;
+
+ for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
+ auto Bundle = CB->getOperandBundleAt(I);
+ if (Bundle.getTagID() == ID) {
+ CreateNew = true;
+ continue;
+ }
+ Bundles.emplace_back(Bundle);
+ }
+
+ return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
+}
+
+CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
Instruction *InsertPt) {
SmallVector<OperandBundleDef, 1> Bundles;
bool CreateNew = false;
@@ -717,6 +760,13 @@ void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
}
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
+ BasicBlock::iterator InsertBefore)
+ : CallBase(Ty->getReturnType(), Instruction::Call,
+ OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
+ init(Ty, Func, Name);
+}
+
+CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
Instruction *InsertBefore)
: CallBase(Ty->getReturnType(), Instruction::Call,
OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
@@ -881,6 +931,20 @@ InvokeInst::InvokeInst(const InvokeInst &II)
}
InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
+ BasicBlock::iterator InsertPt) {
+ std::vector<Value *> Args(II->arg_begin(), II->arg_end());
+
+ auto *NewII = InvokeInst::Create(
+ II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
+ II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
+ NewII->setCallingConv(II->getCallingConv());
+ NewII->SubclassOptionalData = II->SubclassOptionalData;
+ NewII->setAttributes(II->getAttributes());
+ NewII->setDebugLoc(II->getDebugLoc());
+ return NewII;
+}
+
+InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
Instruction *InsertPt) {
std::vector<Value *> Args(II->arg_begin(), II->arg_end());
@@ -954,6 +1018,21 @@ CallBrInst::CallBrInst(const CallBrInst &CBI)
}
CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
+ BasicBlock::iterator InsertPt) {
+ std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
+
+ auto *NewCBI = CallBrInst::Create(
+ CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
+ CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
+ NewCBI->setCallingConv(CBI->getCallingConv());
+ NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
+ NewCBI->setAttributes(CBI->getAttributes());
+ NewCBI->setDebugLoc(CBI->getDebugLoc());
+ NewCBI->NumIndirectDests = CBI->NumIndirectDests;
+ return NewCBI;
+}
+
+CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
Instruction *InsertPt) {
std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
@@ -1138,6 +1217,18 @@ CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
unsigned NumReservedValues,
const Twine &NameStr,
+ BasicBlock::iterator InsertBefore)
+ : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
+ InsertBefore) {
+ if (UnwindDest)
+ ++NumReservedValues;
+ init(ParentPad, UnwindDest, NumReservedValues + 1);
+ setName(NameStr);
+}
+
+CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
+ unsigned NumReservedValues,
+ const Twine &NameStr,
Instruction *InsertBefore)
: Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
InsertBefore) {
@@ -3224,6 +3315,14 @@ void BinaryOperator::AssertOK() {
BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
const Twine &Name,
+ BasicBlock::iterator InsertBefore) {
+ assert(S1->getType() == S2->getType() &&
+ "Cannot create binary operator with two operands of differing type!");
+ return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
+ const Twine &Name,
Instruction *InsertBefore) {
assert(S1->getType() == S2->getType() &&
"Cannot create binary operator with two operands of differing type!");
@@ -3246,14 +3345,6 @@ BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
}
BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
- Instruction *InsertBefore) {
- Value *Zero = ConstantInt::get(Op->getType(), 0);
- return new BinaryOperator(Instruction::Sub,
- Zero, Op,
- Op->getType(), Name, InsertBefore);
-}
-
-BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
BasicBlock *InsertAtEnd) {
Value *Zero = ConstantInt::get(Op->getType(), 0);
return new BinaryOperator(Instruction::Sub,
@@ -3286,6 +3377,13 @@ BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
}
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
+ BasicBlock::iterator InsertBefore) {
+ Constant *C = Constant::getAllOnesValue(Op->getType());
+ return new BinaryOperator(Instruction::Xor, Op, C,
+ Op->getType(), Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
Instruction *InsertBefore) {
Constant *C = Constant::getAllOnesValue(Op->getType());
return new BinaryOperator(Instruction::Xor, Op, C,
@@ -3831,6 +3929,17 @@ CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
const Twine &Name,
+ BasicBlock::iterator InsertBefore) {
+ if (S->getType()->isPointerTy() && Ty->isIntegerTy())
+ return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
+ if (S->getType()->isIntegerTy() && Ty->isPointerTy())
+ return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
+
+ return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
+ const Twine &Name,
Instruction *InsertBefore) {
if (S->getType()->isPointerTy() && Ty->isIntegerTy())
return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
@@ -4465,6 +4574,18 @@ CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
CmpInst *
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
+ const Twine &Name, BasicBlock::iterator InsertBefore) {
+ if (Op == Instruction::ICmp) {
+ return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
+ S1, S2, Name);
+ }
+
+ return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
+ S1, S2, Name);
+}
+
+CmpInst *
+CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
const Twine &Name, Instruction *InsertBefore) {
if (Op == Instruction::ICmp) {
if (InsertBefore)
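A minimal usage sketch (not part of the patch) of the iterator-based insertion positions added in the hunks above; it assumes a live BasicBlock::iterator into an existing function and uses only the CmpInst::Create and BinaryOperator::CreateNot signatures visible here.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Emits `!(A == B)` immediately before the instruction `It` points at, going
// through the BasicBlock::iterator overloads rather than a raw Instruction*
// insertion point. A and B are assumed to share an integer type.
static Value *emitNotEqualBefore(Value *A, Value *B, BasicBlock::iterator It) {
  CmpInst *Eq =
      CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, A, B, "eq", It);
  return BinaryOperator::CreateNot(Eq, "not.eq", It);
}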
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 3741e5d..e0de179 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -5002,7 +5002,9 @@ void Verifier::visitInstruction(Instruction &I) {
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
Check(GV->getParent() == &M, "Referencing global in another module!", &I,
&M, GV, GV->getParent());
- } else if (isa<Instruction>(I.getOperand(i))) {
+ } else if (Instruction *OpInst = dyn_cast<Instruction>(I.getOperand(i))) {
+ Check(OpInst->getFunction() == BB->getParent(),
+ "Referring to an instruction in another function!", &I);
verifyDominatesUse(I, i);
} else if (isa<InlineAsm>(I.getOperand(i))) {
Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
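For context (not part of the patch), a self-contained sketch of the malformed IR the strengthened operand check now reports: an instruction defined in @f is used from @g, so verifyModule fails with the new cross-function diagnostic.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("m", Ctx);
  auto *I32 = Type::getInt32Ty(Ctx);
  auto *FTy = FunctionType::get(I32, {I32}, /*isVarArg=*/false);

  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  Function *G = Function::Create(FTy, Function::ExternalLinkage, "g", M);

  IRBuilder<> BF(BasicBlock::Create(Ctx, "entry", F));
  Value *X = BF.CreateAdd(F->getArg(0), BF.getInt32(1), "x"); // lives in @f
  BF.CreateRet(X);

  IRBuilder<> BG(BasicBlock::Create(Ctx, "entry", G));
  BG.CreateRet(X); // illegal: uses %x, which is defined in @f

  // Returns true (module broken) and prints the new
  // "Referring to an instruction in another function!" diagnostic.
  return verifyModule(M, &errs());
}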
diff --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp
index a1c32ee..76a3e50 100644
--- a/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -44,6 +44,7 @@
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCSymbolMachO.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Casting.h"
@@ -1950,7 +1951,8 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info,
Lex();
}
- if (MAI.hasSubsectionsViaSymbols() && CFIStartProcLoc && Sym->isExternal())
+ if (MAI.hasSubsectionsViaSymbols() && CFIStartProcLoc &&
+ Sym->isExternal() && !cast<MCSymbolMachO>(Sym)->isAltEntry())
return Error(StartTokLoc, "non-private labels cannot appear between "
".cfi_startproc / .cfi_endproc pairs") &&
Error(*CFIStartProcLoc, "previous .cfi_startproc was here");
diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
index 142bd50..991c3ac 100644
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -707,7 +707,7 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
// Re-consider control flow based optimizations after redundancy elimination,
// redo DCE, etc.
- if (EnableDFAJumpThreading && Level.getSizeLevel() == 0)
+ if (EnableDFAJumpThreading)
FPM.addPass(DFAJumpThreadingPass());
FPM.addPass(JumpThreadingPass());
diff --git a/llvm/lib/Support/RISCVISAInfo.cpp b/llvm/lib/Support/RISCVISAInfo.cpp
index d028302..68f5c36 100644
--- a/llvm/lib/Support/RISCVISAInfo.cpp
+++ b/llvm/lib/Support/RISCVISAInfo.cpp
@@ -109,6 +109,7 @@ static const RISCVSupportedExtension SupportedExtensions[] = {
{"za128rs", {1, 0}},
{"za64rs", {1, 0}},
+ {"zacas", {1, 0}},
{"zawrs", {1, 0}},
{"zba", {1, 0}},
@@ -220,7 +221,6 @@ static const RISCVSupportedExtension SupportedExperimentalExtensions[] = {
{"zaamo", {0, 2}},
{"zabha", {1, 0}},
- {"zacas", {1, 0}},
{"zalasr", {0, 1}},
{"zalrsc", {0, 2}},
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp
index 4f65a95..a0c6bf7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp
@@ -177,7 +177,16 @@ void DivergenceLoweringHelper::buildMergeLaneMasks(
B.buildInstr(OrOp, {DstReg}, {PrevMaskedReg, CurMaskedReg});
}
-void DivergenceLoweringHelper::constrainAsLaneMask(Incoming &In) { return; }
+// GlobalISel has to constrain S1 incomings, taken as-is, to the lane-mask
+// register class: insert a copy of Incoming.Reg to a new lane mask inside
+// Incoming.Block and make Incoming.Reg that new lane mask.
+void DivergenceLoweringHelper::constrainAsLaneMask(Incoming &In) {
+ B.setInsertPt(*In.Block, In.Block->getFirstTerminator());
+
+ auto Copy = B.buildCopy(LLT::scalar(1), In.Reg);
+ MRI->setRegClass(Copy.getReg(0), ST->getBoolRC());
+ In.Reg = Copy.getReg(0);
+}
} // End anonymous namespace.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index aacc359..b2c65e6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -34,12 +34,6 @@
using namespace llvm;
using namespace MIPatternMatch;
-static cl::opt<bool> AllowRiskySelect(
- "amdgpu-global-isel-risky-select",
- cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
- cl::init(false),
- cl::ReallyHidden);
-
#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
@@ -211,14 +205,12 @@ bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
const Register DefReg = I.getOperand(0).getReg();
const LLT DefTy = MRI->getType(DefReg);
- if (DefTy == LLT::scalar(1)) {
- if (!AllowRiskySelect) {
- LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
- return false;
- }
-
- LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
- }
+  // S1 G_PHIs should not be selected in instruction-select; instead:
+  // - a divergent S1 G_PHI goes through the lane-mask merging algorithm and is
+  //   fully inst-selected in AMDGPUGlobalISelDivergenceLowering
+  // - a uniform S1 G_PHI is lowered into an S32 G_PHI in AMDGPURegBankSelect
+ if (DefTy == LLT::scalar(1))
+ return false;
// TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index 7f812ed..4b74f3b 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -127,6 +127,7 @@ class MTBUF_Real <MTBUF_Pseudo ps, string real_name = ps.Mnemonic> :
// copy relevant pseudo op flags
let UseNamedOperandTable = ps.UseNamedOperandTable;
let SubtargetPredicate = ps.SubtargetPredicate;
+ let OtherPredicates = ps.OtherPredicates;
let AsmMatchConverter = ps.AsmMatchConverter;
let Constraints = ps.Constraints;
let DisableEncoding = ps.DisableEncoding;
@@ -2508,7 +2509,7 @@ class MUBUF_Real_Atomic_gfx12_impl<bits<8> op, string ps_name,
multiclass MUBUF_Real_Atomic_gfx11_Renamed_impl<bits<8> op, bit is_return,
string real_name> {
- defvar Rtn = !if(!eq(is_return, 1), "_RTN", "");
+ defvar Rtn = !if(is_return, "_RTN", "");
def _BOTHEN#Rtn#_gfx11 :
MUBUF_Real_Atomic_gfx11_impl<op, NAME # "_BOTHEN" # Rtn, real_name>,
AtomicNoRet<NAME # "_BOTHEN_gfx11", is_return>;
@@ -2525,7 +2526,7 @@ multiclass MUBUF_Real_Atomic_gfx11_Renamed_impl<bits<8> op, bit is_return,
multiclass MUBUF_Real_Atomic_gfx12_Renamed_impl<bits<8> op, bit is_return,
string real_name> {
- defvar Rtn = !if(!eq(is_return, 1), "_RTN", "");
+ defvar Rtn = !if(is_return, "_RTN", "");
def _BOTHEN#Rtn#_gfx12 :
MUBUF_Real_Atomic_gfx12_impl<op, NAME # "_VBUFFER_BOTHEN" # Rtn, real_name>,
AtomicNoRet<NAME # "_BOTHEN_gfx12", is_return>;
diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td
index 074e1331..7d79b9b 100644
--- a/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -63,6 +63,7 @@ class DS_Real <DS_Pseudo ps, string opName = ps.Mnemonic> :
// copy relevant pseudo op flags
let GWS = ps.GWS;
let SubtargetPredicate = ps.SubtargetPredicate;
+ let WaveSizePredicate = ps.WaveSizePredicate;
let OtherPredicates = ps.OtherPredicates;
let SchedRW = ps.SchedRW;
let mayLoad = ps.mayLoad;
@@ -1261,7 +1262,9 @@ defm DS_PK_ADD_RTN_BF16 : DS_Real_gfx12<0x0ab>;
// New aliases added in GFX12 without renaming the instructions.
def : MnemonicAlias<"ds_subrev_u32", "ds_rsub_u32">, Requires<[isGFX12Plus]>;
+def : MnemonicAlias<"ds_subrev_rtn_u32", "ds_rsub_rtn_u32">, Requires<[isGFX12Plus]>;
def : MnemonicAlias<"ds_subrev_u64", "ds_rsub_u64">, Requires<[isGFX12Plus]>;
+def : MnemonicAlias<"ds_subrev_rtn_u64", "ds_rsub_rtn_u64">, Requires<[isGFX12Plus]>;
//===----------------------------------------------------------------------===//
// GFX11.
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index a7082f5..8a60168 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -153,6 +153,7 @@ class VFLAT_Real <bits<8> op, FLAT_Pseudo ps, string opName = ps.Mnemonic> :
// copy relevant pseudo op flags
let SubtargetPredicate = ps.SubtargetPredicate;
+ let WaveSizePredicate = ps.WaveSizePredicate;
let AsmMatchConverter = ps.AsmMatchConverter;
let OtherPredicates = ps.OtherPredicates;
let TSFlags = ps.TSFlags;
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index b2fc7d8..6edf01d 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -856,14 +856,6 @@ bool isReadOnlySegment(const GlobalValue *GV);
/// target triple \p TT, false otherwise.
bool shouldEmitConstantsToTextSection(const Triple &TT);
-/// \returns Integer value requested using \p F's \p Name attribute.
-///
-/// \returns \p Default if attribute is not present.
-///
-/// \returns \p Default and emits error if requested value cannot be converted
-/// to integer.
-int getIntegerAttribute(const Function &F, StringRef Name, int Default);
-
/// \returns A pair of integer values requested using \p F's \p Name attribute
/// in "first[,second]" format ("second" is optional unless \p OnlyFirstRequired
/// is false).
diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index 918bdb9..fa8d466 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -190,6 +190,7 @@ class VOP3_Real <VOP_Pseudo ps, int EncodingFamily, string asm_name = ps.Mnemoni
// copy relevant pseudo op flags
let SubtargetPredicate = ps.SubtargetPredicate;
+ let WaveSizePredicate = ps.WaveSizePredicate;
let OtherPredicates = ps.OtherPredicates;
let AsmMatchConverter = ps.AsmMatchConverter;
let AsmVariantName = ps.AsmVariantName;
diff --git a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index e78ea63..f0b69b0 100644
--- a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -1468,21 +1468,15 @@ void ARMExpandPseudo::CMSESaveClearFPRegsV8(
if (passesFPReg)
assert(STI->hasFPRegs() && "Subtarget needs fpregs");
- // Lazy store all fp registers to the stack
+ // Lazy store all fp registers to the stack.
// This executes as NOP in the absence of floating-point support.
- MachineInstrBuilder VLSTM =
- BuildMI(MBB, MBBI, DL, TII->get(ARM::VLSTM))
- .addReg(ARM::SP)
- .add(predOps(ARMCC::AL))
- .addImm(0); // Represents a pseoudo register list, has no effect on
- // the encoding.
- // Mark non-live registers as undef
- for (MachineOperand &MO : VLSTM->implicit_operands()) {
- if (MO.isReg() && !MO.isDef()) {
- Register Reg = MO.getReg();
- MO.setIsUndef(!LiveRegs.contains(Reg));
- }
- }
+ MachineInstrBuilder VLSTM = BuildMI(MBB, MBBI, DL, TII->get(ARM::VLSTM))
+ .addReg(ARM::SP)
+ .add(predOps(ARMCC::AL));
+ for (auto R : {ARM::VPR, ARM::FPSCR, ARM::FPSCR_NZCV, ARM::Q0, ARM::Q1,
+ ARM::Q2, ARM::Q3, ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7})
+ VLSTM.addReg(R, RegState::Implicit |
+ (LiveRegs.contains(R) ? 0 : RegState::Undef));
// Restore all arguments
for (const auto &Regs : ClearedFPRegs) {
@@ -1569,20 +1563,14 @@ void ARMExpandPseudo::CMSESaveClearFPRegsV81(MachineBasicBlock &MBB,
.addImm(CMSE_FP_SAVE_SIZE >> 2)
.add(predOps(ARMCC::AL));
- // Lazy store all fp registers to the stack.
- MachineInstrBuilder VLSTM =
- BuildMI(MBB, MBBI, DL, TII->get(ARM::VLSTM))
- .addReg(ARM::SP)
- .add(predOps(ARMCC::AL))
- .addImm(0); // Represents a pseoudo register list, has no effect on
- // the encoding.
- // Mark non-live registers as undef
- for (MachineOperand &MO : VLSTM->implicit_operands()) {
- if (MO.isReg() && MO.isImplicit() && !MO.isDef()) {
- Register Reg = MO.getReg();
- MO.setIsUndef(!LiveRegs.contains(Reg));
- }
- }
+ // Lazy store all FP registers to the stack
+ MachineInstrBuilder VLSTM = BuildMI(MBB, MBBI, DL, TII->get(ARM::VLSTM))
+ .addReg(ARM::SP)
+ .add(predOps(ARMCC::AL));
+ for (auto R : {ARM::VPR, ARM::FPSCR, ARM::FPSCR_NZCV, ARM::Q0, ARM::Q1,
+ ARM::Q2, ARM::Q3, ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7})
+ VLSTM.addReg(R, RegState::Implicit |
+ (LiveRegs.contains(R) ? 0 : RegState::Undef));
} else {
// Push all the callee-saved registers (s16-s31).
MachineInstrBuilder VPUSH =
@@ -1685,12 +1673,9 @@ void ARMExpandPseudo::CMSERestoreFPRegsV8(
// Lazy load fp regs from stack.
// This executes as NOP in the absence of floating-point support.
- MachineInstrBuilder VLLDM =
- BuildMI(MBB, MBBI, DL, TII->get(ARM::VLLDM))
- .addReg(ARM::SP)
- .add(predOps(ARMCC::AL))
- .addImm(0); // Represents a pseoudo register list, has no effect on
- // the encoding.
+ MachineInstrBuilder VLLDM = BuildMI(MBB, MBBI, DL, TII->get(ARM::VLLDM))
+ .addReg(ARM::SP)
+ .add(predOps(ARMCC::AL));
if (STI->fixCMSE_CVE_2021_35465()) {
auto Bundler = MIBundleBuilder(MBB, VLLDM);
@@ -1772,9 +1757,7 @@ void ARMExpandPseudo::CMSERestoreFPRegsV81(
// Load FP registers from stack.
BuildMI(MBB, MBBI, DL, TII->get(ARM::VLLDM))
.addReg(ARM::SP)
- .add(predOps(ARMCC::AL))
- .addImm(0); // Represents a pseoudo register list, has no effect on the
- // encoding.
+ .add(predOps(ARMCC::AL));
// Pop the stack space
BuildMI(MBB, MBBI, DL, TII->get(ARM::tADDspi), ARM::SP)
diff --git a/llvm/lib/Target/ARM/ARMInstrFormats.td b/llvm/lib/Target/ARM/ARMInstrFormats.td
index 4040858..14e3155 100644
--- a/llvm/lib/Target/ARM/ARMInstrFormats.td
+++ b/llvm/lib/Target/ARM/ARMInstrFormats.td
@@ -1749,37 +1749,6 @@ class AXSI4<dag oops, dag iops, IndexMode im, InstrItinClass itin,
let Inst{8} = 0; // Single precision
}
-// Single Precision with fixed registers.
-// For when the registers-to-be-stored/loaded are fixed, e.g. VLLDM and VLSTM
-class AXSI4FR<string asm, bit et, bit load>
- : InstARM<AddrMode4, 4, IndexModeNone, VFPLdStMulFrm, VFPDomain, "", NoItinerary> {
- // Instruction operands.
- bits<4> Rn;
- bits<13> regs; // Does not affect encoding, for assembly/disassembly only.
- list<Predicate> Predicates = [HasVFP2];
- let OutOperandList = (outs);
- let InOperandList = (ins GPRnopc:$Rn, pred:$p, dpr_reglist:$regs);
- let AsmString = asm;
- let Pattern = [];
- let DecoderNamespace = "VFP";
- // Encode instruction operands.
- let Inst{19-16} = Rn;
- let Inst{31-28} = 0b1110;
- let Inst{27-25} = 0b110;
- let Inst{24} = 0b0;
- let Inst{23} = 0b0;
- let Inst{22} = 0b0;
- let Inst{21} = 0b1;
- let Inst{20} = load; // Distinguishes vlldm from vlstm
- let Inst{15-12} = 0b0000;
- let Inst{11-9} = 0b101;
- let Inst{8} = 0; // Single precision
- let Inst{7} = et; // encoding type, 0 for T1 and 1 for T2.
- let Inst{6-0} = 0b0000000;
- let mayLoad = load;
- let mayStore = !eq(load, 0);
-}
-
// Double precision, unary
class ADuI<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
bit opcod5, dag oops, dag iops, InstrItinClass itin, string opc,
diff --git a/llvm/lib/Target/ARM/ARMInstrVFP.td b/llvm/lib/Target/ARM/ARMInstrVFP.td
index 3094a4d..55d3efb 100644
--- a/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -313,51 +313,29 @@ def : MnemonicAlias<"vstm", "vstmia">;
//===----------------------------------------------------------------------===//
// Lazy load / store multiple Instructions
//
-// VLLDM and VLSTM:
-// 2 encoding options:
-// T1 (bit 7 is 0):
-// T1 takes an optional dpr_reglist, must be '{d0-d15}' (exactly)
-// T1 require v8-M.Main, secure state, target with 16 D registers (or with no D registers - NOP)
-// T2 (bit 7 is 1):
-// T2 takes a mandatory dpr_reglist, must be '{d0-d31}' (exactly)
-// T2 require v8.1-M.Main, secure state, target with 16/32 D registers (or with no D registers - NOP)
-// (source: Arm v8-M ARM, DDI0553B.v ID16122022)
-
-def VLLDM : AXSI4FR<"vlldm${p}\t$Rn, $regs", 0, 1>,
+def VLLDM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
+ NoItinerary, "vlldm${p}\t$Rn", "", []>,
Requires<[HasV8MMainline, Has8MSecExt]> {
- let Defs = [VPR, FPSCR, FPSCR_NZCV, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15];
- let DecoderMethod = "DecodeLazyLoadStoreMul";
-}
-// T1: assembly does not contains the register list.
-def : InstAlias<"vlldm${p}\t$Rn", (VLLDM GPRnopc:$Rn, pred:$p, 0)>,
- Requires<[HasV8MMainline, Has8MSecExt]>;
-// T2: assembly must contains the register list.
-// The register list has no effect on the encoding, it is for assembly/disassembly purposes only.
-def VLLDM_T2 : AXSI4FR<"vlldm${p}\t$Rn, $regs", 1, 1>,
- Requires<[HasV8_1MMainline, Has8MSecExt]> {
- let Defs = [VPR, FPSCR, FPSCR_NZCV, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15,
- D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30, D31];
- let DecoderMethod = "DecodeLazyLoadStoreMul";
-}
-// T1: assembly contains the register list.
-// The register list has no effect on the encoding, it is for assembly/disassembly purposes only.
-def VLSTM : AXSI4FR<"vlstm${p}\t$Rn, $regs", 0, 0>,
+ let Inst{24-23} = 0b00;
+ let Inst{22} = 0;
+ let Inst{21} = 1;
+ let Inst{20} = 1;
+ let Inst{15-12} = 0;
+ let Inst{7-0} = 0;
+ let mayLoad = 1;
+ let Defs = [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, VPR, FPSCR, FPSCR_NZCV];
+}
+
+def VLSTM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
+ NoItinerary, "vlstm${p}\t$Rn", "", []>,
Requires<[HasV8MMainline, Has8MSecExt]> {
- let Defs = [VPR, FPSCR, FPSCR_NZCV];
- let Uses = [VPR, FPSCR, FPSCR_NZCV, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15];
- let DecoderMethod = "DecodeLazyLoadStoreMul";
-}
-// T1: assembly does not contain the register list.
-def : InstAlias<"vlstm${p}\t$Rn", (VLSTM GPRnopc:$Rn, pred:$p, 0)>,
- Requires<[HasV8MMainline, Has8MSecExt]>;
-// T2: assembly must contain the register list.
-// The register list has no effect on the encoding, it is for assembly/disassembly purposes only.
-def VLSTM_T2 : AXSI4FR<"vlstm${p}\t$Rn, $regs", 1, 0>,
- Requires<[HasV8_1MMainline, Has8MSecExt]> {
- let Defs = [VPR, FPSCR, FPSCR_NZCV];
- let Uses = [VPR, FPSCR, FPSCR_NZCV, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15,
- D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30, D31];
- let DecoderMethod = "DecodeLazyLoadStoreMul";
+ let Inst{24-23} = 0b00;
+ let Inst{22} = 0;
+ let Inst{21} = 1;
+ let Inst{20} = 0;
+ let Inst{15-12} = 0;
+ let Inst{7-0} = 0;
+ let mayStore = 1;
}
def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r), 0>,
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 5efbaf0..37bfb76 100644
--- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -450,12 +450,11 @@ class ARMAsmParser : public MCTargetAsmParser {
bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
unsigned ListNo);
- int tryParseRegister(bool AllowOutofBoundReg = false);
+ int tryParseRegister();
bool tryParseRegisterWithWriteBack(OperandVector &);
int tryParseShiftRegister(OperandVector &);
bool parseRegisterList(OperandVector &, bool EnforceOrder = true,
- bool AllowRAAC = false,
- bool AllowOutOfBoundReg = false);
+ bool AllowRAAC = false);
bool parseMemory(OperandVector &);
bool parseOperand(OperandVector &, StringRef Mnemonic);
bool parseImmExpr(int64_t &Out);
@@ -4073,7 +4072,7 @@ ParseStatus ARMAsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
-int ARMAsmParser::tryParseRegister(bool AllowOutOfBoundReg) {
+int ARMAsmParser::tryParseRegister() {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier)) return -1;
@@ -4117,8 +4116,7 @@ int ARMAsmParser::tryParseRegister(bool AllowOutOfBoundReg) {
}
// Some FPUs only have 16 D registers, so D16-D31 are invalid
- if (!AllowOutOfBoundReg && !hasD32() && RegNum >= ARM::D16 &&
- RegNum <= ARM::D31)
+ if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
return -1;
Parser.Lex(); // Eat identifier token.
@@ -4458,7 +4456,7 @@ insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
/// Parse a register list.
bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
- bool AllowRAAC, bool AllowOutOfBoundReg) {
+ bool AllowRAAC) {
MCAsmParser &Parser = getParser();
if (Parser.getTok().isNot(AsmToken::LCurly))
return TokError("Token is not a Left Curly Brace");
@@ -4512,7 +4510,7 @@ bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
return Error(RegLoc, "pseudo-register not allowed");
Parser.Lex(); // Eat the minus.
SMLoc AfterMinusLoc = Parser.getTok().getLoc();
- int EndReg = tryParseRegister(AllowOutOfBoundReg);
+ int EndReg = tryParseRegister();
if (EndReg == -1)
return Error(AfterMinusLoc, "register expected");
if (EndReg == ARM::RA_AUTH_CODE)
@@ -4547,7 +4545,7 @@ bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
RegLoc = Parser.getTok().getLoc();
int OldReg = Reg;
const AsmToken RegTok = Parser.getTok();
- Reg = tryParseRegister(AllowOutOfBoundReg);
+ Reg = tryParseRegister();
if (Reg == -1)
return Error(RegLoc, "register expected");
if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
@@ -6087,11 +6085,8 @@ bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
}
case AsmToken::LBrac:
return parseMemory(Operands);
- case AsmToken::LCurly: {
- bool AllowOutOfBoundReg = Mnemonic == "vlldm" || Mnemonic == "vlstm";
- return parseRegisterList(Operands, !Mnemonic.starts_with("clr"), false,
- AllowOutOfBoundReg);
- }
+ case AsmToken::LCurly:
+ return parseRegisterList(Operands, !Mnemonic.starts_with("clr"));
case AsmToken::Dollar:
case AsmToken::Hash: {
// #42 -> immediate
@@ -7601,33 +7596,6 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
const unsigned Opcode = Inst.getOpcode();
switch (Opcode) {
- case ARM::VLLDM:
- case ARM::VLLDM_T2:
- case ARM::VLSTM:
- case ARM::VLSTM_T2: {
- // Since in some cases both T1 and T2 are valid, tablegen can not always
- // pick the correct instruction.
- if (Operands.size() == 4) { // a register list has been provided
- ARMOperand &Op = static_cast<ARMOperand &>(
- *Operands[3]); // the register list, a dpr_reglist
- assert(Op.isDPRRegList());
- auto &RegList = Op.getRegList();
- // T2 requires v8.1-M.Main (cannot be handled by tablegen)
- if (RegList.size() == 32 && !hasV8_1MMainline()) {
- return Error(Op.getEndLoc(), "T2 version requires v8.1-M.Main");
- }
- // When target has 32 D registers, T1 is undefined.
- if (hasD32() && RegList.size() != 32) {
- return Error(Op.getEndLoc(), "operand must be exactly {d0-d31}");
- }
- // When target has 16 D registers, both T1 and T2 are valid.
- if (!hasD32() && (RegList.size() != 16 && RegList.size() != 32)) {
- return Error(Op.getEndLoc(),
- "operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)");
- }
- }
- return false;
- }
case ARM::t2IT: {
// Encoding is unpredictable if it ever results in a notional 'NV'
// predicate. Since we don't parse 'NV' directly this means an 'AL'
@@ -8763,32 +8731,6 @@ bool ARMAsmParser::processInstruction(MCInst &Inst,
}
switch (Inst.getOpcode()) {
- case ARM::VLLDM:
- case ARM::VLSTM: {
- // In some cases both T1 and T2 are valid, causing tablegen pick T1 instead
- // of T2
- if (Operands.size() == 4) { // a register list has been provided
- ARMOperand &Op = static_cast<ARMOperand &>(
- *Operands[3]); // the register list, a dpr_reglist
- assert(Op.isDPRRegList());
- auto &RegList = Op.getRegList();
- // When the register list is {d0-d31} the instruction has to be the T2
- // variant
- if (RegList.size() == 32) {
- const unsigned Opcode =
- (Inst.getOpcode() == ARM::VLLDM) ? ARM::VLLDM_T2 : ARM::VLSTM_T2;
- MCInst TmpInst;
- TmpInst.setOpcode(Opcode);
- TmpInst.addOperand(Inst.getOperand(0));
- TmpInst.addOperand(Inst.getOperand(1));
- TmpInst.addOperand(Inst.getOperand(2));
- TmpInst.addOperand(Inst.getOperand(3));
- Inst = TmpInst;
- return true;
- }
- }
- return false;
- }
// Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
case ARM::LDRT_POST:
case ARM::LDRBT_POST: {
diff --git a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index 705f3cbc..604f22d 100644
--- a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -700,9 +700,6 @@ DecodeMVEOverlappingLongShift(MCInst &Inst, unsigned Insn, uint64_t Address,
static DecodeStatus DecodeT2AddSubSPImm(MCInst &Inst, unsigned Insn,
uint64_t Address,
const MCDisassembler *Decoder);
-static DecodeStatus DecodeLazyLoadStoreMul(MCInst &Inst, unsigned Insn,
- uint64_t Address,
- const MCDisassembler *Decoder);
#include "ARMGenDisassemblerTables.inc"
@@ -7033,23 +7030,3 @@ static DecodeStatus DecodeT2AddSubSPImm(MCInst &Inst, unsigned Insn,
return DS;
}
-
-static DecodeStatus DecodeLazyLoadStoreMul(MCInst &Inst, unsigned Insn,
- uint64_t Address,
- const MCDisassembler *Decoder) {
- DecodeStatus S = MCDisassembler::Success;
-
- const unsigned Rn = fieldFromInstruction(Insn, 16, 4);
- // Adding Rn, holding memory location to save/load to/from, the only argument
- // that is being encoded.
- // '$Rn' in the assembly.
- if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
- return MCDisassembler::Fail;
- // An optional predicate, '$p' in the assembly.
- DecodePredicateOperand(Inst, ARMCC::AL, Address, Decoder);
- // An immediate that represents a floating point registers list. '$regs' in
- // the assembly.
- Inst.addOperand(MCOperand::createImm(0)); // Arbitrary value, has no effect.
-
- return S;
-}
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp
index 24e627c..fbd067d 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp
@@ -91,38 +91,6 @@ void ARMInstPrinter::printInst(const MCInst *MI, uint64_t Address,
unsigned Opcode = MI->getOpcode();
switch (Opcode) {
- case ARM::VLLDM: {
- const MCOperand &Reg = MI->getOperand(0);
- O << '\t' << "vlldm" << '\t';
- printRegName(O, Reg.getReg());
- O << ", "
- << "{d0 - d15}";
- return;
- }
- case ARM::VLLDM_T2: {
- const MCOperand &Reg = MI->getOperand(0);
- O << '\t' << "vlldm" << '\t';
- printRegName(O, Reg.getReg());
- O << ", "
- << "{d0 - d31}";
- return;
- }
- case ARM::VLSTM: {
- const MCOperand &Reg = MI->getOperand(0);
- O << '\t' << "vlstm" << '\t';
- printRegName(O, Reg.getReg());
- O << ", "
- << "{d0 - d15}";
- return;
- }
- case ARM::VLSTM_T2: {
- const MCOperand &Reg = MI->getOperand(0);
- O << '\t' << "vlstm" << '\t';
- printRegName(O, Reg.getReg());
- O << ", "
- << "{d0 - d31}";
- return;
- }
// Check for MOVs and print canonical forms, instead.
case ARM::MOVsr: {
// FIXME: Thumb variants?
diff --git a/llvm/lib/Target/DirectX/DXIL.td b/llvm/lib/Target/DirectX/DXIL.td
index 8a3454c..67ef798 100644
--- a/llvm/lib/Target/DirectX/DXIL.td
+++ b/llvm/lib/Target/DirectX/DXIL.td
@@ -12,139 +12,224 @@
//===----------------------------------------------------------------------===//
include "llvm/IR/Intrinsics.td"
-include "llvm/IR/Attributes.td"
-// Abstract representation of the class a DXIL Operation belongs to.
-class DXILOpClass<string name> {
- string Name = name;
+class DXILOpClass;
+
+// The following is a set of DXIL Operation classes whose names appear to be
+// arbitrary, yet need to be a substring of the function name used during
+// lowering to DXIL Operation calls. These class name strings are specified
+// as the third argument of add_dxil_op in utils/hct/hctdb.py and case-converted
+// in utils/hct/hctdb_instrhelp.py of the DirectXShaderCompiler repo. The
+// function name has the format "dx.op.<class-name>.<return-type>".
+
+defset list<DXILOpClass> OpClasses = {
+ def acceptHitAndEndSearch : DXILOpClass;
+ def allocateNodeOutputRecords : DXILOpClass;
+ def allocateRayQuery : DXILOpClass;
+ def annotateHandle : DXILOpClass;
+ def annotateNodeHandle : DXILOpClass;
+ def annotateNodeRecordHandle : DXILOpClass;
+ def atomicBinOp : DXILOpClass;
+ def atomicCompareExchange : DXILOpClass;
+ def attributeAtVertex : DXILOpClass;
+ def barrier : DXILOpClass;
+ def barrierByMemoryHandle : DXILOpClass;
+ def barrierByMemoryType : DXILOpClass;
+ def barrierByNodeRecordHandle : DXILOpClass;
+ def binary : DXILOpClass;
+ def binaryWithCarryOrBorrow : DXILOpClass;
+ def binaryWithTwoOuts : DXILOpClass;
+ def bitcastF16toI16 : DXILOpClass;
+ def bitcastF32toI32 : DXILOpClass;
+ def bitcastF64toI64 : DXILOpClass;
+ def bitcastI16toF16 : DXILOpClass;
+ def bitcastI32toF32 : DXILOpClass;
+ def bitcastI64toF64 : DXILOpClass;
+ def bufferLoad : DXILOpClass;
+ def bufferStore : DXILOpClass;
+ def bufferUpdateCounter : DXILOpClass;
+ def calculateLOD : DXILOpClass;
+ def callShader : DXILOpClass;
+ def cbufferLoad : DXILOpClass;
+ def cbufferLoadLegacy : DXILOpClass;
+ def checkAccessFullyMapped : DXILOpClass;
+ def coverage : DXILOpClass;
+ def createHandle : DXILOpClass;
+ def createHandleForLib : DXILOpClass;
+ def createHandleFromBinding : DXILOpClass;
+ def createHandleFromHeap : DXILOpClass;
+ def createNodeInputRecordHandle : DXILOpClass;
+ def createNodeOutputHandle : DXILOpClass;
+ def cutStream : DXILOpClass;
+ def cycleCounterLegacy : DXILOpClass;
+ def discard : DXILOpClass;
+ def dispatchMesh : DXILOpClass;
+ def dispatchRaysDimensions : DXILOpClass;
+ def dispatchRaysIndex : DXILOpClass;
+ def domainLocation : DXILOpClass;
+ def dot2 : DXILOpClass;
+ def dot2AddHalf : DXILOpClass;
+ def dot3 : DXILOpClass;
+ def dot4 : DXILOpClass;
+ def dot4AddPacked : DXILOpClass;
+ def emitIndices : DXILOpClass;
+ def emitStream : DXILOpClass;
+ def emitThenCutStream : DXILOpClass;
+ def evalCentroid : DXILOpClass;
+ def evalSampleIndex : DXILOpClass;
+ def evalSnapped : DXILOpClass;
+ def finishedCrossGroupSharing : DXILOpClass;
+ def flattenedThreadIdInGroup : DXILOpClass;
+ def geometryIndex : DXILOpClass;
+ def getDimensions : DXILOpClass;
+ def getInputRecordCount : DXILOpClass;
+ def getMeshPayload : DXILOpClass;
+ def getNodeRecordPtr : DXILOpClass;
+ def getRemainingRecursionLevels : DXILOpClass;
+ def groupId : DXILOpClass;
+ def gsInstanceID : DXILOpClass;
+ def hitKind : DXILOpClass;
+ def ignoreHit : DXILOpClass;
+ def incrementOutputCount : DXILOpClass;
+ def indexNodeHandle : DXILOpClass;
+ def innerCoverage : DXILOpClass;
+ def instanceID : DXILOpClass;
+ def instanceIndex : DXILOpClass;
+ def isHelperLane : DXILOpClass;
+ def isSpecialFloat : DXILOpClass;
+ def legacyDoubleToFloat : DXILOpClass;
+ def legacyDoubleToSInt32 : DXILOpClass;
+ def legacyDoubleToUInt32 : DXILOpClass;
+ def legacyF16ToF32 : DXILOpClass;
+ def legacyF32ToF16 : DXILOpClass;
+ def loadInput : DXILOpClass;
+ def loadOutputControlPoint : DXILOpClass;
+ def loadPatchConstant : DXILOpClass;
+ def makeDouble : DXILOpClass;
+ def minPrecXRegLoad : DXILOpClass;
+ def minPrecXRegStore : DXILOpClass;
+ def nodeOutputIsValid : DXILOpClass;
+ def objectRayDirection : DXILOpClass;
+ def objectRayOrigin : DXILOpClass;
+ def objectToWorld : DXILOpClass;
+ def outputComplete : DXILOpClass;
+ def outputControlPointID : DXILOpClass;
+ def pack4x8 : DXILOpClass;
+ def primitiveID : DXILOpClass;
+ def primitiveIndex : DXILOpClass;
+ def quadOp : DXILOpClass;
+ def quadReadLaneAt : DXILOpClass;
+ def quadVote : DXILOpClass;
+ def quaternary : DXILOpClass;
+ def rawBufferLoad : DXILOpClass;
+ def rawBufferStore : DXILOpClass;
+ def rayFlags : DXILOpClass;
+ def rayQuery_Abort : DXILOpClass;
+ def rayQuery_CommitNonOpaqueTriangleHit : DXILOpClass;
+ def rayQuery_CommitProceduralPrimitiveHit : DXILOpClass;
+ def rayQuery_Proceed : DXILOpClass;
+ def rayQuery_StateMatrix : DXILOpClass;
+ def rayQuery_StateScalar : DXILOpClass;
+ def rayQuery_StateVector : DXILOpClass;
+ def rayQuery_TraceRayInline : DXILOpClass;
+ def rayTCurrent : DXILOpClass;
+ def rayTMin : DXILOpClass;
+ def renderTargetGetSampleCount : DXILOpClass;
+ def renderTargetGetSamplePosition : DXILOpClass;
+ def reportHit : DXILOpClass;
+ def sample : DXILOpClass;
+ def sampleBias : DXILOpClass;
+ def sampleCmp : DXILOpClass;
+ def sampleCmpBias : DXILOpClass;
+ def sampleCmpGrad : DXILOpClass;
+ def sampleCmpLevel : DXILOpClass;
+ def sampleCmpLevelZero : DXILOpClass;
+ def sampleGrad : DXILOpClass;
+ def sampleIndex : DXILOpClass;
+ def sampleLevel : DXILOpClass;
+ def setMeshOutputCounts : DXILOpClass;
+ def splitDouble : DXILOpClass;
+ def startInstanceLocation : DXILOpClass;
+ def startVertexLocation : DXILOpClass;
+ def storeOutput : DXILOpClass;
+ def storePatchConstant : DXILOpClass;
+ def storePrimitiveOutput : DXILOpClass;
+ def storeVertexOutput : DXILOpClass;
+ def tempRegLoad : DXILOpClass;
+ def tempRegStore : DXILOpClass;
+ def tertiary : DXILOpClass;
+ def texture2DMSGetSamplePosition : DXILOpClass;
+ def textureGather : DXILOpClass;
+ def textureGatherCmp : DXILOpClass;
+ def textureGatherRaw : DXILOpClass;
+ def textureLoad : DXILOpClass;
+ def textureStore : DXILOpClass;
+ def textureStoreSample : DXILOpClass;
+ def threadId : DXILOpClass;
+ def threadIdInGroup : DXILOpClass;
+ def traceRay : DXILOpClass;
+ def unary : DXILOpClass;
+ def unaryBits : DXILOpClass;
+ def unpack4x8 : DXILOpClass;
+ def viewID : DXILOpClass;
+ def waveActiveAllEqual : DXILOpClass;
+ def waveActiveBallot : DXILOpClass;
+ def waveActiveBit : DXILOpClass;
+ def waveActiveOp : DXILOpClass;
+ def waveAllOp : DXILOpClass;
+ def waveAllTrue : DXILOpClass;
+ def waveAnyTrue : DXILOpClass;
+ def waveGetLaneCount : DXILOpClass;
+ def waveGetLaneIndex : DXILOpClass;
+ def waveIsFirstLane : DXILOpClass;
+ def waveMatch : DXILOpClass;
+ def waveMatrix_Accumulate : DXILOpClass;
+ def waveMatrix_Annotate : DXILOpClass;
+ def waveMatrix_Depth : DXILOpClass;
+ def waveMatrix_Fill : DXILOpClass;
+ def waveMatrix_LoadGroupShared : DXILOpClass;
+ def waveMatrix_LoadRawBuf : DXILOpClass;
+ def waveMatrix_Multiply : DXILOpClass;
+ def waveMatrix_ScalarOp : DXILOpClass;
+ def waveMatrix_StoreGroupShared : DXILOpClass;
+ def waveMatrix_StoreRawBuf : DXILOpClass;
+ def waveMultiPrefixBitCount : DXILOpClass;
+ def waveMultiPrefixOp : DXILOpClass;
+ def wavePrefixOp : DXILOpClass;
+ def waveReadLaneAt : DXILOpClass;
+ def waveReadLaneFirst : DXILOpClass;
+ def worldRayDirection : DXILOpClass;
+ def worldRayOrigin : DXILOpClass;
+ def worldToObject : DXILOpClass;
+ def writeSamplerFeedback : DXILOpClass;
+ def writeSamplerFeedbackBias : DXILOpClass;
+ def writeSamplerFeedbackGrad : DXILOpClass;
+  def writeSamplerFeedbackLevel : DXILOpClass;
}
-// Abstract representation of the category a DXIL Operation belongs to
-class DXILOpCategory<string name> {
- string Name = name;
+// Abstraction of the mapping from a DXIL Operation to an LLVM intrinsic.
+class DXILOpMapping<int opCode, DXILOpClass opClass, Intrinsic intrinsic, string doc> {
+  int OpCode = opCode;                 // Opcode of the DXIL Operation
+  DXILOpClass OpClass = opClass;       // Class of the DXIL Operation
+  Intrinsic LLVMIntrinsic = intrinsic; // LLVM intrinsic the DXIL Operation maps to
+  string Doc = doc;                    // Short description of the operation
}
-def UnaryClass : DXILOpClass<"Unary">;
-def BinaryClass : DXILOpClass<"Binary">;
-def FlattenedThreadIdInGroupClass : DXILOpClass<"FlattenedThreadIdInGroup">;
-def ThreadIdInGroupClass : DXILOpClass<"ThreadIdInGroup">;
-def ThreadIdClass : DXILOpClass<"ThreadId">;
-def GroupIdClass : DXILOpClass<"GroupId">;
-
-def BinaryUintCategory : DXILOpCategory<"Binary uint">;
-def UnaryFloatCategory : DXILOpCategory<"Unary float">;
-def ComputeIDCategory : DXILOpCategory<"Compute/Mesh/Amplification shader">;
-
-// Represent as any pointer type with an option to change to a qualified pointer
-// type with address space specified.
-def dxil_handle_ty : LLVMAnyPointerType;
-def dxil_cbuffer_ty : LLVMAnyPointerType;
-def dxil_resource_ty : LLVMAnyPointerType;
-
-// The parameter description for a DXIL operation
-class DXILOpParameter<int pos, LLVMType type, string name, string doc,
- bit isConstant = 0, string enumName = "",
- int maxValue = 0> {
- int Pos = pos; // Position in parameter list
- LLVMType ParamType = type; // Parameter type
- string Name = name; // Short, unique parameter name
- string Doc = doc; // Description of this parameter
- bit IsConstant = isConstant; // Whether this parameter requires a constant value in the IR
- string EnumName = enumName; // Name of the enum type, if applicable
- int MaxValue = maxValue; // Maximum value for this parameter, if applicable
-}
-
-// A representation for a DXIL operation
-class DXILOperationDesc {
- string OpName = ""; // Name of DXIL operation
- int OpCode = 0; // Unique non-negative integer associated with the operation
- DXILOpClass OpClass; // Class of the operation
- DXILOpCategory OpCategory; // Category of the operation
- string Doc = ""; // Description of the operation
- list<DXILOpParameter> Params = []; // Parameter list of the operation
- list<LLVMType> OverloadTypes = []; // Overload types, if applicable
- EnumAttr Attribute; // Operation Attribute. Leverage attributes defined in Attributes.td
- // ReadNone - operation does not access memory.
- // ReadOnly - only reads from memory.
- // "ReadMemory" - reads memory
- bit IsDerivative = 0; // Whether this is some kind of derivative
- bit IsGradient = 0; // Whether this requires a gradient calculation
- bit IsFeedback = 0; // Whether this is a sampler feedback operation
- bit IsWave = 0; // Whether this requires in-wave, cross-lane functionality
- bit NeedsUniformInputs = 0; // Whether this operation requires that all
- // of its inputs are uniform across the wave
- // Group DXIL operation for stats - e.g., to accumulate the number of atomic/float/uint/int/...
- // operations used in the program.
- list<string> StatsGroup = [];
-}
-
-class DXILOperation<string name, int opCode, DXILOpClass opClass, DXILOpCategory opCategory, string doc,
- list<LLVMType> oloadTypes, EnumAttr attrs, list<DXILOpParameter> params,
- list<string> statsGroup = []> : DXILOperationDesc {
- let OpName = name;
- let OpCode = opCode;
- let Doc = doc;
- let Params = params;
- let OpClass = opClass;
- let OpCategory = opCategory;
- let OverloadTypes = oloadTypes;
- let Attribute = attrs;
- let StatsGroup = statsGroup;
-}
-
-// LLVM intrinsic that DXIL operation maps to.
-class LLVMIntrinsic<Intrinsic llvm_intrinsic_> { Intrinsic llvm_intrinsic = llvm_intrinsic_; }
-
-def Sin : DXILOperation<"Sin", 13, UnaryClass, UnaryFloatCategory, "returns sine(theta) for theta in radians.",
- [llvm_half_ty, llvm_float_ty], ReadNone,
- [
- DXILOpParameter<0, llvm_anyfloat_ty, "", "operation result">,
- DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
- DXILOpParameter<2, llvm_anyfloat_ty, "value", "input value">
- ],
- ["floats"]>,
- LLVMIntrinsic<int_sin>;
-
-def UMax : DXILOperation< "UMax", 39, BinaryClass, BinaryUintCategory, "unsigned integer maximum. UMax(a,b) = a > b ? a : b",
- [llvm_i16_ty, llvm_i32_ty, llvm_i64_ty], ReadNone,
- [
- DXILOpParameter<0, llvm_anyint_ty, "", "operation result">,
- DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
- DXILOpParameter<2, llvm_anyint_ty, "a", "input value">,
- DXILOpParameter<3, llvm_anyint_ty, "b", "input value">
- ],
- ["uints"]>,
- LLVMIntrinsic<int_umax>;
-
-def ThreadId : DXILOperation< "ThreadId", 93, ThreadIdClass, ComputeIDCategory, "reads the thread ID", [llvm_i32_ty], ReadNone,
- [
- DXILOpParameter<0, llvm_i32_ty, "", "thread ID component">,
- DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
- DXILOpParameter<2, llvm_i32_ty, "component", "component to read (x,y,z)">
- ]>,
- LLVMIntrinsic<int_dx_thread_id>;
-
-def GroupId : DXILOperation< "GroupId", 94, GroupIdClass, ComputeIDCategory, "reads the group ID (SV_GroupID)", [llvm_i32_ty], ReadNone,
- [
- DXILOpParameter<0, llvm_i32_ty, "", "group ID component">,
- DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
- DXILOpParameter<2, llvm_i32_ty, "component", "component to read">
- ]>,
- LLVMIntrinsic<int_dx_group_id>;
-
-def ThreadIdInGroup : DXILOperation< "ThreadIdInGroup", 95, ThreadIdInGroupClass, ComputeIDCategory,
- "reads the thread ID within the group (SV_GroupThreadID)", [llvm_i32_ty], ReadNone,
- [
- DXILOpParameter<0, llvm_i32_ty, "", "thread ID in group component">,
- DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
- DXILOpParameter<2, llvm_i32_ty, "component", "component to read (x,y,z)">
- ]>,
- LLVMIntrinsic<int_dx_thread_id_in_group>;
-
-def FlattenedThreadIdInGroup : DXILOperation< "FlattenedThreadIdInGroup", 96, FlattenedThreadIdInGroupClass, ComputeIDCategory,
- "provides a flattened index for a given thread within a given group (SV_GroupIndex)", [llvm_i32_ty], ReadNone,
- [
- DXILOpParameter<0, llvm_i32_ty, "", "result">,
- DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">
- ]>,
- LLVMIntrinsic<int_dx_flattened_thread_id_in_group>;
+// Concrete definitions of DXIL Operations and their mappings to LLVM intrinsics
+def Sin : DXILOpMapping<13, unary, int_sin,
+ "Returns sine(theta) for theta in radians.">;
+def UMax : DXILOpMapping<39, binary, int_umax,
+ "Unsigned integer maximum. UMax(a,b) = a > b ? a : b">;
+def ThreadId : DXILOpMapping<93, threadId, int_dx_thread_id,
+ "Reads the thread ID">;
+def GroupId : DXILOpMapping<94, groupId, int_dx_group_id,
+ "Reads the group ID (SV_GroupID)">;
+def ThreadIdInGroup : DXILOpMapping<95, threadIdInGroup,
+ int_dx_thread_id_in_group,
+ "Reads the thread ID within the group "
+ "(SV_GroupThreadID)">;
+def FlattenedThreadIdInGroup : DXILOpMapping<96, flattenedThreadIdInGroup,
+ int_dx_flattened_thread_id_in_group,
+ "Provides a flattened index for a "
+ "given thread within a given "
+ "group (SV_GroupIndex)">;
diff --git a/llvm/lib/Target/DirectX/DXILOpBuilder.cpp b/llvm/lib/Target/DirectX/DXILOpBuilder.cpp
index 42180a8..21a20d4 100644
--- a/llvm/lib/Target/DirectX/DXILOpBuilder.cpp
+++ b/llvm/lib/Target/DirectX/DXILOpBuilder.cpp
@@ -221,12 +221,26 @@ static Type *getTypeFromParameterKind(ParameterKind Kind, Type *OverloadTy) {
return nullptr;
}
+/// Construct the DXIL function type. This is the type of a function with the
+/// following prototype:
+///   OverloadType dx.op.<opclass>.<return-type>(int opcode, <param-types>)
+/// where <param-types> are constructed from the types in \p Prop.
+/// \param Prop Structure containing DXIL Operation properties based on its
+///        specification in DXIL.td.
+/// \param OverloadTy Return type used to construct the DXIL function type.
static FunctionType *getDXILOpFunctionType(const OpCodeProperty *Prop,
Type *OverloadTy) {
SmallVector<Type *> ArgTys;
auto ParamKinds = getOpCodeParameterKind(*Prop);
+ // Add OverloadTy as return type of the function
+ ArgTys.emplace_back(OverloadTy);
+
+  // Add the DXIL opcode value type, i.e., i32, as the first argument
+ ArgTys.emplace_back(Type::getInt32Ty(OverloadTy->getContext()));
+
+ // Add DXIL Operation parameter types as specified in DXIL properties
for (unsigned I = 0; I < Prop->NumOfParameters; ++I) {
ParameterKind Kind = ParamKinds[I];
ArgTys.emplace_back(getTypeFromParameterKind(Kind, OverloadTy));
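For illustration only (not part of the patch): under the prototype described in the comment above, a unary float operation such as Sin ends up with a function type equivalent to float(i32, float). A minimal sketch building that shape directly:

#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

// float dx.op.unary.f32(i32 opcode, float value) -- for a unary op, the
// overload type is both the return type and the operand type.
static FunctionType *unaryFloatDXILOpType(LLVMContext &Ctx) {
  Type *OverloadTy = Type::getFloatTy(Ctx);
  Type *ArgTys[] = {Type::getInt32Ty(Ctx), OverloadTy};
  return FunctionType::get(OverloadTy, ArgTys, /*isVarArg=*/false);
}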
@@ -267,13 +281,13 @@ CallInst *DXILOpBuilder::createDXILOpCall(dxil::OpCode OpCode, Type *OverloadTy,
return B.CreateCall(Fn, FullArgs);
}
-Type *DXILOpBuilder::getOverloadTy(dxil::OpCode OpCode, FunctionType *FT,
- bool NoOpCodeParam) {
+Type *DXILOpBuilder::getOverloadTy(dxil::OpCode OpCode, FunctionType *FT) {
const OpCodeProperty *Prop = getOpCodeProperty(OpCode);
+ // If DXIL Op has no overload parameter, just return the
+ // precise return type specified.
if (Prop->OverloadParamIndex < 0) {
auto &Ctx = FT->getContext();
- // When only has 1 overload type, just return it.
switch (Prop->OverloadTys) {
case OverloadKind::VOID:
return Type::getVoidTy(Ctx);
@@ -302,9 +316,8 @@ Type *DXILOpBuilder::getOverloadTy(dxil::OpCode OpCode, FunctionType *FT,
// Prop->OverloadParamIndex is 0, overload type is FT->getReturnType().
Type *OverloadType = FT->getReturnType();
if (Prop->OverloadParamIndex != 0) {
- // Skip Return Type and Type for DXIL opcode.
- const unsigned SkipedParam = NoOpCodeParam ? 2 : 1;
- OverloadType = FT->getParamType(Prop->OverloadParamIndex - SkipedParam);
+ // Skip Return Type.
+ OverloadType = FT->getParamType(Prop->OverloadParamIndex - 1);
}
auto ParamKinds = getOpCodeParameterKind(*Prop);
diff --git a/llvm/lib/Target/DirectX/DXILOpBuilder.h b/llvm/lib/Target/DirectX/DXILOpBuilder.h
index 940ed53..1c15f10 100644
--- a/llvm/lib/Target/DirectX/DXILOpBuilder.h
+++ b/llvm/lib/Target/DirectX/DXILOpBuilder.h
@@ -31,8 +31,7 @@ public:
DXILOpBuilder(Module &M, IRBuilderBase &B) : M(M), B(B) {}
CallInst *createDXILOpCall(dxil::OpCode OpCode, Type *OverloadTy,
llvm::iterator_range<Use *> Args);
- Type *getOverloadTy(dxil::OpCode OpCode, FunctionType *FT,
- bool NoOpCodeParam);
+ Type *getOverloadTy(dxil::OpCode OpCode, FunctionType *FT);
static const char *getOpCodeName(dxil::OpCode DXILOp);
private:
diff --git a/llvm/lib/Target/DirectX/DXILOpLowering.cpp b/llvm/lib/Target/DirectX/DXILOpLowering.cpp
index f6e2297..6b649b7 100644
--- a/llvm/lib/Target/DirectX/DXILOpLowering.cpp
+++ b/llvm/lib/Target/DirectX/DXILOpLowering.cpp
@@ -33,8 +33,7 @@ static void lowerIntrinsic(dxil::OpCode DXILOp, Function &F, Module &M) {
IRBuilder<> B(M.getContext());
Value *DXILOpArg = B.getInt32(static_cast<unsigned>(DXILOp));
DXILOpBuilder DXILB(M, B);
- Type *OverloadTy =
- DXILB.getOverloadTy(DXILOp, F.getFunctionType(), /*NoOpCodeParam*/ true);
+ Type *OverloadTy = DXILB.getOverloadTy(DXILOp, F.getFunctionType());
for (User *U : make_early_inc_range(F.users())) {
CallInst *CI = dyn_cast<CallInst>(U);
if (!CI)
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index bcaf447..9773b29 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -185,7 +185,7 @@ def HasStdExtZabha : Predicate<"Subtarget->hasStdExtZabha()">,
"'Zabha' (Byte and Halfword Atomic Memory Operations)">;
def FeatureStdExtZacas
- : SubtargetFeature<"experimental-zacas", "HasStdExtZacas", "true",
+ : SubtargetFeature<"zacas", "HasStdExtZacas", "true",
"'Zacas' (Atomic Compare-And-Swap Instructions)">;
def HasStdExtZacas : Predicate<"Subtarget->hasStdExtZacas()">,
AssemblerPredicate<(all_of FeatureStdExtZacas),
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index dde1882..e647f56 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1999,6 +1999,10 @@ bool RISCVTargetLowering::canSplatOperand(Instruction *I, int Operand) const {
case Intrinsic::vp_sdiv:
case Intrinsic::vp_urem:
case Intrinsic::vp_srem:
+ case Intrinsic::ssub_sat:
+ case Intrinsic::vp_ssub_sat:
+ case Intrinsic::usub_sat:
+ case Intrinsic::vp_usub_sat:
return Operand == 1;
// These intrinsics are commutative.
case Intrinsic::vp_add:
@@ -2010,6 +2014,18 @@ bool RISCVTargetLowering::canSplatOperand(Instruction *I, int Operand) const {
case Intrinsic::vp_fmul:
case Intrinsic::vp_icmp:
case Intrinsic::vp_fcmp:
+ case Intrinsic::smin:
+ case Intrinsic::vp_smin:
+ case Intrinsic::umin:
+ case Intrinsic::vp_umin:
+ case Intrinsic::smax:
+ case Intrinsic::vp_smax:
+ case Intrinsic::umax:
+ case Intrinsic::vp_umax:
+ case Intrinsic::sadd_sat:
+ case Intrinsic::vp_sadd_sat:
+ case Intrinsic::uadd_sat:
+ case Intrinsic::vp_uadd_sat:
// These intrinsics have 'vr' versions.
case Intrinsic::vp_sub:
case Intrinsic::vp_fsub:
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 9d1f01d..a68674b 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -30,6 +30,8 @@
using namespace llvm;
+static cl::opt<bool> DisableCostPerUse("riscv-disable-cost-per-use",
+ cl::init(false), cl::Hidden);
static cl::opt<bool>
DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
cl::init(false),
@@ -712,7 +714,10 @@ void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
unsigned
RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
- return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() ? 1 : 0;
+ return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() &&
+ !DisableCostPerUse
+ ? 1
+ : 0;
}
// Add two address hints to improve chances of being able to use a compressed
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
index 040cec4..0430d60 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
@@ -198,6 +198,7 @@ def SiFive7Model : SchedMachineModel {
let LoadLatency = 3;
let MispredictPenalty = 3;
let CompleteModel = 0;
+ let PostRAScheduler = true;
let EnableIntervals = true;
let UnsupportedFeatures = [HasStdExtZbkb, HasStdExtZbkc, HasStdExtZbkx,
HasStdExtZcmt, HasStdExtZknd, HasStdExtZkne,
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index d23921f..a0b55b2 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -326,18 +326,6 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
switch (Kind) {
default:
break;
- case TTI::SK_InsertSubvector: {
- auto *FSubTy = cast<FixedVectorType>(SubTp);
- unsigned TpRegs = getRegUsageForType(Tp);
- unsigned SubTpRegs = getRegUsageForType(SubTp);
- unsigned NextSubTpRegs = getRegUsageForType(FixedVectorType::get(
- Tp->getElementType(), FSubTy->getNumElements() + 1));
- // Whole vector insert - just the vector itself.
- if (Index == 0 && SubTpRegs != 0 && SubTpRegs != NextSubTpRegs &&
- TpRegs >= SubTpRegs)
- return TTI::TCC_Free;
- break;
- }
case TTI::SK_PermuteSingleSrc: {
if (Mask.size() >= 2 && LT.second.isFixedLengthVector()) {
MVT EltTp = LT.second.getVectorElementType();
@@ -469,15 +457,28 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
// vslidedown.vi v8, v9, 2
return LT.first *
getRISCVInstructionCost(RISCV::VSLIDEDOWN_VI, LT.second, CostKind);
- case TTI::SK_InsertSubvector:
+ case TTI::SK_InsertSubvector: {
if (Index == 0 && !Args.empty() && any_of(Args, UndefValue::classof))
return TTI::TCC_Free;
+ const unsigned MinVLen = ST->getRealMinVLen();
+ const unsigned MaxVLen = ST->getRealMaxVLen();
+ // Whole vector insert - just the vector itself.
+ if (auto *FSubTy = dyn_cast<FixedVectorType>(SubTp);
+ FSubTy && Index == 0 && MinVLen == MaxVLen) {
+ unsigned TpRegs = getRegUsageForType(Tp);
+ unsigned SubTpRegs = getRegUsageForType(SubTp);
+ unsigned NextSubTpRegs = getRegUsageForType(FixedVectorType::get(
+ Tp->getElementType(), FSubTy->getNumElements() + 1));
+ if (SubTpRegs != 0 && SubTpRegs != NextSubTpRegs && TpRegs >= SubTpRegs)
+ return TTI::TCC_Free;
+ }
// Example sequence:
// vsetivli zero, 4, e8, mf2, tu, ma (ignored)
// vslideup.vi v8, v9, 2
return LT.first *
getRISCVInstructionCost(RISCV::VSLIDEUP_VI, LT.second, CostKind);
+ }
case TTI::SK_Select: {
// Example sequence:
// li a0, 90
@@ -503,9 +504,8 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
// vmv.v.x v8, a0
// vmsne.vi v0, v8, 0
return LT.first *
- (TLI->getLMULCost(LT.second) + // FIXME: should be 1 for andi
- getRISCVInstructionCost({RISCV::VMV_V_X, RISCV::VMSNE_VI},
- LT.second, CostKind));
+ (1 + getRISCVInstructionCost({RISCV::VMV_V_X, RISCV::VMSNE_VI},
+ LT.second, CostKind));
}
// Example sequence:
// vsetivli zero, 2, e8, mf8, ta, mu (ignored)
@@ -517,11 +517,10 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
// vmsne.vi v0, v8, 0
return LT.first *
- (TLI->getLMULCost(LT.second) + // FIXME: this should be 1 for andi
- getRISCVInstructionCost({RISCV::VMV_V_I, RISCV::VMERGE_VIM,
- RISCV::VMV_X_S, RISCV::VMV_V_X,
- RISCV::VMSNE_VI},
- LT.second, CostKind));
+ (1 + getRISCVInstructionCost({RISCV::VMV_V_I, RISCV::VMERGE_VIM,
+ RISCV::VMV_X_S, RISCV::VMV_V_X,
+ RISCV::VMSNE_VI},
+ LT.second, CostKind));
}
if (HasScalar) {
@@ -566,9 +565,12 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
if (LT.second.isFixedLengthVector())
// vrsub.vi has a 5 bit immediate field, otherwise an li suffices
LenCost = isInt<5>(LT.second.getVectorNumElements() - 1) ? 0 : 1;
- // FIXME: replace the constant `2` below with cost of {VID_V,VRSUB_VX}
+ unsigned Opcodes[] = {RISCV::VID_V, RISCV::VRSUB_VX, RISCV::VRGATHER_VV};
+ if (LT.second.isFixedLengthVector() &&
+ isInt<5>(LT.second.getVectorNumElements() - 1))
+ Opcodes[1] = RISCV::VRSUB_VI;
InstructionCost GatherCost =
- 2 + getRISCVInstructionCost(RISCV::VRGATHER_VV, LT.second, CostKind);
+ getRISCVInstructionCost(Opcodes, LT.second, CostKind);
// Mask operation additionally required extend and truncate
InstructionCost ExtendCost = Tp->getElementType()->isIntegerTy(1) ? 3 : 0;
return LT.first * (LenCost + GatherCost + ExtendCost);
diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
index 95c4b02..b9fb3fd 100644
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -61,22 +61,22 @@ public:
}
private:
- void ExpandICallBranchFunnel(MachineBasicBlock *MBB,
+ void expandICallBranchFunnel(MachineBasicBlock *MBB,
MachineBasicBlock::iterator MBBI);
void expandCALL_RVMARKER(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI);
- bool ExpandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
- bool ExpandMBB(MachineBasicBlock &MBB);
+ bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
+ bool expandMBB(MachineBasicBlock &MBB);
/// This function expands pseudos which affects control flow.
/// It is done in separate pass to simplify blocks navigation in main
- /// pass(calling ExpandMBB).
- bool ExpandPseudosWhichAffectControlFlow(MachineFunction &MF);
+  /// pass (calling expandMBB).
+ bool expandPseudosWhichAffectControlFlow(MachineFunction &MF);
/// Expand X86::VASTART_SAVE_XMM_REGS into set of xmm copying instructions,
/// placed into separate block guarded by check for al register(for SystemV
/// abi).
- void ExpandVastartSaveXmmRegs(
+ void expandVastartSaveXmmRegs(
MachineBasicBlock *EntryBlk,
MachineBasicBlock::iterator VAStartPseudoInstr) const;
};
@@ -87,7 +87,7 @@ char X86ExpandPseudo::ID = 0;
INITIALIZE_PASS(X86ExpandPseudo, DEBUG_TYPE, X86_EXPAND_PSEUDO_NAME, false,
false)
-void X86ExpandPseudo::ExpandICallBranchFunnel(
+void X86ExpandPseudo::expandICallBranchFunnel(
MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI) {
MachineBasicBlock *JTMBB = MBB;
MachineInstr *JTInst = &*MBBI;
@@ -259,12 +259,12 @@ void X86ExpandPseudo::expandCALL_RVMARKER(MachineBasicBlock &MBB,
/// If \p MBBI is a pseudo instruction, this method expands
/// it to the corresponding (sequence of) actual instruction(s).
/// \returns true if \p MBBI has been expanded.
-bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
+bool X86ExpandPseudo::expandMI(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) {
MachineInstr &MI = *MBBI;
unsigned Opcode = MI.getOpcode();
const DebugLoc &DL = MBBI->getDebugLoc();
- bool HasEGPR = STI->hasEGPR();
+#define GET_EGPR_IF_ENABLED(OPC) (STI->hasEGPR() ? OPC##_EVEX : OPC)
switch (Opcode) {
default:
return false;
@@ -468,12 +468,10 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
Register Reg1 = TRI->getSubReg(Reg, X86::sub_mask_1);
auto MIBLo =
- BuildMI(MBB, MBBI, DL,
- TII->get(HasEGPR ? X86::KMOVWkm_EVEX : X86::KMOVWkm))
+ BuildMI(MBB, MBBI, DL, TII->get(GET_EGPR_IF_ENABLED(X86::KMOVWkm)))
.addReg(Reg0, RegState::Define | getDeadRegState(DstIsDead));
auto MIBHi =
- BuildMI(MBB, MBBI, DL,
- TII->get(HasEGPR ? X86::KMOVWkm_EVEX : X86::KMOVWkm))
+ BuildMI(MBB, MBBI, DL, TII->get(GET_EGPR_IF_ENABLED(X86::KMOVWkm)))
.addReg(Reg1, RegState::Define | getDeadRegState(DstIsDead));
for (int i = 0; i < X86::AddrNumOperands; ++i) {
@@ -505,10 +503,10 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
Register Reg0 = TRI->getSubReg(Reg, X86::sub_mask_0);
Register Reg1 = TRI->getSubReg(Reg, X86::sub_mask_1);
- auto MIBLo = BuildMI(MBB, MBBI, DL,
- TII->get(HasEGPR ? X86::KMOVWmk_EVEX : X86::KMOVWmk));
- auto MIBHi = BuildMI(MBB, MBBI, DL,
- TII->get(HasEGPR ? X86::KMOVWmk_EVEX : X86::KMOVWmk));
+ auto MIBLo =
+ BuildMI(MBB, MBBI, DL, TII->get(GET_EGPR_IF_ENABLED(X86::KMOVWmk)));
+ auto MIBHi =
+ BuildMI(MBB, MBBI, DL, TII->get(GET_EGPR_IF_ENABLED(X86::KMOVWmk)));
for (int i = 0; i < X86::AddrNumOperands; ++i) {
MIBLo.add(MBBI->getOperand(i));
@@ -554,9 +552,8 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
return true;
}
case TargetOpcode::ICALL_BRANCH_FUNNEL:
- ExpandICallBranchFunnel(&MBB, MBBI);
+ expandICallBranchFunnel(&MBB, MBBI);
return true;
-#define GET_EGPR_IF_ENABLED(OPC) (STI->hasEGPR() ? OPC##_EVEX : OPC)
case X86::PLDTILECFGV: {
MI.setDesc(TII->get(GET_EGPR_IF_ENABLED(X86::LDTILECFG)));
return true;
@@ -634,7 +631,7 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
// | |
// | |
//
-void X86ExpandPseudo::ExpandVastartSaveXmmRegs(
+void X86ExpandPseudo::expandVastartSaveXmmRegs(
MachineBasicBlock *EntryBlk,
MachineBasicBlock::iterator VAStartPseudoInstr) const {
assert(VAStartPseudoInstr->getOpcode() == X86::VASTART_SAVE_XMM_REGS);
@@ -719,27 +716,27 @@ void X86ExpandPseudo::ExpandVastartSaveXmmRegs(
/// Expand all pseudo instructions contained in \p MBB.
/// \returns true if any expansion occurred for \p MBB.
-bool X86ExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
+bool X86ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
bool Modified = false;
// MBBI may be invalidated by the expansion.
MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
while (MBBI != E) {
MachineBasicBlock::iterator NMBBI = std::next(MBBI);
- Modified |= ExpandMI(MBB, MBBI);
+ Modified |= expandMI(MBB, MBBI);
MBBI = NMBBI;
}
return Modified;
}
-bool X86ExpandPseudo::ExpandPseudosWhichAffectControlFlow(MachineFunction &MF) {
+bool X86ExpandPseudo::expandPseudosWhichAffectControlFlow(MachineFunction &MF) {
// Currently pseudo which affects control flow is only
// X86::VASTART_SAVE_XMM_REGS which is located in Entry block.
// So we do not need to evaluate other blocks.
for (MachineInstr &Instr : MF.front().instrs()) {
if (Instr.getOpcode() == X86::VASTART_SAVE_XMM_REGS) {
- ExpandVastartSaveXmmRegs(&(MF.front()), Instr);
+ expandVastartSaveXmmRegs(&(MF.front()), Instr);
return true;
}
}
@@ -754,10 +751,10 @@ bool X86ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
X86FI = MF.getInfo<X86MachineFunctionInfo>();
X86FL = STI->getFrameLowering();
- bool Modified = ExpandPseudosWhichAffectControlFlow(MF);
+ bool Modified = expandPseudosWhichAffectControlFlow(MF);
for (MachineBasicBlock &MBB : MF)
- Modified |= ExpandMBB(MBB);
+ Modified |= expandMBB(MBB);
return Modified;
}
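In X86ExpandPseudo.cpp the hunks above rename the Expand* methods to LLVM's current lower-camel-case function naming and hoist the GET_EGPR_IF_ENABLED macro to the top of expandMI, so every KMOV and tile-config expansion picks the EVEX-encoded opcode when the subtarget has extended GPRs. A standalone sketch of that token-pasting selection pattern follows; the opcode enum, Subtarget type and flag values are hypothetical stand-ins.

// Standalone sketch of the opcode-selection macro pattern used above.
#include <iostream>

enum Opcode { KMOVWkm, KMOVWkm_EVEX, LDTILECFG, LDTILECFG_EVEX };

struct Subtarget {
  bool EGPR = false;
  bool hasEGPR() const { return EGPR; }
};

// Pastes _EVEX onto the opcode name when extended GPRs are enabled,
// mirroring GET_EGPR_IF_ENABLED; a pointer named STI is assumed in scope.
#define GET_EGPR_IF_ENABLED(OPC) (STI->hasEGPR() ? OPC##_EVEX : OPC)

int main() {
  Subtarget Legacy, Apx;
  Apx.EGPR = true;

  const Subtarget *STI = &Legacy;
  std::cout << GET_EGPR_IF_ENABLED(KMOVWkm) << "\n";   // prints 0 (KMOVWkm)
  STI = &Apx;
  std::cout << GET_EGPR_IF_ENABLED(LDTILECFG) << "\n"; // prints 3 (LDTILECFG_EVEX)
}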
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index c8f80ce..5cbd9ab 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2732,13 +2732,15 @@ bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
insertDAGNode(*CurDAG, N, Zext);
SDValue NewShl = CurDAG->getNode(ISD::SHL, DL, VT, Zext, ShlAmt);
insertDAGNode(*CurDAG, N, NewShl);
+ CurDAG->ReplaceAllUsesWith(N, NewShl);
+ CurDAG->RemoveDeadNode(N.getNode());
// Convert the shift to scale factor.
AM.Scale = 1 << ShAmtV;
- AM.IndexReg = Zext;
-
- CurDAG->ReplaceAllUsesWith(N, NewShl);
- CurDAG->RemoveDeadNode(N.getNode());
+      // If matchIndexRecursively is not called here, Zext may be replaced
+      // by other nodes but still be used later in a call to a builder
+      // method.
+ AM.IndexReg = matchIndexRecursively(Zext, AM, Depth + 1);
return false;
}
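The X86ISelDAGToDAG hunk moves ReplaceAllUsesWith/RemoveDeadNode ahead of the addressing-mode update and re-derives the index through matchIndexRecursively, so a Zext node that is later replaced is never cached in AM.IndexReg; the shift amount itself still becomes a scale of 1 << ShAmtV. Below is a minimal standalone sketch of that shift-to-scale conversion only, with a hypothetical helper name; it is not the SelectionDAG code.

// Minimal sketch of the shift-to-scale conversion kept by the hunk above
// (AM.Scale = 1 << ShAmtV).
#include <iostream>

// x86 scaled addressing only supports scales of 1, 2, 4 and 8, i.e. shift
// amounts 0..3; larger shifts cannot be folded into the address.
static bool shiftToScale(unsigned ShAmt, unsigned &Scale) {
  if (ShAmt > 3)
    return false;
  Scale = 1u << ShAmt;
  return true;
}

int main() {
  unsigned Scale = 0;
  if (shiftToScale(3, Scale))
    std::cout << "scale " << Scale << "\n"; // scale 8
}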
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index bec13d1..d98d914 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -5878,13 +5878,16 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
}
}
- // Peek through trunc/aext/zext.
+ // Peek through trunc/aext/zext/bitcast.
// TODO: aext shouldn't require SM_SentinelZero padding.
// TODO: handle shift of scalars.
unsigned MinBitsPerElt = Scl.getScalarValueSizeInBits();
while (Scl.getOpcode() == ISD::TRUNCATE ||
Scl.getOpcode() == ISD::ANY_EXTEND ||
- Scl.getOpcode() == ISD::ZERO_EXTEND) {
+ Scl.getOpcode() == ISD::ZERO_EXTEND ||
+ (Scl.getOpcode() == ISD::BITCAST &&
+ Scl.getScalarValueSizeInBits() ==
+ Scl.getOperand(0).getScalarValueSizeInBits())) {
Scl = Scl.getOperand(0);
MinBitsPerElt =
std::min<unsigned>(MinBitsPerElt, Scl.getScalarValueSizeInBits());
@@ -41399,7 +41402,9 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
break;
}
case X86ISD::CVTSI2P:
- case X86ISD::CVTUI2P: {
+ case X86ISD::CVTUI2P:
+ case X86ISD::CVTPH2PS:
+ case X86ISD::CVTPS2PH: {
SDValue Src = Op.getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
APInt SrcUndef, SrcZero;
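getFauxShuffleMask above now also peeks through bitcasts whose scalar width matches their operand, and SimplifyDemandedVectorEltsForTargetNode handles CVTPH2PS/CVTPS2PH alongside the existing CVTSI2P/CVTUI2P cases. A standalone sketch of the peek-through loop follows, with a toy node type in place of SDValue; all names are hypothetical.

// Standalone sketch of the "peek through width-preserving wrappers" loop.
#include <algorithm>
#include <iostream>

enum Kind { Trunc, AnyExt, ZExt, Bitcast, Leaf };

struct ToyNode {
  Kind K;
  unsigned ScalarBits;
  ToyNode *Operand = nullptr;
};

// Walks through trunc/aext/zext and same-width bitcasts, tracking the
// minimum scalar width seen, as the shuffle-combining code does.
static unsigned peekThrough(ToyNode *&Scl) {
  unsigned MinBits = Scl->ScalarBits;
  while (Scl->K == Trunc || Scl->K == AnyExt || Scl->K == ZExt ||
         (Scl->K == Bitcast &&
          Scl->ScalarBits == Scl->Operand->ScalarBits)) {
    Scl = Scl->Operand;
    MinBits = std::min(MinBits, Scl->ScalarBits);
  }
  return MinBits;
}

int main() {
  ToyNode Src{Leaf, 16};
  ToyNode Cast{Bitcast, 16, &Src}; // same scalar width: peeked through
  ToyNode Ext{ZExt, 32, &Cast};
  ToyNode *Scl = &Ext;
  std::cout << peekThrough(Scl) << "\n"; // 16
}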
diff --git a/llvm/lib/Target/X86/X86InstrSystem.td b/llvm/lib/Target/X86/X86InstrSystem.td
index d051047..56293e2 100644
--- a/llvm/lib/Target/X86/X86InstrSystem.td
+++ b/llvm/lib/Target/X86/X86InstrSystem.td
@@ -716,7 +716,7 @@ def INVPCID64 : I<0x82, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
def INVPCID64_EVEX : I<0xF2, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
"invpcid\t{$src2, $src1|$src1, $src2}", []>,
- EVEX, NoCD8, T_MAP4, XS, Requires<[In64BitMode]>;
+ EVEX, NoCD8, T_MAP4, XS, WIG, Requires<[In64BitMode]>;
} // SchedRW
let Predicates = [HasINVPCID, NoEGPR] in {
diff --git a/llvm/lib/Target/X86/X86InstrUtils.td b/llvm/lib/Target/X86/X86InstrUtils.td
index 05ddcfb..04d9d10 100644
--- a/llvm/lib/Target/X86/X86InstrUtils.td
+++ b/llvm/lib/Target/X86/X86InstrUtils.td
@@ -967,6 +967,7 @@ class ITy<bits<8> o, Format f, X86TypeInfo t, dag outs, dag ins, string m,
!strconcat(m, "{", t.InstrSuffix, "}\t", args), p>, NoCD8 {
let hasSideEffects = 0;
let hasREX_W = t.HasREX_W;
+ let IgnoresW = !if(!eq(t.VT, i8), 1, 0);
}
// BinOpRR - Instructions that read "reg, reg".
diff --git a/llvm/lib/Target/X86/X86InstrVMX.td b/llvm/lib/Target/X86/X86InstrVMX.td
index 7cc468f..da2b3d7 100644
--- a/llvm/lib/Target/X86/X86InstrVMX.td
+++ b/llvm/lib/Target/X86/X86InstrVMX.td
@@ -24,7 +24,7 @@ def INVEPT64 : I<0x80, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
Requires<[In64BitMode]>;
def INVEPT64_EVEX : I<0xF0, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
"invept\t{$src2, $src1|$src1, $src2}", []>,
- EVEX, NoCD8, T_MAP4, XS, Requires<[In64BitMode]>;
+ EVEX, NoCD8, T_MAP4, XS, WIG, Requires<[In64BitMode]>;
// 66 0F 38 81
def INVVPID32 : I<0x81, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
@@ -35,7 +35,7 @@ def INVVPID64 : I<0x81, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
Requires<[In64BitMode]>;
def INVVPID64_EVEX : I<0xF1, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
"invvpid\t{$src2, $src1|$src1, $src2}", []>,
- EVEX, NoCD8, T_MAP4, XS, Requires<[In64BitMode]>;
+ EVEX, NoCD8, T_MAP4, XS, WIG, Requires<[In64BitMode]>;
// 0F 01 C1
def VMCALL : I<0x01, MRM_C1, (outs), (ins), "vmcall", []>, TB;
diff --git a/llvm/lib/Transforms/IPO/FunctionImport.cpp b/llvm/lib/Transforms/IPO/FunctionImport.cpp
index 49b3f2b..5c7a74d 100644
--- a/llvm/lib/Transforms/IPO/FunctionImport.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionImport.cpp
@@ -125,7 +125,8 @@ static cl::opt<bool> ComputeDead("compute-dead", cl::init(true), cl::Hidden,
static cl::opt<bool> EnableImportMetadata(
"enable-import-metadata", cl::init(false), cl::Hidden,
- cl::desc("Enable import metadata like 'thinlto_src_module'"));
+ cl::desc("Enable import metadata like 'thinlto_src_module' and "
+ "'thinlto_src_file'"));
/// Summary file to use for function importing when using -function-import from
/// the command line.
@@ -1643,11 +1644,17 @@ Expected<bool> FunctionImporter::importFunctions(
if (Error Err = F.materialize())
return std::move(Err);
if (EnableImportMetadata) {
- // Add 'thinlto_src_module' metadata for statistics and debugging.
+ // Add 'thinlto_src_module' and 'thinlto_src_file' metadata for
+ // statistics and debugging.
F.setMetadata(
"thinlto_src_module",
MDNode::get(DestModule.getContext(),
{MDString::get(DestModule.getContext(),
+ SrcModule->getModuleIdentifier())}));
+ F.setMetadata(
+ "thinlto_src_file",
+ MDNode::get(DestModule.getContext(),
+ {MDString::get(DestModule.getContext(),
SrcModule->getSourceFileName())}));
}
GlobalsToImport.insert(&F);
@@ -1687,11 +1694,17 @@ Expected<bool> FunctionImporter::importFunctions(
<< GO->getName() << " from "
<< SrcModule->getSourceFileName() << "\n");
if (EnableImportMetadata) {
- // Add 'thinlto_src_module' metadata for statistics and debugging.
+ // Add 'thinlto_src_module' and 'thinlto_src_file' metadata for
+ // statistics and debugging.
Fn->setMetadata(
"thinlto_src_module",
MDNode::get(DestModule.getContext(),
{MDString::get(DestModule.getContext(),
+ SrcModule->getModuleIdentifier())}));
+ Fn->setMetadata(
+ "thinlto_src_file",
+ MDNode::get(DestModule.getContext(),
+ {MDString::get(DestModule.getContext(),
SrcModule->getSourceFileName())}));
}
GlobalsToImport.insert(Fn);
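The FunctionImport.cpp hunks attach a second metadata node, thinlto_src_file, next to the existing thinlto_src_module whenever -enable-import-metadata is set. A sketch of that attachment pattern using the corresponding IR APIs is below; it assumes an LLVM build environment, and the helper name is ours.

// Sketch: attach single-string metadata nodes to a function, in the same
// shape as the thinlto_src_module / thinlto_src_file attachments above.
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

static void attachSourceInfo(Function &F, StringRef ModuleId,
                             StringRef SourceFile) {
  LLVMContext &Ctx = F.getContext();
  // Each node holds one MDString operand, matching the pattern in the diff.
  F.setMetadata("thinlto_src_module",
                MDNode::get(Ctx, {MDString::get(Ctx, ModuleId)}));
  F.setMetadata("thinlto_src_file",
                MDNode::get(Ctx, {MDString::get(Ctx, SourceFile)}));
}

With both nodes attached, an imported definition can be traced back to the module identifier and the original source file when debugging ThinLTO imports.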
diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 6ce9eb3..490cb7e 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -905,8 +905,8 @@ static bool processSRem(BinaryOperator *SDI, const ConstantRange &LCR,
for (Operand &Op : Ops) {
if (Op.D == Domain::NonNegative)
continue;
- auto *BO =
- BinaryOperator::CreateNeg(Op.V, Op.V->getName() + ".nonneg", SDI);
+ auto *BO = BinaryOperator::CreateNeg(Op.V, Op.V->getName() + ".nonneg",
+ SDI->getIterator());
BO->setDebugLoc(SDI->getDebugLoc());
Op.V = BO;
}
@@ -919,7 +919,8 @@ static bool processSRem(BinaryOperator *SDI, const ConstantRange &LCR,
// If the divident was non-positive, we need to negate the result.
if (Ops[0].D == Domain::NonPositive) {
- Res = BinaryOperator::CreateNeg(Res, Res->getName() + ".neg", SDI);
+ Res = BinaryOperator::CreateNeg(Res, Res->getName() + ".neg",
+ SDI->getIterator());
Res->setDebugLoc(SDI->getDebugLoc());
}
@@ -966,8 +967,8 @@ static bool processSDiv(BinaryOperator *SDI, const ConstantRange &LCR,
for (Operand &Op : Ops) {
if (Op.D == Domain::NonNegative)
continue;
- auto *BO =
- BinaryOperator::CreateNeg(Op.V, Op.V->getName() + ".nonneg", SDI);
+ auto *BO = BinaryOperator::CreateNeg(Op.V, Op.V->getName() + ".nonneg",
+ SDI->getIterator());
BO->setDebugLoc(SDI->getDebugLoc());
Op.V = BO;
}
@@ -981,7 +982,8 @@ static bool processSDiv(BinaryOperator *SDI, const ConstantRange &LCR,
// If the operands had two different domains, we need to negate the result.
if (Ops[0].D != Ops[1].D) {
- Res = BinaryOperator::CreateNeg(Res, Res->getName() + ".neg", SDI);
+ Res = BinaryOperator::CreateNeg(Res, Res->getName() + ".neg",
+ SDI->getIterator());
Res->setDebugLoc(SDI->getDebugLoc());
}
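The CorrelatedValuePropagation hunks above (and the Reassociate hunks that follow) switch BinaryOperator::CreateNeg to the BasicBlock::iterator insert-position overload, passing the instruction's getIterator() rather than the raw pointer, in line with LLVM's move toward iterator-based insertion positions. A sketch of the call shape is below; the helper name is ours and an LLVM build environment is assumed.

// Sketch: create a negation at an iterator-based insert position.
#include "llvm/ADT/Twine.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Value.h"

using namespace llvm;

static Value *negateBefore(Value *V, Instruction *InsertPt) {
  // Iterator-based form used by the updated code paths above.
  auto *Neg = BinaryOperator::CreateNeg(V, V->getName() + ".neg",
                                        InsertPt->getIterator());
  Neg->setDebugLoc(InsertPt->getDebugLoc());
  return Neg;
}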
diff --git a/llvm/lib/Transforms/Scalar/Reassociate.cpp b/llvm/lib/Transforms/Scalar/Reassociate.cpp
index 818c7b4..61109ed 100644
--- a/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -270,7 +270,8 @@ static BinaryOperator *CreateMul(Value *S1, Value *S2, const Twine &Name,
}
static Instruction *CreateNeg(Value *S1, const Twine &Name,
- Instruction *InsertBefore, Value *FlagsOp) {
+ BasicBlock::iterator InsertBefore,
+ Value *FlagsOp) {
if (S1->getType()->isIntOrIntVectorTy())
return BinaryOperator::CreateNeg(S1, Name, InsertBefore);
@@ -958,7 +959,8 @@ static Value *NegateValue(Value *V, Instruction *BI,
// Insert a 'neg' instruction that subtracts the value from zero to get the
// negation.
- Instruction *NewNeg = CreateNeg(V, V->getName() + ".neg", BI, BI);
+ Instruction *NewNeg =
+ CreateNeg(V, V->getName() + ".neg", BI->getIterator(), BI);
ToRedo.insert(NewNeg);
return NewNeg;
}
@@ -1246,7 +1248,7 @@ Value *ReassociatePass::RemoveFactorFromExpression(Value *V, Value *Factor) {
}
if (NeedsNegate)
- V = CreateNeg(V, "neg", &*InsertPt, BO);
+ V = CreateNeg(V, "neg", InsertPt, BO);
return V;
}
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index ea77b60..50a073e 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -315,12 +315,6 @@ static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
cl::desc(
"Enable runtime interleaving until load/store ports are saturated"));
-/// Interleave small loops with scalar reductions.
-static cl::opt<bool> InterleaveSmallLoopScalarReduction(
- "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
- cl::desc("Enable interleaving for loops with small iteration counts that "
- "contain scalar reductions to expose ILP."));
-
/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
"vectorize-num-stores-pred", cl::init(1), cl::Hidden,
@@ -5495,8 +5489,7 @@ LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
// If there are scalar reductions and TTI has enabled aggressive
// interleaving for reductions, we will interleave to expose ILP.
- if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
- AggressivelyInterleaveReductions) {
+ if (VF.isScalar() && AggressivelyInterleaveReductions) {
LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
// Interleave no less than SmallIC but not as aggressive as the normal IC
// to satisfy the rare situation when resources are too limited.
@@ -9010,11 +9003,8 @@ void LoopVectorizationPlanner::adjustRecipesForReductions(
BasicBlock *BB = CurrentLinkI->getParent();
VPValue *CondOp = nullptr;
- if (CM.blockNeedsPredicationForAnyReason(BB)) {
- VPBuilder::InsertPointGuard Guard(Builder);
- Builder.setInsertPoint(CurrentLink);
+ if (CM.blockNeedsPredicationForAnyReason(BB))
CondOp = RecipeBuilder.getBlockInMask(BB);
- }
VPReductionRecipe *RedRecipe = new VPReductionRecipe(
RdxDesc, CurrentLinkI, PreviousLink, VecOp, CondOp);
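The LoopVectorize hunks drop the interleave-small-loop-scalar-reduction option, so interleaving for scalar reductions is now gated only by a scalar VF plus the TTI aggressive-interleaving hook, and the reduction-recipe path reads the block-in mask without moving the VPBuilder insert point. A standalone sketch of the simplified gate follows; the parameter names are stand-ins for the cost-model state.

// Standalone sketch of the interleave gate after the change above.
#include <iostream>

static bool shouldInterleaveScalarReductions(bool VFIsScalar,
                                             bool AggressivelyInterleaveReductions) {
  // Previously this was additionally gated by the now-removed
  // -interleave-small-loop-scalar-reduction option (default off).
  return VFIsScalar && AggressivelyInterleaveReductions;
}

int main() {
  std::cout << shouldInterleaveScalarReductions(true, true) << "\n";  // 1
  std::cout << shouldInterleaveScalarReductions(false, true) << "\n"; // 0
}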
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll
index 30da63b..7cc7cff 100644
--- a/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll
@@ -14,7 +14,7 @@ define void @vector_broadcast() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = shufflevector <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = shufflevector <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = shufflevector <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %8 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %8 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %9 = shufflevector <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %10 = shufflevector <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %11 = shufflevector <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
@@ -29,7 +29,7 @@ define void @vector_broadcast() {
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = shufflevector <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = shufflevector <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = shufflevector <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %8 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %8 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %9 = shufflevector <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %10 = shufflevector <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %11 = shufflevector <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
@@ -78,20 +78,20 @@ declare <vscale x 16 x i32> @llvm.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x
define void @vector_reverse() {
; CHECK-LABEL: 'vector_reverse'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %reverse_nxv16i8 = call <vscale x 16 x i8> @llvm.experimental.vector.reverse.nxv16i8(<vscale x 16 x i8> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %reverse_nxv32i8 = call <vscale x 32 x i8> @llvm.experimental.vector.reverse.nxv32i8(<vscale x 32 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %reverse_nxv16i8 = call <vscale x 16 x i8> @llvm.experimental.vector.reverse.nxv16i8(<vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %reverse_nxv32i8 = call <vscale x 32 x i8> @llvm.experimental.vector.reverse.nxv32i8(<vscale x 32 x i8> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %reverse_nxv2i16 = call <vscale x 2 x i16> @llvm.experimental.vector.reverse.nxv2i16(<vscale x 2 x i16> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %reverse_nxv4i16 = call <vscale x 4 x i16> @llvm.experimental.vector.reverse.nxv4i16(<vscale x 4 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %reverse_nxv8i16 = call <vscale x 8 x i16> @llvm.experimental.vector.reverse.nxv8i16(<vscale x 8 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %reverse_nxv16i16 = call <vscale x 16 x i16> @llvm.experimental.vector.reverse.nxv16i16(<vscale x 16 x i16> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %reverse_nxv4i32 = call <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %reverse_nxv8i32 = call <vscale x 8 x i32> @llvm.experimental.vector.reverse.nxv8i32(<vscale x 8 x i32> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %reverse_nxv2i64 = call <vscale x 2 x i64> @llvm.experimental.vector.reverse.nxv2i64(<vscale x 2 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %reverse_nxv4i64 = call <vscale x 4 x i64> @llvm.experimental.vector.reverse.nxv4i64(<vscale x 4 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 69 for instruction: %reverse_nxv8i64 = call <vscale x 8 x i64> @llvm.experimental.vector.reverse.nxv8i64(<vscale x 8 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 138 for instruction: %reverse_nxv16i64 = call <vscale x 16 x i64> @llvm.experimental.vector.reverse.nxv16i64(<vscale x 16 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 276 for instruction: %reverse_nxv32i64 = call <vscale x 32 x i64> @llvm.experimental.vector.reverse.nxv32i64(<vscale x 32 x i64> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %reverse_nxv16i1 = call <vscale x 16 x i1> @llvm.experimental.vector.reverse.nxv16i1(<vscale x 16 x i1> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %reverse_nxv8i16 = call <vscale x 8 x i16> @llvm.experimental.vector.reverse.nxv8i16(<vscale x 8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %reverse_nxv16i16 = call <vscale x 16 x i16> @llvm.experimental.vector.reverse.nxv16i16(<vscale x 16 x i16> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %reverse_nxv4i32 = call <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %reverse_nxv8i32 = call <vscale x 8 x i32> @llvm.experimental.vector.reverse.nxv8i32(<vscale x 8 x i32> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %reverse_nxv2i64 = call <vscale x 2 x i64> @llvm.experimental.vector.reverse.nxv2i64(<vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %reverse_nxv4i64 = call <vscale x 4 x i64> @llvm.experimental.vector.reverse.nxv4i64(<vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 83 for instruction: %reverse_nxv8i64 = call <vscale x 8 x i64> @llvm.experimental.vector.reverse.nxv8i64(<vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 166 for instruction: %reverse_nxv16i64 = call <vscale x 16 x i64> @llvm.experimental.vector.reverse.nxv16i64(<vscale x 16 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 332 for instruction: %reverse_nxv32i64 = call <vscale x 32 x i64> @llvm.experimental.vector.reverse.nxv32i64(<vscale x 32 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %reverse_nxv16i1 = call <vscale x 16 x i1> @llvm.experimental.vector.reverse.nxv16i1(<vscale x 16 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %reverse_nxv8i1 = call <vscale x 8 x i1> @llvm.experimental.vector.reverse.nxv8i1(<vscale x 8 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %reverse_nxv4i1 = call <vscale x 4 x i1> @llvm.experimental.vector.reverse.nxv4i1(<vscale x 4 x i1> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %reverse_nxv2i1 = call <vscale x 2 x i1> @llvm.experimental.vector.reverse.nxv2i1(<vscale x 2 x i1> undef)
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-vectorinsert.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-vectorinsert.ll
index 5b16b87..607abee 100644
--- a/llvm/test/Analysis/CostModel/RISCV/rvv-vectorinsert.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-vectorinsert.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v < %s | FileCheck %s
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -riscv-v-vector-bits-max=128 < %s | FileCheck %s --check-prefix=RTH-MINMAX
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v -cost-kind=code-size < %s | FileCheck %s --check-prefix=SIZE
define void @vector_insert_nxv128i8_0(<vscale x 128 x i8> %v) {
@@ -20,6 +21,23 @@ define void @vector_insert_nxv128i8_0(<vscale x 128 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> %v, <128 x i8> undef, i64 0)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+; RTH-MINMAX-LABEL: 'vector_insert_nxv128i8_0'
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> %v, <vscale x 1 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> %v, <vscale x 2 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> %v, <vscale x 4 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> %v, <vscale x 8 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> %v, <vscale x 16 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> %v, <vscale x 32 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> %v, <vscale x 64 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> %v, <2 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> %v, <4 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> %v, <8 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> %v, <16 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> %v, <32 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> %v, <64 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> %v, <128 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
; SIZE-LABEL: 'vector_insert_nxv128i8_0'
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> %v, <vscale x 1 x i8> undef, i64 0)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> %v, <vscale x 2 x i8> undef, i64 0)
@@ -73,6 +91,23 @@ define void @vector_insert_nxv128i8_undef_0() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> undef, <128 x i8> undef, i64 0)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+; RTH-MINMAX-LABEL: 'vector_insert_nxv128i8_undef_0'
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> undef, <vscale x 1 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> undef, <vscale x 2 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> undef, <vscale x 4 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> undef, <vscale x 8 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> undef, <vscale x 16 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> undef, <vscale x 32 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> undef, <vscale x 64 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> undef, <2 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> undef, <4 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> undef, <8 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> undef, <16 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> undef, <32 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> undef, <64 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> undef, <128 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
; SIZE-LABEL: 'vector_insert_nxv128i8_undef_0'
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> undef, <vscale x 1 x i8> undef, i64 0)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> undef, <vscale x 2 x i8> undef, i64 0)
@@ -126,6 +161,23 @@ define void @vector_insert_nxv128i8_1(<vscale x 128 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> %v, <128 x i8> undef, i64 128)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+; RTH-MINMAX-LABEL: 'vector_insert_nxv128i8_1'
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> %v, <vscale x 1 x i8> undef, i64 1)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> %v, <vscale x 2 x i8> undef, i64 2)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> %v, <vscale x 4 x i8> undef, i64 4)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> %v, <vscale x 8 x i8> undef, i64 8)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> %v, <vscale x 16 x i8> undef, i64 16)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> %v, <vscale x 32 x i8> undef, i64 32)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> %v, <vscale x 64 x i8> undef, i64 64)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> %v, <2 x i8> undef, i64 2)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> %v, <4 x i8> undef, i64 4)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> %v, <8 x i8> undef, i64 8)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> %v, <16 x i8> undef, i64 16)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> %v, <32 x i8> undef, i64 32)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> %v, <64 x i8> undef, i64 64)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> %v, <128 x i8> undef, i64 128)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
; SIZE-LABEL: 'vector_insert_nxv128i8_1'
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> %v, <vscale x 1 x i8> undef, i64 1)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> %v, <vscale x 2 x i8> undef, i64 2)
@@ -179,6 +231,23 @@ define void @vector_insert_nxv128i8_undef_1() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> undef, <128 x i8> undef, i64 128)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+; RTH-MINMAX-LABEL: 'vector_insert_nxv128i8_undef_1'
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> undef, <vscale x 1 x i8> undef, i64 1)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> undef, <vscale x 2 x i8> undef, i64 2)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv4i8(<vscale x 128 x i8> undef, <vscale x 4 x i8> undef, i64 4)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv8i8(<vscale x 128 x i8> undef, <vscale x 8 x i8> undef, i64 8)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv16i8(<vscale x 128 x i8> undef, <vscale x 16 x i8> undef, i64 16)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv32i8(<vscale x 128 x i8> undef, <vscale x 32 x i8> undef, i64 32)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv64i8(<vscale x 128 x i8> undef, <vscale x 64 x i8> undef, i64 64)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v2i8(<vscale x 128 x i8> undef, <2 x i8> undef, i64 2)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v4i8(<vscale x 128 x i8> undef, <4 x i8> undef, i64 4)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_mf2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v8i8(<vscale x 128 x i8> undef, <8 x i8> undef, i64 8)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m1 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v16i8(<vscale x 128 x i8> undef, <16 x i8> undef, i64 16)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m2 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v32i8(<vscale x 128 x i8> undef, <32 x i8> undef, i64 32)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v64i8(<vscale x 128 x i8> undef, <64 x i8> undef, i64 64)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %fixed_m8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.v128i8(<vscale x 128 x i8> undef, <128 x i8> undef, i64 128)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
; SIZE-LABEL: 'vector_insert_nxv128i8_undef_1'
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf8 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv1i8(<vscale x 128 x i8> undef, <vscale x 1 x i8> undef, i64 1)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %scalable_mf4 = call <vscale x 128 x i8> @llvm.vector.insert.nxv128i8.nxv2i8(<vscale x 128 x i8> undef, <vscale x 2 x i8> undef, i64 2)
@@ -219,20 +288,30 @@ define void @vector_insert_v128i8_0(<128 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> %v, <2 x i8> undef, i64 0)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> %v, <4 x i8> undef, i64 0)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> %v, <8 x i8> undef, i64 0)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> %v, <16 x i8> undef, i64 0)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> %v, <32 x i8> undef, i64 0)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> %v, <64 x i8> undef, i64 0)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> %v, <128 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> %v, <16 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> %v, <32 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> %v, <64 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> %v, <128 x i8> undef, i64 0)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+; RTH-MINMAX-LABEL: 'vector_insert_v128i8_0'
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> %v, <2 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> %v, <4 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> %v, <8 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> %v, <16 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> %v, <32 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> %v, <64 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> %v, <128 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
; SIZE-LABEL: 'vector_insert_v128i8_0'
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> %v, <2 x i8> undef, i64 0)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> %v, <4 x i8> undef, i64 0)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> %v, <8 x i8> undef, i64 0)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> %v, <16 x i8> undef, i64 0)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> %v, <32 x i8> undef, i64 0)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> %v, <64 x i8> undef, i64 0)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> %v, <128 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> %v, <16 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> %v, <32 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> %v, <64 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> %v, <128 x i8> undef, i64 0)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
%fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> %v, <2 x i8> undef, i64 0)
@@ -250,20 +329,30 @@ define void @vector_insert_v128i8_undef_0() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> undef, <2 x i8> undef, i64 0)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> undef, <4 x i8> undef, i64 0)
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> undef, <8 x i8> undef, i64 0)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> undef, <16 x i8> undef, i64 0)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> undef, <32 x i8> undef, i64 0)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> undef, <64 x i8> undef, i64 0)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> undef, <128 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> undef, <16 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> undef, <32 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> undef, <64 x i8> undef, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> undef, <128 x i8> undef, i64 0)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+; RTH-MINMAX-LABEL: 'vector_insert_v128i8_undef_0'
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> undef, <2 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> undef, <4 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> undef, <8 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> undef, <16 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> undef, <32 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> undef, <64 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> undef, <128 x i8> undef, i64 0)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
; SIZE-LABEL: 'vector_insert_v128i8_undef_0'
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> undef, <2 x i8> undef, i64 0)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> undef, <4 x i8> undef, i64 0)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> undef, <8 x i8> undef, i64 0)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> undef, <16 x i8> undef, i64 0)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> undef, <32 x i8> undef, i64 0)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> undef, <64 x i8> undef, i64 0)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> undef, <128 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> undef, <16 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> undef, <32 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> undef, <64 x i8> undef, i64 0)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_m8 = call <128 x i8> @llvm.vector.insert.v128i8.v128i8(<128 x i8> undef, <128 x i8> undef, i64 0)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
%fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> undef, <2 x i8> undef, i64 0)
@@ -286,6 +375,15 @@ define void @vector_insert_v128i8_1(<128 x i8> %v) {
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> %v, <64 x i8> undef, i64 64)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+; RTH-MINMAX-LABEL: 'vector_insert_v128i8_1'
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> %v, <2 x i8> undef, i64 2)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> %v, <4 x i8> undef, i64 4)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> %v, <8 x i8> undef, i64 8)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> %v, <16 x i8> undef, i64 16)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> %v, <32 x i8> undef, i64 32)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> %v, <64 x i8> undef, i64 64)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
; SIZE-LABEL: 'vector_insert_v128i8_1'
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> %v, <2 x i8> undef, i64 2)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> %v, <4 x i8> undef, i64 4)
@@ -315,6 +413,15 @@ define void @vector_insert_v128i8_undef_1() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> undef, <64 x i8> undef, i64 64)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
+; RTH-MINMAX-LABEL: 'vector_insert_v128i8_undef_1'
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> undef, <2 x i8> undef, i64 2)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> undef, <4 x i8> undef, i64 4)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_mf2 = call <128 x i8> @llvm.vector.insert.v128i8.v8i8(<128 x i8> undef, <8 x i8> undef, i64 8)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m1 = call <128 x i8> @llvm.vector.insert.v128i8.v16i8(<128 x i8> undef, <16 x i8> undef, i64 16)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m2 = call <128 x i8> @llvm.vector.insert.v128i8.v32i8(<128 x i8> undef, <32 x i8> undef, i64 32)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fixed_m4 = call <128 x i8> @llvm.vector.insert.v128i8.v64i8(<128 x i8> undef, <64 x i8> undef, i64 64)
+; RTH-MINMAX-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
; SIZE-LABEL: 'vector_insert_v128i8_undef_1'
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf8 = call <128 x i8> @llvm.vector.insert.v128i8.v2i8(<128 x i8> undef, <2 x i8> undef, i64 2)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fixed_mf4 = call <128 x i8> @llvm.vector.insert.v128i8.v4i8(<128 x i8> undef, <4 x i8> undef, i64 4)
diff --git a/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll b/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll
index fc4a6b1..46bf315 100644
--- a/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll
@@ -45,9 +45,9 @@ define void @broadcast_scalable() #0{
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %38 = shufflevector <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %39 = shufflevector <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %40 = shufflevector <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
-; CHECK-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %41 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
-; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %42 = shufflevector <vscale x 32 x i1> undef, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
-; CHECK-NEXT: Cost Model: Found an estimated cost of 41 for instruction: %43 = shufflevector <vscale x 64 x i1> undef, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %41 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %42 = shufflevector <vscale x 32 x i1> undef, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+; CHECK-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %43 = shufflevector <vscale x 64 x i1> undef, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
; SIZE-LABEL: 'broadcast_scalable'
@@ -92,9 +92,9 @@ define void @broadcast_scalable() #0{
; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %38 = shufflevector <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %39 = shufflevector <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %40 = shufflevector <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
-; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %41 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
-; SIZE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %42 = shufflevector <vscale x 32 x i1> undef, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
-; SIZE-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %43 = shufflevector <vscale x 64 x i1> undef, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
+; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %41 = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %42 = shufflevector <vscale x 32 x i1> undef, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %43 = shufflevector <vscale x 64 x i1> undef, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
%zero = shufflevector <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
diff --git a/llvm/test/Analysis/CostModel/RISCV/shuffle-insert_subvector.ll b/llvm/test/Analysis/CostModel/RISCV/shuffle-insert_subvector.ll
index 19a571b..9a333dc 100644
--- a/llvm/test/Analysis/CostModel/RISCV/shuffle-insert_subvector.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/shuffle-insert_subvector.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-min=-1 | FileCheck %s
-; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-min=128 -riscv-v-vector-bits-max=128 | FileCheck %s
; RUN: opt < %s -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-min=-1 | FileCheck %s --check-prefix=SIZE
; Check that we don't crash querying costs when vectors are not enabled.
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv32
diff --git a/llvm/test/Analysis/CostModel/RISCV/shuffle-reverse.ll b/llvm/test/Analysis/CostModel/RISCV/shuffle-reverse.ll
index 146909c..e80dbe3 100644
--- a/llvm/test/Analysis/CostModel/RISCV/shuffle-reverse.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/shuffle-reverse.ll
@@ -20,21 +20,21 @@ define void @reverse() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i16 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i16 = shufflevector <4 x i16> undef, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i16 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i16 = shufflevector <16 x i16> undef, <16 x i16> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i16 = shufflevector <16 x i16> undef, <16 x i16> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i32 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i32 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8i32 = shufflevector <8 x i32> undef, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i32 = shufflevector <8 x i32> undef, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i64 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i64 = shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i64 = shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2f16 = shufflevector <2 x half> undef, <2 x half> undef, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4f16 = shufflevector <4 x half> undef, <4 x half> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8f16 = shufflevector <8 x half> undef, <8 x half> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16f16 = shufflevector <16 x half> undef, <16 x half> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = shufflevector <16 x half> undef, <16 x half> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2f32 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4f32 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8f32 = shufflevector <8 x float> undef, <8 x float> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f32 = shufflevector <8 x float> undef, <8 x float> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2f64 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f64 = shufflevector <4 x double> undef, <4 x double> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = shufflevector <4 x double> undef, <4 x double> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
; SIZE-LABEL: 'reverse'
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-2-icmps-of-0-and-or.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-2-icmps-of-0-and-or.mir
new file mode 100644
index 0000000..2ce5c69
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-2-icmps-of-0-and-or.mir
@@ -0,0 +1,1244 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
+# REQUIRES: asserts
+
+
+---
+name: valid_and_eq_0_eq_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: valid_and_eq_0_eq_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR %x, %y
+ ; CHECK-NEXT: %and:_(s1) = G_ICMP intpred(eq), [[OR]](s32), %zero
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s32), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %and:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_and_eq_1_eq_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_and_eq_1_eq_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s32), %one
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s32), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %one:_(s32) = G_CONSTANT i32 1
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s32), %one:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s32), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %and:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_and_eq_0_eq_1_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_and_eq_0_eq_1_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s32), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s32), %one
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %one:_(s32) = G_CONSTANT i32 1
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s32), %one:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %and:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_and_ne_0_eq_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_and_ne_0_eq_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s32), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s32), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s32), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %and:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_and_eq_0_ne_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_and_eq_0_ne_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s32), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s32), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s32), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %and:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_and_ne_0_ne_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_and_ne_0_ne_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s32), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s32), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s32), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %and:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: valid_or_ne_0_ne_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: valid_or_ne_0_ne_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR %x, %y
+ ; CHECK-NEXT: %or:_(s1) = G_ICMP intpred(ne), [[OR]](s32), %zero
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s32), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %or:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_or_ne_1_ne_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_or_ne_1_ne_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s32), %one
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s32), %zero
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %one:_(s32) = G_CONSTANT i32 1
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s32), %one:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s32), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %or:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_or_ne_0_ne_1_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_or_ne_0_ne_1_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s32), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s32), %one
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %one:_(s32) = G_CONSTANT i32 1
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s32), %one:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %or:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_or_eq_0_ne_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_or_eq_0_ne_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s32), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s32), %zero
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s32), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %or:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: invalid_or_ne_0_eq_0_s32
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: invalid_or_ne_0_eq_0_s32
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s32) = COPY $w1
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s32), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s32), %zero
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s32) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $w0 = COPY %zext(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %zero:_(s32) = G_CONSTANT i32 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s32), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s32), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s32) = G_ZEXT %or:_(s1)
+ $w0 = COPY %zext
+ RET_ReallyLR implicit $w0
+
+...
+
+---
+name: valid_and_eq_0_eq_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: valid_and_eq_0_eq_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR %x, %y
+ ; CHECK-NEXT: %and:_(s1) = G_ICMP intpred(eq), [[OR]](s64), %zero
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s64), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_eq_1_eq_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_eq_1_eq_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %one:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s64), %one
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s64), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %one:_(s64) = G_CONSTANT i64 1
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s64), %one:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s64), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_eq_0_eq_1_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_eq_0_eq_1_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %one:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s64), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s64), %one
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %one:_(s64) = G_CONSTANT i64 1
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s64), %one:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_ne_0_eq_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_ne_0_eq_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s64), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s64), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s64), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_eq_0_ne_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_eq_0_ne_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s64), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s64), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s64), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_ne_0_ne_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_ne_0_ne_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s64), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s64), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s64), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: valid_or_ne_0_ne_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: valid_or_ne_0_ne_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR %x, %y
+ ; CHECK-NEXT: %or:_(s1) = G_ICMP intpred(ne), [[OR]](s64), %zero
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s64), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %or:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_ne_1_ne_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_ne_1_ne_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %one:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s64), %one
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s64), %zero
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %one:_(s64) = G_CONSTANT i64 1
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s64), %one:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s64), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %or:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_ne_0_ne_1_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_ne_0_ne_1_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %one:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s64), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s64), %one
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %one:_(s64) = G_CONSTANT i64 1
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s64), %one:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %or:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_eq_0_ne_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_eq_0_ne_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s64), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(ne), %y(s64), %zero
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(ne), %y:_(s64), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %or:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_ne_0_eq_0_s64
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_ne_0_eq_0_s64
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s64) = COPY $x0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(ne), %x(s64), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s64), %zero
+ ; CHECK-NEXT: %or:_(s1) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %or(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(ne), %x:_(s64), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s64), %zero:_
+ %or:_(s1) = G_OR %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %or:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: valid_and_eq_0_eq_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: valid_and_eq_0_eq_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s32>) = G_OR %x, %y
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_ICMP intpred(eq), [[OR]](<2 x s32>), %zero
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_eq_non_0_eq_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_eq_non_0_eq_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %scalar0:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %scalar1:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ ; CHECK-NEXT: %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x(<2 x s32>), %non_zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %scalar0:_(s32) = G_CONSTANT i32 0
+ %scalar1:_(s32) = G_CONSTANT i32 1
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x s32>), %non_zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_eq_0_eq_non_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_eq_0_eq_non_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %scalar0:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %scalar1:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ ; CHECK-NEXT: %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y(<2 x s32>), %non_zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %scalar0:_(s32) = G_CONSTANT i32 0
+ %scalar1:_(s32) = G_CONSTANT i32 1
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x s32>), %non_zero:_
+ %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_ne_0_eq_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_ne_0_eq_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_eq_0_ne_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_eq_0_ne_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_and_ne_0_ne_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_and_ne_0_ne_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: valid_or_ne_0_ne_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: valid_or_ne_0_ne_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s32>) = G_OR %x, %y
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_ICMP intpred(ne), [[OR]](<2 x s32>), %zero
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_ne_non_0_ne_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_ne_non_0_ne_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %scalar0:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %scalar1:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ ; CHECK-NEXT: %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x(<2 x s32>), %non_zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %scalar0:_(s32) = G_CONSTANT i32 0
+ %scalar1:_(s32) = G_CONSTANT i32 1
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x:_(<2 x s32>), %non_zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_ne_0_ne_non_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_ne_0_ne_non_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %scalar0:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %scalar1:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ ; CHECK-NEXT: %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y(<2 x s32>), %non_zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %scalar0:_(s32) = G_CONSTANT i32 0
+ %scalar1:_(s32) = G_CONSTANT i32 1
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar0(s32)
+ %non_zero:_(<2 x s32>) = G_BUILD_VECTOR %scalar0(s32), %scalar1(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y:_(<2 x s32>), %non_zero:_
+ %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_eq_0_ne_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_eq_0_ne_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(ne), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_ne_0_eq_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_ne_0_eq_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(ne), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_or_eq_0_eq_0_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_or_eq_0_eq_0_vec
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+ ; CHECK-NEXT: %zero_scalar:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x(<2 x s32>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y(<2 x s32>), %zero
+ ; CHECK-NEXT: %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %and(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s32>) = COPY $x1
+ %zero_scalar:_(s32) = G_CONSTANT i32 0
+ %zero:_(<2 x s32>) = G_BUILD_VECTOR %zero_scalar(s32), %zero_scalar(s32)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x s32>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x s32>), %zero:_
+ %and:_(<2 x s1>) = G_OR %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %and:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_p0_src
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: invalid_p0_src
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(p0) = COPY $x0
+ ; CHECK-NEXT: %y:_(p0) = COPY $x1
+ ; CHECK-NEXT: %zero:_(p0) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(p0), %zero
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(p0), %zero
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(p0) = COPY $x0
+ %y:_(p0) = COPY $x1
+ %zero:_(p0) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(p0), %zero:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(p0), %zero:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_p0_src_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $q0, $q1
+
+ ; CHECK-LABEL: name: invalid_p0_src_vec
+ ; CHECK: liveins: $q0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x p0>) = COPY $q0
+ ; CHECK-NEXT: %y:_(<2 x p0>) = COPY $q1
+ ; CHECK-NEXT: %scalar0:_(p0) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %zero:_(<2 x p0>) = G_BUILD_VECTOR %scalar0(p0), %scalar0(p0)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x(<2 x p0>), %zero
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y(<2 x p0>), %zero
+ ; CHECK-NEXT: %or:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s64>) = G_ZEXT %or(<2 x s1>)
+ ; CHECK-NEXT: $q0 = COPY %zext(<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+ %x:_(<2 x p0>) = COPY $q0
+ %y:_(<2 x p0>) = COPY $q1
+ %scalar0:_(p0) = G_CONSTANT i64 0
+ %zero:_(<2 x p0>) = G_BUILD_VECTOR %scalar0(p0), %scalar0(p0)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x p0>), %zero:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x p0>), %zero:_
+ %or:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s64>) = G_ZEXT %or:_(<2 x s1>)
+ $q0 = COPY %zext
+ RET_ReallyLR implicit $q0
+
+...
+---
+name: invalid_diff_src_ty
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $x1
+
+ ; CHECK-LABEL: name: invalid_diff_src_ty
+ ; CHECK: liveins: $w0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(s32) = COPY $w0
+ ; CHECK-NEXT: %y:_(s64) = COPY $x1
+ ; CHECK-NEXT: %zero_s32:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %zero_s64:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %cmp1:_(s1) = G_ICMP intpred(eq), %x(s32), %zero_s32
+ ; CHECK-NEXT: %cmp2:_(s1) = G_ICMP intpred(eq), %y(s64), %zero_s64
+ ; CHECK-NEXT: %and:_(s1) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(s64) = G_ZEXT %and(s1)
+ ; CHECK-NEXT: $x0 = COPY %zext(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s32) = COPY $w0
+ %y:_(s64) = COPY $x1
+ %zero_s32:_(s32) = G_CONSTANT i32 0
+ %zero_s64:_(s64) = G_CONSTANT i64 0
+ %cmp1:_(s1) = G_ICMP intpred(eq), %x:_(s32), %zero_s32:_
+ %cmp2:_(s1) = G_ICMP intpred(eq), %y:_(s64), %zero_s64:_
+ %and:_(s1) = G_AND %cmp1, %cmp2
+ %zext:_(s64) = G_ZEXT %and:_(s1)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: invalid_diff_src_ty_vec
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $x0, $q1
+
+ ; CHECK-LABEL: name: invalid_diff_src_ty_vec
+ ; CHECK: liveins: $x0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+ ; CHECK-NEXT: %y:_(<2 x s64>) = COPY $q1
+ ; CHECK-NEXT: %scalar0s32:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %scalar0s64:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: %zero_s32:_(<2 x s32>) = G_BUILD_VECTOR %scalar0s32(s32), %scalar0s32(s32)
+ ; CHECK-NEXT: %zero_s64:_(<2 x s64>) = G_BUILD_VECTOR %scalar0s64(s64), %scalar0s64(s64)
+ ; CHECK-NEXT: %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x(<2 x s32>), %zero_s32
+ ; CHECK-NEXT: %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y(<2 x s64>), %zero_s64
+ ; CHECK-NEXT: %or:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ ; CHECK-NEXT: %zext:_(<2 x s32>) = G_ZEXT %or(<2 x s1>)
+ ; CHECK-NEXT: $x0 = COPY %zext(<2 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(<2 x s32>) = COPY $x0
+ %y:_(<2 x s64>) = COPY $q1
+ %scalar0s32:_(s32) = G_CONSTANT i32 0
+ %scalar0s64:_(s64) = G_CONSTANT i64 0
+ %zero_s32:_(<2 x s32>) = G_BUILD_VECTOR %scalar0s32(s32), %scalar0s32(s32)
+ %zero_s64:_(<2 x s64>) = G_BUILD_VECTOR %scalar0s64(s64), %scalar0s64(s64)
+ %cmp1:_(<2 x s1>) = G_ICMP intpred(eq), %x:_(<2 x s32>), %zero_s32:_
+ %cmp2:_(<2 x s1>) = G_ICMP intpred(eq), %y:_(<2 x s64>), %zero_s64:_
+ %or:_(<2 x s1>) = G_AND %cmp1, %cmp2
+ %zext:_(<2 x s32>) = G_ZEXT %or:_(<2 x s1>)
+ $x0 = COPY %zext
+ RET_ReallyLR implicit $x0
+
+...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
index ef8e466..42f6570 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
@@ -26,7 +26,7 @@ define void @asm_simple_register_clobber() {
define i64 @asm_register_early_clobber() {
; CHECK-LABEL: name: asm_register_early_clobber
; CHECK: bb.1 (%ir-block.0):
- ; CHECK-NEXT: INLINEASM &"mov $0, 7; mov $1, 7", 1 /* sideeffect attdialect */, 2752523 /* regdef-ec:GPR64common */, def early-clobber %0, 2752523 /* regdef-ec:GPR64common */, def early-clobber %1, !0
+ ; CHECK-NEXT: INLINEASM &"mov $0, 7; mov $1, 7", 1 /* sideeffect attdialect */, {{[0-9]+}} /* regdef-ec:GPR64common */, def early-clobber %0, {{[0-9]+}} /* regdef-ec:GPR64common */, def early-clobber %1, !0
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY %0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY %1
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
@@ -54,7 +54,7 @@ entry:
define i32 @test_single_register_output() nounwind ssp {
; CHECK-LABEL: name: test_single_register_output
; CHECK: bb.1.entry:
- ; CHECK-NEXT: INLINEASM &"mov ${0:w}, 7", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %0
+ ; CHECK-NEXT: INLINEASM &"mov ${0:w}, 7", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %0
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY %0
; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
@@ -66,7 +66,7 @@ entry:
define i64 @test_single_register_output_s64() nounwind ssp {
; CHECK-LABEL: name: test_single_register_output_s64
; CHECK: bb.1.entry:
- ; CHECK-NEXT: INLINEASM &"mov $0, 7", 0 /* attdialect */, 2752522 /* regdef:GPR64common */, def %0
+ ; CHECK-NEXT: INLINEASM &"mov $0, 7", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR64common */, def %0
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY %0
; CHECK-NEXT: $x0 = COPY [[COPY]](s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
@@ -79,7 +79,7 @@ entry:
define float @test_multiple_register_outputs_same() #0 {
; CHECK-LABEL: name: test_multiple_register_outputs_same
; CHECK: bb.1 (%ir-block.0):
- ; CHECK-NEXT: INLINEASM &"mov $0, #0; mov $1, #0", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %0, 1703946 /* regdef:GPR32common */, def %1
+ ; CHECK-NEXT: INLINEASM &"mov $0, #0; mov $1, #0", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %0, {{[0-9]+}} /* regdef:GPR32common */, def %1
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY %0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY %1
; CHECK-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[COPY]], [[COPY1]]
@@ -96,7 +96,7 @@ define float @test_multiple_register_outputs_same() #0 {
define double @test_multiple_register_outputs_mixed() #0 {
; CHECK-LABEL: name: test_multiple_register_outputs_mixed
; CHECK: bb.1 (%ir-block.0):
- ; CHECK-NEXT: INLINEASM &"mov $0, #0; mov $1, #0", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %0, 2555914 /* regdef:FPR64 */, def %1
+ ; CHECK-NEXT: INLINEASM &"mov $0, #0; mov $1, #0", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %0, {{[0-9]+}} /* regdef:FPR64 */, def %1
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY %0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY %1
; CHECK-NEXT: $d0 = COPY [[COPY1]](s64)
@@ -125,7 +125,7 @@ define zeroext i8 @test_register_output_trunc(ptr %src) nounwind {
; CHECK-NEXT: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK-NEXT: INLINEASM &"mov ${0:w}, 32", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %1
+ ; CHECK-NEXT: INLINEASM &"mov ${0:w}, 32", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %1
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY %1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s8)
@@ -155,7 +155,7 @@ define void @test_input_register_imm() {
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 42
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64common = COPY [[C]](s64)
- ; CHECK-NEXT: INLINEASM &"mov x0, $0", 1 /* sideeffect attdialect */, 2752521 /* reguse:GPR64common */, [[COPY]]
+ ; CHECK-NEXT: INLINEASM &"mov x0, $0", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:GPR64common */, [[COPY]]
; CHECK-NEXT: RET_ReallyLR
call void asm sideeffect "mov x0, $0", "r"(i64 42)
ret void
@@ -190,7 +190,7 @@ define zeroext i8 @test_input_register(ptr %src) nounwind {
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY [[COPY]](p0)
- ; CHECK-NEXT: INLINEASM &"ldtrb ${0:w}, [$1]", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %1, 2752521 /* reguse:GPR64common */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"ldtrb ${0:w}, [$1]", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %1, {{[0-9]+}} /* reguse:GPR64common */, [[COPY1]]
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY %1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s8)
@@ -207,7 +207,7 @@ define i32 @test_memory_constraint(ptr %a) nounwind {
; CHECK-NEXT: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK-NEXT: INLINEASM &"ldr $0, $1", 8 /* mayload attdialect */, 1703946 /* regdef:GPR32common */, def %1, 262158 /* mem:m */, [[COPY]](p0)
+ ; CHECK-NEXT: INLINEASM &"ldr $0, $1", 8 /* mayload attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %1, 262158 /* mem:m */, [[COPY]](p0)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY %1
; CHECK-NEXT: $w0 = COPY [[COPY1]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
@@ -221,7 +221,7 @@ define i16 @test_anyext_input() {
; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32common = COPY [[ANYEXT]](s32)
- ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, 1703946 /* regdef:GPR32common */, def %0, 1703945 /* reguse:GPR32common */, [[COPY]]
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %0, {{[0-9]+}} /* reguse:GPR32common */, [[COPY]]
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY %0
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
@@ -237,7 +237,7 @@ define i16 @test_anyext_input_with_matching_constraint() {
; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32common = COPY [[ANYEXT]](s32)
- ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, 1703946 /* regdef:GPR32common */, def %0, 2147483657 /* reguse tiedto:$0 */, [[COPY]](tied-def 3)
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %0, 2147483657 /* reguse tiedto:$0 */, [[COPY]](tied-def 3)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY %0
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll
index 59eb80a..fbffb50 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll
@@ -71,7 +71,7 @@ define void @test2() #0 personality ptr @__gcc_personality_v0 {
; CHECK-NEXT: G_INVOKE_REGION_START
; CHECK-NEXT: EH_LABEL <mcsymbol >
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64common = COPY [[DEF]](p0)
- ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, 2752521 /* reguse:GPR64common */, [[COPY]]
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:GPR64common */, [[COPY]]
; CHECK-NEXT: EH_LABEL <mcsymbol >
; CHECK-NEXT: G_BR %bb.2
; CHECK-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll b/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
index 8ed7059..5829969 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
@@ -5,7 +5,7 @@ entry:
; CHECK: %0:ppr = COPY $p0
; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
; CHECK: %1:pnr_p8to15 = COPY %0
-; CHECK: INLINEASM &"ld1w {z0.s,z1.s,z2.s,z3.s}, $0/z, [x10]", 1 /* sideeffect attdialect */, 458761 /* reguse:PNR_p8to15 */, %1
+; CHECK: INLINEASM &"ld1w {z0.s,z1.s,z2.s,z3.s}, $0/z, [x10]", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR_p8to15 */, %1
; CHECK: RET_ReallyLR
%predcnt.addr = alloca target("aarch64.svcount"), align 2
store target("aarch64.svcount") %predcnt, ptr %predcnt.addr, align 2
@@ -19,7 +19,7 @@ entry:
; CHECK: %0:ppr = COPY $p0
; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
; CHECK: %1:pnr = COPY %0
-; CHECK: INLINEASM &"ld1w {z0.s,z1.s,z2.s,z3.s}, $0/z, [x10]", 1 /* sideeffect attdialect */, 262153 /* reguse:PNR */, %1
+; CHECK: INLINEASM &"ld1w {z0.s,z1.s,z2.s,z3.s}, $0/z, [x10]", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR */, %1
; CHECK: RET_ReallyLR
%predcnt.addr = alloca target("aarch64.svcount"), align 2
store target("aarch64.svcount") %predcnt, ptr %predcnt.addr, align 2
@@ -33,7 +33,7 @@ entry:
; CHECK: %0:ppr = COPY $p0
; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
; CHECK: %1:pnr_3b = COPY %0
-; CHECK: INLINEASM &"fadd z0.h, $0/m, z0.h, #0.5", 1 /* sideeffect attdialect */, 393225 /* reguse:PNR_3b */, %1
+; CHECK: INLINEASM &"fadd z0.h, $0/m, z0.h, #0.5", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR_3b */, %1
; CHECK: RET_ReallyLR
%predcnt.addr = alloca target("aarch64.svcount"), align 2
store target("aarch64.svcount") %predcnt, ptr %predcnt.addr, align 2
diff --git a/llvm/test/CodeGen/AArch64/aes.ll b/llvm/test/CodeGen/AArch64/aes.ll
new file mode 100644
index 0000000..2bef28d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aes.ll
@@ -0,0 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc %s -o - -mtriple=aarch64 -mattr=+aes | FileCheck %s
+
+declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
+declare <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d, <16 x i8> %k)
+
+define <16 x i8> @aese(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: aese:
+; CHECK: // %bb.0:
+; CHECK-NEXT: aese v0.16b, v1.16b
+; CHECK-NEXT: ret
+ %r = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %r
+}
+
+define <16 x i8> @aese_c(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: aese_c:
+; CHECK: // %bb.0:
+; CHECK-NEXT: aese v1.16b, v0.16b
+; CHECK-NEXT: mov v0.16b, v1.16b
+; CHECK-NEXT: ret
+ %r = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %b, <16 x i8> %a)
+ ret <16 x i8> %r
+}
+
+define <16 x i8> @aesd(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: aesd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: aesd v0.16b, v1.16b
+; CHECK-NEXT: ret
+ %r = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %r
+}
+
+define <16 x i8> @aesd_c(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: aesd_c:
+; CHECK: // %bb.0:
+; CHECK-NEXT: aesd v1.16b, v0.16b
+; CHECK-NEXT: mov v0.16b, v1.16b
+; CHECK-NEXT: ret
+ %r = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %b, <16 x i8> %a)
+ ret <16 x i8> %r
+}
diff --git a/llvm/test/CodeGen/AArch64/callbr-asm-outputs-indirect-isel.ll b/llvm/test/CodeGen/AArch64/callbr-asm-outputs-indirect-isel.ll
index 3b7b5dd3..fbe89e7 100644
--- a/llvm/test/CodeGen/AArch64/callbr-asm-outputs-indirect-isel.ll
+++ b/llvm/test/CodeGen/AArch64/callbr-asm-outputs-indirect-isel.ll
@@ -18,7 +18,7 @@ define i32 @test0() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.2(0x80000000), %bb.1(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"# $0", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %5, 13 /* imm */, %bb.1
+ ; CHECK-NEXT: INLINEASM_BR &"# $0", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %5, 13 /* imm */, %bb.1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %5
; CHECK-NEXT: B %bb.2
; CHECK-NEXT: {{ $}}
@@ -31,7 +31,7 @@ define i32 @test0() {
; CHECK-NEXT: bb.2.direct:
; CHECK-NEXT: successors: %bb.4(0x80000000), %bb.3(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"# $0", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %7, 13 /* imm */, %bb.3
+ ; CHECK-NEXT: INLINEASM_BR &"# $0", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %7, 13 /* imm */, %bb.3
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY %7
; CHECK-NEXT: B %bb.4
; CHECK-NEXT: {{ $}}
@@ -107,7 +107,7 @@ define i32 @dont_split1() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x80000000), %bb.2(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %1, 13 /* imm */, %bb.2
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %1, 13 /* imm */, %bb.2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %1
; CHECK-NEXT: B %bb.1
; CHECK-NEXT: {{ $}}
@@ -168,7 +168,7 @@ define i32 @dont_split3() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x80000000), %bb.2(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %0, 13 /* imm */, %bb.2
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %0, 13 /* imm */, %bb.2
; CHECK-NEXT: B %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1.x:
@@ -194,7 +194,7 @@ define i32 @split_me0() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.2(0x80000000), %bb.1(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %3
; CHECK-NEXT: B %bb.2
; CHECK-NEXT: {{ $}}
@@ -244,7 +244,7 @@ define i32 @split_me1(i1 %z) {
; CHECK-NEXT: bb.1.w:
; CHECK-NEXT: successors: %bb.3(0x80000000), %bb.2(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %5, 13 /* imm */, %bb.2, 13 /* imm */, %bb.2
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %5, 13 /* imm */, %bb.2, 13 /* imm */, %bb.2
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY %5
; CHECK-NEXT: B %bb.3
; CHECK-NEXT: {{ $}}
@@ -297,7 +297,7 @@ define i32 @split_me2(i1 %z) {
; CHECK-NEXT: bb.1.w:
; CHECK-NEXT: successors: %bb.3(0x80000000), %bb.2(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %6, 13 /* imm */, %bb.2, 13 /* imm */, %bb.2
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %6, 13 /* imm */, %bb.2, 13 /* imm */, %bb.2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY %6
; CHECK-NEXT: B %bb.3
; CHECK-NEXT: {{ $}}
@@ -340,7 +340,7 @@ define i32 @dont_split4() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x80000000), %bb.2(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.2
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.2
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %3
; CHECK-NEXT: B %bb.1
; CHECK-NEXT: {{ $}}
@@ -379,7 +379,7 @@ define i32 @dont_split5() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.2(0x80000000), %bb.1(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %3
; CHECK-NEXT: B %bb.2
; CHECK-NEXT: {{ $}}
@@ -410,7 +410,7 @@ define i32 @split_me3() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.2(0x80000000), %bb.1(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %3
; CHECK-NEXT: B %bb.2
; CHECK-NEXT: {{ $}}
@@ -456,7 +456,7 @@ define i32 @dont_split6(i32 %0) {
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[PHI:%[0-9]+]]:gpr32all = PHI [[COPY]], %bb.0, %2, %bb.2
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32common = COPY [[PHI]]
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %4, 2147483657 /* reguse tiedto:$0 */, [[COPY1]](tied-def 3), 13 /* imm */, %bb.2
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %4, 2147483657 /* reguse tiedto:$0 */, [[COPY1]](tied-def 3), 13 /* imm */, %bb.2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY %4
; CHECK-NEXT: B %bb.3
; CHECK-NEXT: {{ $}}
@@ -491,7 +491,7 @@ define i32 @split_me4() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.2(0x80000000), %bb.1(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %3
; CHECK-NEXT: B %bb.2
; CHECK-NEXT: {{ $}}
@@ -522,7 +522,7 @@ define i32 @split_me5() {
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.2(0x80000000), %bb.1(0x00000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, 1703946 /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
+ ; CHECK-NEXT: INLINEASM_BR &"", 0 /* attdialect */, {{[0-9]+}} /* regdef:GPR32common */, def %3, 13 /* imm */, %bb.1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %3
; CHECK-NEXT: B %bb.2
; CHECK-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AArch64/emit_fneg_with_non_register_operand.mir b/llvm/test/CodeGen/AArch64/emit_fneg_with_non_register_operand.mir
index 483dbd2..92fb053 100644
--- a/llvm/test/CodeGen/AArch64/emit_fneg_with_non_register_operand.mir
+++ b/llvm/test/CodeGen/AArch64/emit_fneg_with_non_register_operand.mir
@@ -91,10 +91,10 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[LOADgot:%[0-9]+]]:gpr64common = LOADgot target-flags(aarch64-got) @c
; CHECK-NEXT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[LOADgot]], 0 :: (dereferenceable load (s64) from @c)
- ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, 2359306 /* regdef:WSeqPairsClass_with_sube32_in_MatrixIndexGPR32_12_15 */, def %2, 2147483657 /* reguse tiedto:$0 */, [[LDRDui]](tied-def 3)
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, {{[0-9]+}} /* regdef:WSeqPairsClass_with_sube32_in_MatrixIndexGPR32_12_15 */, def %2, 2147483657 /* reguse tiedto:$0 */, [[LDRDui]](tied-def 3)
; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY %2
; CHECK-NEXT: [[LDRDui1:%[0-9]+]]:fpr64 = LDRDui [[LOADgot]], 0 :: (dereferenceable load (s64) from @c)
- ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, 2359306 /* regdef:WSeqPairsClass_with_sube32_in_MatrixIndexGPR32_12_15 */, def %4, 2147483657 /* reguse tiedto:$0 */, [[LDRDui1]](tied-def 3)
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */, {{[0-9]+}} /* regdef:WSeqPairsClass_with_sube32_in_MatrixIndexGPR32_12_15 */, def %4, 2147483657 /* reguse tiedto:$0 */, [[LDRDui1]](tied-def 3)
; CHECK-NEXT: [[FNEGDr:%[0-9]+]]:fpr64 = FNEGDr %2
; CHECK-NEXT: nofpexcept FCMPDrr %4, killed [[FNEGDr]], implicit-def $nzcv, implicit $fpcr
; CHECK-NEXT: Bcc 1, %bb.2, implicit $nzcv
diff --git a/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir b/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir
index 041b2dc..6514834 100644
--- a/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir
+++ b/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir
@@ -487,7 +487,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64common = COPY $x0
; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[DEF]]
- ; CHECK-NEXT: INLINEASM &"ldr ${0:s}, $1", 8 /* mayload attdialect */, 2359306 /* regdef:WSeqPairsClass_with_sube32_in_MatrixIndexGPR32_12_15 */, def %1, 262158 /* mem:m */, killed [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"ldr ${0:s}, $1", 8 /* mayload attdialect */, {{[0-9]+}} /* regdef:WSeqPairsClass_with_sube32_in_MatrixIndexGPR32_12_15 */, def %1, 262158 /* mem:m */, killed [[COPY1]]
; CHECK-NEXT: [[MOVIv2d_ns:%[0-9]+]]:fpr128 = MOVIv2d_ns 0
; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY [[MOVIv2d_ns]].dsub
; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AArch64/setcc_knownbits.ll b/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
index 46b714d..bb9546a 100644
--- a/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
+++ b/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
@@ -21,9 +21,7 @@ define noundef i1 @logger(i32 noundef %logLevel, ptr %ea, ptr %pll) {
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB1_2: // %land.rhs
; CHECK-NEXT: ldr x8, [x1]
-; CHECK-NEXT: ldrb w8, [x8]
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ldrb w0, [x8]
; CHECK-NEXT: ret
entry:
%0 = load i32, ptr %pll, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
index 0f70c19..d4d5cb1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -global-isel -amdgpu-global-isel-risky-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
; Divergent phis that don't require lowering using lane mask merging
@@ -147,32 +147,28 @@ define void @divergent_i1_phi_used_inside_loop_bigger_loop_body(float %val, floa
; GFX10-LABEL: divergent_i1_phi_used_inside_loop_bigger_loop_body:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_cmp_lt_f32_e32 vcc_lo, 1.0, v1
-; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: v_cmp_lt_f32_e64 s5, 1.0, v1
; GFX10-NEXT: v_mov_b32_e32 v1, 0x3e8
-; GFX10-NEXT: v_mov_b32_e32 v8, s5
+; GFX10-NEXT: v_mov_b32_e32 v8, s4
; GFX10-NEXT: ; implicit-def: $sgpr6
-; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc_lo
; GFX10-NEXT: s_branch .LBB3_2
; GFX10-NEXT: .LBB3_1: ; %loop_body
; GFX10-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX10-NEXT: v_cvt_f32_u32_e32 v9, v8
-; GFX10-NEXT: s_xor_b32 s4, s4, -1
+; GFX10-NEXT: s_xor_b32 s5, s5, -1
; GFX10-NEXT: v_add_nc_u32_e32 v8, 1, v8
; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v9, v0
-; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, 1, s4
-; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 s6, s6, exec_lo
-; GFX10-NEXT: s_and_b32 s4, exec_lo, s4
-; GFX10-NEXT: s_or_b32 s6, s6, s4
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_and_b32 s7, exec_lo, s5
+; GFX10-NEXT: s_or_b32 s6, s6, s7
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execz .LBB3_6
; GFX10-NEXT: .LBB3_2: ; %loop_start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_and_b32_e32 v9, 1, v9
; GFX10-NEXT: v_cmp_ge_i32_e32 vcc_lo, 0x3e8, v8
; GFX10-NEXT: s_mov_b32 s7, 1
-; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v9
; GFX10-NEXT: s_cbranch_vccz .LBB3_4
; GFX10-NEXT: ; %bb.3: ; %else
; GFX10-NEXT: ; in Loop: Header=BB3_2 Depth=1
@@ -189,7 +185,7 @@ define void @divergent_i1_phi_used_inside_loop_bigger_loop_body(float %val, floa
; GFX10-NEXT: flat_store_dword v[4:5], v1
; GFX10-NEXT: s_branch .LBB3_1
; GFX10-NEXT: .LBB3_6: ; %exit
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s6
; GFX10-NEXT: flat_store_dword v[2:3], v0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.mir
index 5549c89..9b0bd27 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.mir
@@ -33,6 +33,7 @@ body: |
; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(uge), [[COPY2]](s32), [[C]]
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY3]](s32), [[C1]]
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
; GFX10-NEXT: G_BRCOND [[ICMP1]](s1), %bb.2
; GFX10-NEXT: G_BR %bb.1
; GFX10-NEXT: {{ $}}
@@ -46,7 +47,8 @@ body: |
; GFX10-NEXT: bb.2:
; GFX10-NEXT: successors: %bb.4(0x80000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = G_PHI %14(s1), %bb.3, [[ICMP]](s1), %bb.0
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[COPY6]](s1), %bb.0, %20(s1), %bb.3
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.3:
@@ -54,12 +56,13 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY2]](s32), [[C3]]
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.4:
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[PHI]](s1), [[C5]], [[C4]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY7]](s1), [[C5]], [[C4]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p1) :: (store (s32), addrspace 1)
; GFX10-NEXT: S_ENDPGM 0
bb.0:
@@ -126,9 +129,10 @@ body: |
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr0
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(uge), [[COPY2]](s32), [[C]]
- ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY3]](s32), [[C1]]
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[COPY4]](s1)
; GFX10-NEXT: G_BRCOND [[ICMP1]](s1), %bb.2
; GFX10-NEXT: G_BR %bb.1
; GFX10-NEXT: {{ $}}
@@ -137,17 +141,17 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY2]](s32), [[C2]]
- ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY4]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY5]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY5]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY6]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.2:
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[ICMP]](s1), %bb.0, [[S_OR_B32_]](s1), %bb.1
- ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[COPY4]](s1), %bb.0, [[S_OR_B32_]](s1), %bb.1
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY6]](s1), [[C4]], [[C3]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY7]](s1), [[C4]], [[C3]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p1) :: (store (s32), addrspace 1)
; GFX10-NEXT: S_ENDPGM 0
bb.0:
@@ -292,19 +296,21 @@ body: |
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
; GFX10-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[COPY1]](s32), [[C1]]
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[FCMP]](s1)
; GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.1:
; GFX10-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF]](s1), %bb.0, %39(s1), %bb.5
- ; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI %15(s32), %bb.5, [[C]](s32), %bb.0
- ; GFX10-NEXT: [[PHI2:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %17(s32), %bb.5
- ; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = G_PHI [[FCMP]](s1), %bb.0, %19(s1), %bb.5
- ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF]](s1), %bb.0, %42(s1), %bb.5
+ ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[COPY8]](s1), %bb.0, %39(s1), %bb.5
+ ; GFX10-NEXT: [[PHI2:%[0-9]+]]:_(s32) = G_PHI %15(s32), %bb.5, [[C]](s32), %bb.0
+ ; GFX10-NEXT: [[PHI3:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %17(s32), %bb.5
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1000
- ; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[PHI2]](s32), [[C3]]
+ ; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[PHI3]](s32), [[C3]]
; GFX10-NEXT: G_BRCOND [[ICMP]](s1), %bb.4
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
@@ -336,26 +342,27 @@ body: |
; GFX10-NEXT: successors: %bb.6(0x04000000), %bb.1(0x7c000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C8:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[PHI3]], [[C8]]
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[XOR1]](s1)
- ; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[PHI2]](s32)
+ ; GFX10-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[COPY10]], [[C8]]
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[XOR1]](s1)
+ ; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[PHI3]](s32)
; GFX10-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UITOFP]](s32), [[COPY]]
; GFX10-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI2]], [[C9]]
- ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[FCMP1]](s1), [[PHI1]](s32)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY9]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI3]], [[C9]]
+ ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[FCMP1]](s1), [[PHI2]](s32)
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[XOR1]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY9]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY11]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.6
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.6:
; GFX10-NEXT: [[PHI5:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT]](s32), %bb.5
- ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI5]](s32)
; GFX10-NEXT: [[C10:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
; GFX10-NEXT: [[C11:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY10]](s1), [[C11]], [[C10]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY13]](s1), [[C11]], [[C10]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p0) :: (store (s32))
; GFX10-NEXT: SI_RETURN
bb.0:
@@ -475,6 +482,7 @@ body: |
; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[AND1]](s32)
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[TRUNC1]], [[C5]]
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sreg_32(s1) = COPY [[C5]](s1)
; GFX10-NEXT: G_BRCOND [[XOR]](s1), %bb.2
; GFX10-NEXT: G_BR %bb.1
; GFX10-NEXT: {{ $}}
@@ -487,9 +495,10 @@ body: |
; GFX10-NEXT: bb.2:
; GFX10-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:_(s32) = G_PHI %30(s32), %bb.4, [[DEF]](s32), %bb.0
- ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = G_PHI %32(s1), %bb.4, [[C5]](s1), %bb.0
- ; GFX10-NEXT: G_BRCOND [[PHI1]](s1), %bb.5
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[COPY3]](s1), %bb.0, %58(s1), %bb.4
+ ; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI %30(s32), %bb.4, [[DEF]](s32), %bb.0
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: G_BRCOND [[COPY4]](s1), %bb.5
; GFX10-NEXT: G_BR %bb.6
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.3:
@@ -517,6 +526,7 @@ body: |
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[PHI5]](s32), [[AMDGPU_BUFFER_LOAD]]
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP2]]
; GFX10-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s1)
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[C10]](s1)
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.5:
@@ -527,7 +537,7 @@ body: |
; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[C11]]
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.6:
- ; GFX10-NEXT: [[PHI6:%[0-9]+]]:_(s32) = G_PHI [[PHI]](s32), %bb.2, [[OR2]](s32), %bb.5
+ ; GFX10-NEXT: [[PHI6:%[0-9]+]]:_(s32) = G_PHI [[PHI1]](s32), %bb.2, [[OR2]](s32), %bb.5
; GFX10-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[LOAD]](<8 x s32>)
; GFX10-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY1]]
; GFX10-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
index e9df20f..49c2326 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -amdgpu-global-isel-risky-select -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
; This file contains various tests that have divergent i1s used outside of
; the loop. These are lane masks in sgpr and need to have correct value in
@@ -137,28 +137,24 @@ define void @divergent_i1_xor_used_outside_loop(float %val, float %pre.cond.val,
; GFX10-LABEL: divergent_i1_xor_used_outside_loop:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_cmp_lt_f32_e32 vcc_lo, 1.0, v1
-; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: v_cmp_lt_f32_e64 s5, 1.0, v1
+; GFX10-NEXT: v_mov_b32_e32 v1, s4
; GFX10-NEXT: ; implicit-def: $sgpr6
-; GFX10-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc_lo
; GFX10-NEXT: .LBB2_1: ; %loop
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_and_b32_e32 v4, 1, v4
-; GFX10-NEXT: v_cvt_f32_u32_e32 v5, v1
+; GFX10-NEXT: v_cvt_f32_u32_e32 v4, v1
+; GFX10-NEXT: s_xor_b32 s5, s5, -1
; GFX10-NEXT: v_add_nc_u32_e32 v1, 1, v1
-; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX10-NEXT: v_cmp_gt_f32_e64 s4, v5, v0
-; GFX10-NEXT: s_xor_b32 s7, vcc_lo, -1
-; GFX10-NEXT: s_or_b32 s5, s4, s5
-; GFX10-NEXT: v_mov_b32_e32 v4, s7
-; GFX10-NEXT: s_andn2_b32 s4, s6, exec_lo
-; GFX10-NEXT: s_and_b32 s6, exec_lo, s7
-; GFX10-NEXT: s_or_b32 s6, s4, s6
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v4, v0
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 s6, s6, exec_lo
+; GFX10-NEXT: s_and_b32 s7, exec_lo, s5
+; GFX10-NEXT: s_or_b32 s6, s6, s7
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB2_1
; GFX10-NEXT: ; %bb.2: ; %exit
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s6
; GFX10-NEXT: flat_store_dword v[2:3], v0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
@@ -197,7 +193,7 @@ define void @divergent_i1_xor_used_outside_loop_larger_loop_body(i32 %num.elts,
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX10-NEXT: s_mov_b32 s5, 0
-; GFX10-NEXT: s_mov_b32 s6, 1
+; GFX10-NEXT: s_mov_b32 s6, -1
; GFX10-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX10-NEXT: s_cbranch_execz .LBB3_6
; GFX10-NEXT: ; %bb.1: ; %loop.start.preheader
@@ -332,7 +328,7 @@ define void @divergent_i1_icmp_used_outside_loop(i32 %v0, i32 %v1, ptr addrspace
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s7
; GFX10-NEXT: v_cmp_ne_u32_e64 s4, v1, v4
-; GFX10-NEXT: s_mov_b32 s7, 1
+; GFX10-NEXT: s_mov_b32 s7, -1
; GFX10-NEXT: ; implicit-def: $vgpr5
; GFX10-NEXT: s_and_saveexec_b32 s8, s4
; GFX10-NEXT: s_cbranch_execz .LBB4_1
@@ -410,7 +406,7 @@ define amdgpu_ps void @divergent_i1_freeze_used_outside_loop(i32 %n, ptr addrspa
; GFX10-LABEL: divergent_i1_freeze_used_outside_loop:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_mov_b32 s0, 0
-; GFX10-NEXT: s_mov_b32 s3, 1
+; GFX10-NEXT: s_mov_b32 s3, -1
; GFX10-NEXT: v_mov_b32_e32 v5, s0
; GFX10-NEXT: ; implicit-def: $sgpr1
; GFX10-NEXT: ; implicit-def: $sgpr2
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.mir
index ace9bec..206c0ad 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.mir
@@ -175,14 +175,15 @@ body: |
; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY9]](s1), $exec_lo, implicit-def $scc
; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY13]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_3:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_OR_B32 [[S_ANDN2_B32_3]](s1), [[S_AND_B32_3]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[COPY7]](s1)
; GFX10-NEXT: G_BRCOND [[ICMP1]](s1), %bb.1
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.4:
- ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[COPY7]](s1)
+ ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[COPY14]](s1)
; GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
; GFX10-NEXT: [[C7:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY14]](s1), [[C7]], [[C6]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY15]](s1), [[C7]], [[C6]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV1]](p0) :: (store (s32))
; GFX10-NEXT: SI_RETURN
bb.0:
@@ -255,37 +256,40 @@ body: |
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
; GFX10-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[COPY1]](s32), [[C1]]
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[FCMP]](s1)
; GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.1:
; GFX10-NEXT: successors: %bb.2(0x04000000), %bb.1(0x7c000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF]](s1), %bb.0, %24(s1), %bb.1
- ; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI %9(s32), %bb.1, [[C]](s32), %bb.0
- ; GFX10-NEXT: [[PHI2:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %11(s32), %bb.1
- ; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = G_PHI [[FCMP]](s1), %bb.0, %13(s1), %bb.1
- ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF]](s1), %bb.0, %27(s1), %bb.1
+ ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[COPY4]](s1), %bb.0, %24(s1), %bb.1
+ ; GFX10-NEXT: [[PHI2:%[0-9]+]]:_(s32) = G_PHI %9(s32), %bb.1, [[C]](s32), %bb.0
+ ; GFX10-NEXT: [[PHI3:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %11(s32), %bb.1
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[PHI3]], [[C2]]
- ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[XOR]](s1)
- ; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[PHI2]](s32)
+ ; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[COPY6]], [[C2]]
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[XOR]](s1)
+ ; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[PHI3]](s32)
; GFX10-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UITOFP]](s32), [[COPY]]
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI2]], [[C3]]
- ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[FCMP1]](s1), [[PHI1]](s32)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY4]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY5]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI3]], [[C3]]
+ ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[FCMP1]](s1), [[PHI2]](s32)
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[XOR]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY5]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY7]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.2:
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT]](s32), %bb.1
- ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI4]](s32)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY6]](s1), [[C5]], [[C4]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY9]](s1), [[C5]], [[C4]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p0) :: (store (s32))
; GFX10-NEXT: SI_RETURN
bb.0:
@@ -349,7 +353,8 @@ body: |
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[C]]
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[C1]](s1)
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[C1]](s1)
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[COPY5]](s1)
; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP]](s1), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.1
; GFX10-NEXT: {{ $}}
@@ -365,26 +370,26 @@ body: |
; GFX10-NEXT: bb.2:
; GFX10-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[C1]](s1), %bb.0, %39(s1), %bb.8
- ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[COPY5]](s1), %bb.0, %40(s1), %bb.8
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF]](s32)
- ; GFX10-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY6]](s1), %bb.6, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX10-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY7]](s1), %bb.6, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.5
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.3:
; GFX10-NEXT: successors: %bb.4(0x40000000), %bb.7(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[DEF3]](s1), %bb.1, %72(s1), %bb.7
- ; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32(s1) = PHI [[DEF2]](s1), %bb.1, %61(s1), %bb.7
- ; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.1, %48(s1), %bb.7
+ ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[DEF3]](s1), %bb.1, %73(s1), %bb.7
+ ; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32(s1) = PHI [[DEF2]](s1), %bb.1, %62(s1), %bb.7
+ ; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.1, %49(s1), %bb.7
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI [[C2]](s32), %bb.1, %17(s32), %bb.7
; GFX10-NEXT: [[PHI5:%[0-9]+]]:_(s32) = G_PHI %19(s32), %bb.7, [[C2]](s32), %bb.1
- ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
- ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1)
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1)
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
; GFX10-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[PHI5]](s32)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT]], [[C4]](s32)
@@ -392,14 +397,14 @@ body: |
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32), addrspace 1)
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(ne), [[LOAD]](s32), [[C5]]
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY9]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY11]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY10]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY12]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY10]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY9]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY11]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_1]](s1)
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_1]](s1)
; GFX10-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP1]](s1), %bb.7, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
@@ -407,16 +412,16 @@ body: |
; GFX10-NEXT: successors: %bb.7(0x80000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C6:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[C6]](s1)
+ ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[C6]](s1)
; GFX10-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI5]], [[C7]]
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[PHI5]](s32), [[COPY]]
- ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY12]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY14]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY16:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY13]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY15]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_2:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_2]](s1), [[S_AND_B32_2]](s1), implicit-def $scc
- ; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY13]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY15]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY14]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY16]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_3:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_3]](s1), [[S_AND_B32_3]](s1), implicit-def $scc
; GFX10-NEXT: G_BR %bb.7
; GFX10-NEXT: {{ $}}
@@ -436,15 +441,15 @@ body: |
; GFX10-NEXT: [[PHI6:%[0-9]+]]:sreg_32(s1) = PHI [[S_OR_B32_1]](s1), %bb.3, [[S_OR_B32_3]](s1), %bb.4
; GFX10-NEXT: [[PHI7:%[0-9]+]]:sreg_32(s1) = PHI [[S_OR_B32_]](s1), %bb.3, [[S_OR_B32_2]](s1), %bb.4
; GFX10-NEXT: [[PHI8:%[0-9]+]]:_(s32) = G_PHI [[ADD]](s32), %bb.4, [[DEF]](s32), %bb.3
- ; GFX10-NEXT: [[COPY16:%[0-9]+]]:sreg_32(s1) = COPY [[PHI6]](s1)
- ; GFX10-NEXT: [[COPY17:%[0-9]+]]:sreg_32(s1) = COPY [[PHI7]](s1)
+ ; GFX10-NEXT: [[COPY17:%[0-9]+]]:sreg_32(s1) = COPY [[PHI6]](s1)
+ ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sreg_32(s1) = COPY [[PHI7]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF2]](s32)
; GFX10-NEXT: [[C9:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[COPY17]], [[C9]]
- ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sreg_32(s1) = COPY [[XOR]](s1)
- ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY16]](s1), [[PHI4]](s32)
- ; GFX10-NEXT: [[S_ANDN2_B32_4:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY7]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_4:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY18]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[COPY18]], [[C9]]
+ ; GFX10-NEXT: [[COPY19:%[0-9]+]]:sreg_32(s1) = COPY [[XOR]](s1)
+ ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY17]](s1), [[PHI4]](s32)
+ ; GFX10-NEXT: [[S_ANDN2_B32_4:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_4:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY19]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_4:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_4]](s1), [[S_AND_B32_4]](s1), implicit-def $scc
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.3, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.8
@@ -453,11 +458,11 @@ body: |
; GFX10-NEXT: successors: %bb.2(0x80000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI9:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT]](s32), %bb.7
- ; GFX10-NEXT: [[COPY19:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_4]](s1)
- ; GFX10-NEXT: [[COPY20:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[COPY19]](s1)
+ ; GFX10-NEXT: [[COPY20:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_4]](s1)
+ ; GFX10-NEXT: [[COPY21:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[COPY20]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI9]](s32)
- ; GFX10-NEXT: [[S_ANDN2_B32_5:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY5]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_5:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY20]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_5:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY6]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_5:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY21]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_5:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_OR_B32 [[S_ANDN2_B32_5]](s1), [[S_AND_B32_5]](s1), implicit-def $scc
; GFX10-NEXT: G_BR %bb.2
bb.0:
@@ -574,7 +579,7 @@ body: |
; GFX10-NEXT: bb.1:
; GFX10-NEXT: successors: %bb.2(0x80000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[DEF1]](s1), %bb.0, %38(s1), %bb.6
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[DEF1]](s1), %bb.0, %39(s1), %bb.6
; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI %11(s32), %bb.6, [[C]](s32), %bb.0
; GFX10-NEXT: [[PHI2:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %13(s32), %bb.6
; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI]](s1)
@@ -600,9 +605,10 @@ body: |
; GFX10-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[C2]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF]](s32)
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[PHI2]]
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[C2]](s1)
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[COPY8]](s1)
; GFX10-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP1]](s1), %bb.6, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.5
; GFX10-NEXT: {{ $}}
@@ -610,21 +616,21 @@ body: |
; GFX10-NEXT: successors: %bb.6(0x80000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI2]], [[C4]]
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY9]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY9]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY10]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.6:
; GFX10-NEXT: successors: %bb.7(0x04000000), %bb.1(0x7c000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[C2]](s1), %bb.4, [[S_OR_B32_]](s1), %bb.5
+ ; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[COPY8]](s1), %bb.4, [[S_OR_B32_]](s1), %bb.5
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI [[ADD]](s32), %bb.5, [[DEF]](s32), %bb.4
- ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF1]](s32)
- ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY10]](s1), [[PHI1]](s32)
+ ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY11]](s1), [[PHI1]](s32)
; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY6]](s1), $exec_lo, implicit-def $scc
; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY7]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
@@ -636,9 +642,9 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI5:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT]](s32), %bb.6
; GFX10-NEXT: [[PHI6:%[0-9]+]]:_(s32) = G_PHI [[PHI2]](s32), %bb.6
- ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[S_OR_B32_1]](s1)
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[S_OR_B32_1]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI5]](s32)
- ; GFX10-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY11]](s1), %bb.9, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX10-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY12]](s1), %bb.9, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.8
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.8:
@@ -751,26 +757,27 @@ body: |
; GFX10-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY3]](s32), [[COPY4]](s32)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[C1]](s1)
; GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: [[DEF1:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.1:
; GFX10-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.0, %53(s1), %bb.3
- ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[DEF]](s1), %bb.0, %42(s1), %bb.3
- ; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[C1]](s1), %bb.0, %32(s1), %bb.3
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.0, %54(s1), %bb.3
+ ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[DEF]](s1), %bb.0, %43(s1), %bb.3
+ ; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[COPY5]](s1), %bb.0, %33(s1), %bb.3
; GFX10-NEXT: [[PHI3:%[0-9]+]]:_(s32) = G_PHI %10(s32), %bb.3, [[C]](s32), %bb.0
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %12(s32), %bb.3
- ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
- ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
- ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI2]](s1)
- ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[COPY7]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY6]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY8]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI2]](s1)
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[COPY8]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY7]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY9]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
- ; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY7]](s1), %bb.3, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_]](s1)
+ ; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY8]](s1), %bb.3, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.2:
@@ -783,10 +790,10 @@ body: |
; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32), addrspace 1)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[LOAD]](s32), [[C3]]
- ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
; GFX10-NEXT: [[DEF2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = IMPLICIT_DEF
- ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY9]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY10]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY10]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY11]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.3:
@@ -794,32 +801,32 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI5:%[0-9]+]]:sreg_32(s1) = PHI [[S_OR_B32_]](s1), %bb.1, [[S_OR_B32_1]](s1), %bb.2
; GFX10-NEXT: [[PHI6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[PHI2]](s1), %bb.1, [[DEF2]](s1), %bb.2
- ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
- ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI6]](s1)
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI6]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF]](s32)
- ; GFX10-NEXT: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE [[COPY11]]
- ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[FREEZE]](s1)
- ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[FREEZE]](s1)
+ ; GFX10-NEXT: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE [[COPY12]]
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[FREEZE]](s1)
+ ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[FREEZE]](s1)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI4]], [[C4]]
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[PHI4]](s32), [[COPY]]
; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[ICMP1]](s1), [[PHI3]](s32)
- ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY12]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY14]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY13]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY15]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_OR_B32 [[S_ANDN2_B32_2]](s1), [[S_AND_B32_2]](s1), implicit-def $scc
- ; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY5]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY13]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY6]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY14]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_3:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_3]](s1), [[S_AND_B32_3]](s1), implicit-def $scc
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.4:
; GFX10-NEXT: [[PHI7:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT]](s32), %bb.3
- ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_3]](s1)
+ ; GFX10-NEXT: [[COPY16:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_3]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI7]](s32)
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
; GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY15]](s1), [[C6]], [[C5]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY16]](s1), [[C6]], [[C5]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV1]](p0) :: (store (s32))
; GFX10-NEXT: S_ENDPGM 0
bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
index 609fff5..1698f84 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -amdgpu-global-isel-risky-select -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
; Simple case, if - then, that requires lane mask merging,
; %phi lane mask will hold %val_A at %A. Lanes that are active in %B
@@ -43,13 +43,12 @@ define amdgpu_ps void @divergent_i1_phi_if_else(ptr addrspace(1) %out, i32 %tid,
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_and_b32 s0, 1, s0
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3
-; GFX10-NEXT: v_cmp_ne_u32_e64 s2, 0, s0
-; GFX10-NEXT: ; implicit-def: $sgpr0
+; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
; GFX10-NEXT: s_and_saveexec_b32 s1, vcc_lo
; GFX10-NEXT: s_xor_b32 s1, exec_lo, s1
; GFX10-NEXT: ; %bb.1: ; %B
; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 2, v2
-; GFX10-NEXT: s_andn2_b32 s0, s2, exec_lo
+; GFX10-NEXT: s_andn2_b32 s0, s0, exec_lo
; GFX10-NEXT: ; implicit-def: $vgpr2
; GFX10-NEXT: s_and_b32 s2, exec_lo, vcc_lo
; GFX10-NEXT: s_or_b32 s0, s0, s2
@@ -211,7 +210,7 @@ define amdgpu_cs void @loop_with_2breaks(ptr addrspace(1) %x, ptr addrspace(1) %
; GFX10-NEXT: ; in Loop: Header=BB3_3 Depth=1
; GFX10-NEXT: v_add_co_u32 v9, vcc_lo, v4, v7
; GFX10-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, v5, v8, vcc_lo
-; GFX10-NEXT: s_mov_b32 s4, 1
+; GFX10-NEXT: s_mov_b32 s4, -1
; GFX10-NEXT: global_load_dword v9, v[9:10], off
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v9
@@ -308,7 +307,7 @@ define amdgpu_cs void @loop_with_3breaks(ptr addrspace(1) %x, ptr addrspace(1) %
; GFX10-NEXT: ; in Loop: Header=BB4_4 Depth=1
; GFX10-NEXT: v_add_co_u32 v11, vcc_lo, v4, v9
; GFX10-NEXT: v_add_co_ci_u32_e32 v12, vcc_lo, v5, v10, vcc_lo
-; GFX10-NEXT: s_mov_b32 s4, 1
+; GFX10-NEXT: s_mov_b32 s4, -1
; GFX10-NEXT: global_load_dword v11, v[11:12], off
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v11
@@ -318,7 +317,7 @@ define amdgpu_cs void @loop_with_3breaks(ptr addrspace(1) %x, ptr addrspace(1) %
; GFX10-NEXT: ; in Loop: Header=BB4_4 Depth=1
; GFX10-NEXT: v_add_co_u32 v11, vcc_lo, v6, v9
; GFX10-NEXT: v_add_co_ci_u32_e32 v12, vcc_lo, v7, v10, vcc_lo
-; GFX10-NEXT: s_mov_b32 s5, 1
+; GFX10-NEXT: s_mov_b32 s5, -1
; GFX10-NEXT: global_load_dword v11, v[11:12], off
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v11
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.mir
index df5505e..8197b07 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.mir
@@ -18,9 +18,10 @@ body: |
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(uge), [[COPY2]](s32), [[C]]
- ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(eq), [[COPY3]](s32), [[C1]]
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[COPY4]](s1)
; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP1]](s1), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.1
; GFX10-NEXT: {{ $}}
@@ -29,18 +30,18 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY2]](s32), [[C2]]
- ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY4]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY5]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY5]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY6]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.2:
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[ICMP]](s1), %bb.0, [[S_OR_B32_]](s1), %bb.1
- ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[COPY4]](s1), %bb.0, [[S_OR_B32_]](s1), %bb.1
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF]](s32)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY6]](s1), [[C4]], [[C3]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY7]](s1), [[C4]], [[C3]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p1) :: (store (s32), addrspace 1)
; GFX10-NEXT: S_ENDPGM 0
bb.0:
@@ -91,18 +92,20 @@ body: |
; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s1) = G_IMPLICIT_DEF
- ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[DEF]](s1)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(ne), [[COPY3]](s32), [[C]]
+ ; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[DEF]](s1)
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[COPY4]](s1)
; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP]](s1), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.1:
; GFX10-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF]](s1), %bb.0, %19(s1), %bb.3
- ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
- ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[COPY5]](s1)
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[COPY4]](s1), %bb.0, %20(s1), %bb.3
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[COPY6]](s1)
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[COPY7]](s1)
; GFX10-NEXT: [[SI_ELSE:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_ELSE [[SI_IF]](s32), %bb.4, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
@@ -111,9 +114,9 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(uge), [[COPY2]](s32), [[C1]]
- ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP1]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY6]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY7]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP1]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY9]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
@@ -122,19 +125,19 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY2]](s32), [[C2]]
- ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY4]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY8]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY5]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY10]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
; GFX10-NEXT: G_BR %bb.1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.4:
- ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[COPY5]](s1), %bb.1, [[S_OR_B32_]](s1), %bb.2
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
+ ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI [[COPY7]](s1), %bb.1, [[S_OR_B32_]](s1), %bb.2
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_ELSE]](s32)
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY9]](s1), [[C3]], [[C4]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY11]](s1), [[C3]], [[C4]]
; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p1) :: (store (s32), addrspace 1)
; GFX10-NEXT: S_ENDPGM 0
bb.0:
@@ -368,13 +371,14 @@ body: |
; GFX10-NEXT: successors: %bb.4(0x40000000), %bb.5(0x40000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SEXT]], [[C5]](s32)
; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[MV2]], [[SHL1]](s64)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s32), addrspace 1)
; GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(ne), [[LOAD1]](s32), [[C6]]
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[COPY9]](s1)
; GFX10-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP1]](s1), %bb.5, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
@@ -383,9 +387,9 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[S_OR_B32_]](s1), %bb.1, %47(s1), %bb.5
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI %32(s32), %bb.5, [[DEF]](s32), %bb.1
- ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF]](s32)
- ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY10]](s1), [[PHI1]](s32)
+ ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY11]](s1), [[PHI1]](s32)
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.6
; GFX10-NEXT: {{ $}}
@@ -402,21 +406,21 @@ body: |
; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[PHI2]], [[C8]]
; GFX10-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 100
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[PHI2]](s32), [[C9]]
- ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY9]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY11]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY10]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY12]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.5:
; GFX10-NEXT: successors: %bb.3(0x80000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI5:%[0-9]+]]:sreg_32(s1) = PHI [[C4]](s1), %bb.2, [[S_OR_B32_1]](s1), %bb.4
+ ; GFX10-NEXT: [[PHI5:%[0-9]+]]:sreg_32(s1) = PHI [[COPY9]](s1), %bb.2, [[S_OR_B32_1]](s1), %bb.4
; GFX10-NEXT: [[PHI6:%[0-9]+]]:_(s32) = G_PHI [[ADD1]](s32), %bb.4, [[DEF]](s32), %bb.2
- ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
- ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[COPY12]](s1)
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[COPY13]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF1]](s32)
; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY13]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY14]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_2:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_2]](s1), [[S_AND_B32_2]](s1), implicit-def $scc
; GFX10-NEXT: G_BR %bb.3
; GFX10-NEXT: {{ $}}
@@ -560,13 +564,14 @@ body: |
; GFX10-NEXT: successors: %bb.4(0x40000000), %bb.5(0x40000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SEXT]], [[C5]](s32)
; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[MV2]], [[SHL1]](s64)
; GFX10-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s32), addrspace 1)
; GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(ne), [[LOAD1]](s32), [[C6]]
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[COPY11]](s1)
; GFX10-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP1]](s1), %bb.5, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
@@ -575,9 +580,9 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[S_OR_B32_]](s1), %bb.1, %60(s1), %bb.5
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI %35(s32), %bb.5, [[DEF]](s32), %bb.1
- ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF]](s32)
- ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY12]](s1), [[PHI1]](s32)
+ ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY13]](s1), [[PHI1]](s32)
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.8
; GFX10-NEXT: {{ $}}
@@ -585,26 +590,27 @@ body: |
; GFX10-NEXT: successors: %bb.6(0x40000000), %bb.7(0x40000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C7:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[C7]](s1)
; GFX10-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; GFX10-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[SEXT]], [[C8]](s32)
; GFX10-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[MV3]], [[SHL2]](s64)
; GFX10-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s32), addrspace 1)
; GFX10-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(ne), [[LOAD2]](s32), [[C9]]
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[C7]](s1)
+ ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[COPY14]](s1)
; GFX10-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP2]](s1), %bb.7, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.6
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.5:
; GFX10-NEXT: successors: %bb.3(0x80000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI5:%[0-9]+]]:sreg_32(s1) = PHI [[C4]](s1), %bb.2, %71(s1), %bb.7
+ ; GFX10-NEXT: [[PHI5:%[0-9]+]]:sreg_32(s1) = PHI [[COPY11]](s1), %bb.2, %72(s1), %bb.7
; GFX10-NEXT: [[PHI6:%[0-9]+]]:_(s32) = G_PHI %46(s32), %bb.7, [[DEF]](s32), %bb.2
- ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
- ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[COPY14]](s1)
+ ; GFX10-NEXT: [[COPY16:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
+ ; GFX10-NEXT: [[COPY17:%[0-9]+]]:sreg_32(s1) = COPY [[COPY16]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF1]](s32)
; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY10]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY15]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY17]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
; GFX10-NEXT: G_BR %bb.3
; GFX10-NEXT: {{ $}}
@@ -621,21 +627,21 @@ body: |
; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[PHI2]], [[C11]]
; GFX10-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 100
; GFX10-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[PHI2]](s32), [[C12]]
- ; GFX10-NEXT: [[COPY16:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP3]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY13]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY16]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP3]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY15]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY18]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_2:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_2]](s1), [[S_AND_B32_2]](s1), implicit-def $scc
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.7:
; GFX10-NEXT: successors: %bb.5(0x80000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI7:%[0-9]+]]:sreg_32(s1) = PHI [[C7]](s1), %bb.4, [[S_OR_B32_2]](s1), %bb.6
+ ; GFX10-NEXT: [[PHI7:%[0-9]+]]:sreg_32(s1) = PHI [[COPY14]](s1), %bb.4, [[S_OR_B32_2]](s1), %bb.6
; GFX10-NEXT: [[PHI8:%[0-9]+]]:_(s32) = G_PHI [[ADD1]](s32), %bb.6, [[DEF]](s32), %bb.4
- ; GFX10-NEXT: [[COPY17:%[0-9]+]]:sreg_32(s1) = COPY [[PHI7]](s1)
- ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sreg_32(s1) = COPY [[COPY17]](s1)
+ ; GFX10-NEXT: [[COPY19:%[0-9]+]]:sreg_32(s1) = COPY [[PHI7]](s1)
+ ; GFX10-NEXT: [[COPY20:%[0-9]+]]:sreg_32(s1) = COPY [[COPY19]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF2]](s32)
- ; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY11]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY18]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY12]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY20]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_3:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_3]](s1), [[S_AND_B32_3]](s1), implicit-def $scc
; GFX10-NEXT: G_BR %bb.5
; GFX10-NEXT: {{ $}}
@@ -970,6 +976,7 @@ body: |
; GFX10-NEXT: [[DEF1:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: [[DEF2:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: [[DEF3:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
; GFX10-NEXT: G_BR %bb.7
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.1:
@@ -982,19 +989,19 @@ body: |
; GFX10-NEXT: bb.2:
; GFX10-NEXT: successors: %bb.4(0x40000000), %bb.7(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI %67(s1), %bb.6, %70(s1), %bb.7
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI %67(s1), %bb.6, %71(s1), %bb.7
; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI %49(s1), %bb.6, %48(s1), %bb.7
; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32(s1) = PHI %35(s1), %bb.6, %34(s1), %bb.7
- ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI]](s1)
- ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1)
- ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[COPY9]](s1)
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI]](s1)
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1)
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[COPY10]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %15(s32)
- ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY8]](s1), %17(s32)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY7]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY10]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[COPY9]](s1), %17(s32)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY11]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[S_OR_B32_]](s1)
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[S_OR_B32_]](s1)
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.7, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
@@ -1011,28 +1018,28 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INTRINSIC_CONVERGENT]](s32)
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY5]](s32), [[COPY]]
- ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[C2]](s1)
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[C2]](s1)
; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP]], [[C2]]
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP2]], [[XOR]]
; GFX10-NEXT: [[INTRINSIC_CONVERGENT2:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[OR]](s1), %25(s32)
; GFX10-NEXT: [[DEF4:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: [[DEF5:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 %63(s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY12]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY13]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
- ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY11]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY13]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY12]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY14]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_OR_B32 [[S_ANDN2_B32_2]](s1), [[S_AND_B32_2]](s1), implicit-def $scc
; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT2]](s32), %bb.7, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.5
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.5:
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT2]](s32), %bb.4
- ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_1]](s1)
+ ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_1]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI4]](s32)
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY14]](s1), [[COPY3]], [[COPY2]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY15]](s1), [[COPY3]], [[COPY2]]
; GFX10-NEXT: [[INTRINSIC_CONVERGENT3:%[0-9]+]]:_(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane), [[SELECT]](s32)
; GFX10-NEXT: $sgpr0 = COPY [[INTRINSIC_CONVERGENT3]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0
@@ -1042,14 +1049,14 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI5:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT1]](s32), %bb.3
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
; GFX10-NEXT: [[COPY16:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
+ ; GFX10-NEXT: [[COPY17:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI5]](s32)
; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 %42(s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY16]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY17]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_3:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_3]](s1), [[S_AND_B32_3]](s1), implicit-def $scc
; GFX10-NEXT: [[S_ANDN2_B32_4:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 %56(s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_4:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY15]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_4:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY16]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_4:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_4]](s1), [[S_AND_B32_4]](s1), implicit-def $scc
; GFX10-NEXT: [[DEF6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = IMPLICIT_DEF
; GFX10-NEXT: G_BR %bb.2
@@ -1057,27 +1064,27 @@ body: |
; GFX10-NEXT: bb.7:
; GFX10-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[ICMP]](s1), %bb.0, [[S_OR_B32_]](s1), %bb.2, [[S_OR_B32_2]](s1), %bb.4
+ ; GFX10-NEXT: [[PHI6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[COPY7]](s1), %bb.0, [[S_OR_B32_]](s1), %bb.2, [[S_OR_B32_2]](s1), %bb.4
; GFX10-NEXT: [[PHI7:%[0-9]+]]:sreg_32(s1) = PHI [[DEF3]](s1), %bb.0, [[PHI7]](s1), %bb.2, [[S_OR_B32_1]](s1), %bb.4
; GFX10-NEXT: [[PHI8:%[0-9]+]]:sreg_32(s1) = PHI [[DEF2]](s1), %bb.0, [[PHI1]](s1), %bb.2, [[DEF5]](s1), %bb.4
; GFX10-NEXT: [[PHI9:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.0, [[PHI2]](s1), %bb.2, [[DEF4]](s1), %bb.4
; GFX10-NEXT: [[PHI10:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT2]](s32), %bb.4, [[PHI10]](s32), %bb.2, [[C]](s32), %bb.0
; GFX10-NEXT: [[PHI11:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.4, [[INTRINSIC_CONVERGENT]](s32), %bb.2, [[C]](s32), %bb.0
- ; GFX10-NEXT: [[COPY17:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI6]](s1)
- ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sreg_32(s1) = COPY [[PHI7]](s1)
- ; GFX10-NEXT: [[COPY19:%[0-9]+]]:sreg_32(s1) = COPY [[PHI8]](s1)
- ; GFX10-NEXT: [[COPY20:%[0-9]+]]:sreg_32(s1) = COPY [[PHI9]](s1)
+ ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI6]](s1)
+ ; GFX10-NEXT: [[COPY19:%[0-9]+]]:sreg_32(s1) = COPY [[PHI7]](s1)
+ ; GFX10-NEXT: [[COPY20:%[0-9]+]]:sreg_32(s1) = COPY [[PHI8]](s1)
+ ; GFX10-NEXT: [[COPY21:%[0-9]+]]:sreg_32(s1) = COPY [[PHI9]](s1)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY21:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_5:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY20]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[COPY22:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_5:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY21]](s1), $exec_lo, implicit-def $scc
; GFX10-NEXT: [[S_AND_B32_5:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY6]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_5:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_5]](s1), [[S_AND_B32_5]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY22:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_5]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_6:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY19]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_6:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY21]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY23:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_5]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_6:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY20]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_6:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY22]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_6:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_6]](s1), [[S_AND_B32_6]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY23:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_6]](s1)
- ; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY17]](s1), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX10-NEXT: [[COPY24:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_6]](s1)
+ ; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY18]](s1), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.1
bb.0:
successors: %bb.7(0x80000000)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll
index 312c6a3..1855ede 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -global-isel -amdgpu-global-isel-risky-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
define void @temporal_divergent_i1_phi(float %val, ptr %addr) {
; GFX10-LABEL: temporal_divergent_i1_phi:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll
index b21e6a7..1934958 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -global-isel -amdgpu-global-isel-risky-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
define void @temporal_divergent_i32(float %val, ptr %addr) {
; GFX10-LABEL: temporal_divergent_i32:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir
index c7d45f0..4bb9eb8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn -amdgpu-global-isel-risky-select -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=GCN
+# RUN: llc -mtriple=amdgcn -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=GCN
---
name: g_phi_s32_ss_sbranch
@@ -322,60 +322,6 @@ body: |
...
---
-name: g_phi_vcc_s1_sbranch
-legalized: true
-regBankSelected: true
-tracksRegLiveness: true
-machineFunctionInfo: {}
-body: |
- ; GCN-LABEL: name: g_phi_vcc_s1_sbranch
- ; GCN: bb.0:
- ; GCN-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; GCN-NEXT: liveins: $vgpr0, $vgpr1, $sgpr2
- ; GCN-NEXT: {{ $}}
- ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GCN-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GCN-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 [[COPY]], [[S_MOV_B32_]], implicit $exec
- ; GCN-NEXT: S_CMP_EQ_U32 [[COPY2]], [[S_MOV_B32_]], implicit-def $scc
- ; GCN-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $scc
- ; GCN-NEXT: $scc = COPY [[COPY3]]
- ; GCN-NEXT: S_CBRANCH_SCC1 %bb.1, implicit $scc
- ; GCN-NEXT: S_BRANCH %bb.2
- ; GCN-NEXT: {{ $}}
- ; GCN-NEXT: bb.1:
- ; GCN-NEXT: successors: %bb.2(0x80000000)
- ; GCN-NEXT: {{ $}}
- ; GCN-NEXT: [[V_CMP_EQ_U32_e64_1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 [[COPY1]], [[S_MOV_B32_]], implicit $exec
- ; GCN-NEXT: S_BRANCH %bb.2
- ; GCN-NEXT: {{ $}}
- ; GCN-NEXT: bb.2:
- ; GCN-NEXT: [[PHI:%[0-9]+]]:sreg_64_xexec = PHI [[V_CMP_EQ_U32_e64_]], %bb.0, [[V_CMP_EQ_U32_e64_1]], %bb.1
- ; GCN-NEXT: S_SETPC_B64 undef $sgpr30_sgpr31, implicit [[PHI]]
- bb.0:
- liveins: $vgpr0, $vgpr1, $sgpr2
-
- %0:vgpr(s32) = COPY $vgpr0
- %1:vgpr(s32) = COPY $vgpr1
- %2:sgpr(s32) = COPY $sgpr2
- %3:sgpr(s32) = G_CONSTANT i32 0
- %4:vcc(s1) = G_ICMP intpred(eq), %0, %3
- %5:sgpr(s32) = G_ICMP intpred(eq), %2(s32), %3
- G_BRCOND %5, %bb.1
- G_BR %bb.2
-
- bb.1:
- %6:vcc(s1) = G_ICMP intpred(eq), %1, %3
- G_BR %bb.2
-
- bb.2:
- %7:vcc(s1) = G_PHI %4, %bb.0, %6, %bb.1
- S_SETPC_B64 undef $sgpr30_sgpr31, implicit %7
-
-...
-
----
name: phi_s32_ss_sbranch
legalized: true
regBankSelected: true
diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll
index 5296ad3..2f3d5d9 100644
--- a/llvm/test/CodeGen/AMDGPU/div_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll
@@ -2310,2860 +2310,6 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
ret i128 %div
}
-define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) {
-; GFX9-LABEL: v_srem_i128_vv:
-; GFX9: ; %bb.0: ; %_udiv-special-cases
-; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_ashrrev_i32_e32 v20, 31, v3
-; GFX9-NEXT: v_xor_b32_e32 v0, v0, v20
-; GFX9-NEXT: v_xor_b32_e32 v10, v2, v20
-; GFX9-NEXT: v_xor_b32_e32 v1, v1, v20
-; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v20
-; GFX9-NEXT: v_xor_b32_e32 v9, v3, v20
-; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v20, vcc
-; GFX9-NEXT: v_ashrrev_i32_e32 v8, 31, v7
-; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v10, v20, vcc
-; GFX9-NEXT: v_xor_b32_e32 v4, v4, v8
-; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v9, v20, vcc
-; GFX9-NEXT: v_xor_b32_e32 v5, v5, v8
-; GFX9-NEXT: v_sub_co_u32_e32 v23, vcc, v4, v8
-; GFX9-NEXT: v_xor_b32_e32 v6, v6, v8
-; GFX9-NEXT: v_subb_co_u32_e32 v21, vcc, v5, v8, vcc
-; GFX9-NEXT: v_xor_b32_e32 v7, v7, v8
-; GFX9-NEXT: v_subb_co_u32_e32 v4, vcc, v6, v8, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v8, vcc
-; GFX9-NEXT: v_or_b32_e32 v7, v21, v5
-; GFX9-NEXT: v_or_b32_e32 v6, v23, v4
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GFX9-NEXT: v_or_b32_e32 v7, v3, v1
-; GFX9-NEXT: v_or_b32_e32 v6, v2, v0
-; GFX9-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[6:7]
-; GFX9-NEXT: v_ffbh_u32_e32 v6, v4
-; GFX9-NEXT: v_add_u32_e32 v6, 32, v6
-; GFX9-NEXT: v_ffbh_u32_e32 v7, v5
-; GFX9-NEXT: v_min_u32_e32 v6, v6, v7
-; GFX9-NEXT: v_ffbh_u32_e32 v7, v23
-; GFX9-NEXT: v_add_u32_e32 v7, 32, v7
-; GFX9-NEXT: v_ffbh_u32_e32 v8, v21
-; GFX9-NEXT: v_min_u32_e32 v7, v7, v8
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, 64, v7
-; GFX9-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, 0, vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
-; GFX9-NEXT: v_ffbh_u32_e32 v9, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v6, vcc
-; GFX9-NEXT: v_ffbh_u32_e32 v7, v0
-; GFX9-NEXT: v_add_u32_e32 v7, 32, v7
-; GFX9-NEXT: v_min_u32_e32 v7, v7, v9
-; GFX9-NEXT: v_ffbh_u32_e32 v9, v2
-; GFX9-NEXT: v_add_u32_e32 v9, 32, v9
-; GFX9-NEXT: v_ffbh_u32_e32 v10, v3
-; GFX9-NEXT: v_min_u32_e32 v9, v9, v10
-; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, 0, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, 64, v9
-; GFX9-NEXT: v_addc_co_u32_e64 v10, s[6:7], 0, 0, vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GFX9-NEXT: s_mov_b64 s[6:7], 0x7f
-; GFX9-NEXT: v_cndmask_b32_e32 v7, v9, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v10, v10, 0, vcc
-; GFX9-NEXT: v_sub_co_u32_e32 v6, vcc, v6, v7
-; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v8, v10, vcc
-; GFX9-NEXT: v_mov_b32_e32 v9, 0
-; GFX9-NEXT: v_subbrev_co_u32_e32 v8, vcc, 0, v9, vcc
-; GFX9-NEXT: v_subbrev_co_u32_e32 v9, vcc, 0, v9, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[6:7]
-; GFX9-NEXT: v_or_b32_e32 v13, v7, v9
-; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; GFX9-NEXT: v_mov_b32_e32 v22, v20
-; GFX9-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GFX9-NEXT: v_cndmask_b32_e32 v10, v11, v10, vcc
-; GFX9-NEXT: v_and_b32_e32 v10, 1, v10
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v10
-; GFX9-NEXT: v_xor_b32_e32 v10, 0x7f, v6
-; GFX9-NEXT: v_or_b32_e32 v12, v10, v8
-; GFX9-NEXT: s_or_b64 s[4:5], s[4:5], vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13]
-; GFX9-NEXT: s_xor_b64 s[6:7], s[4:5], -1
-; GFX9-NEXT: v_cndmask_b32_e64 v11, v1, 0, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v12, v0, 0, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v10, v3, 0, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[4:5]
-; GFX9-NEXT: s_and_b64 s[4:5], s[6:7], vcc
-; GFX9-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
-; GFX9-NEXT: s_cbranch_execz .LBB2_6
-; GFX9-NEXT: ; %bb.1: ; %udiv-bb1
-; GFX9-NEXT: v_add_co_u32_e32 v24, vcc, 1, v6
-; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, 0, v7, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v26, vcc, 0, v8, vcc
-; GFX9-NEXT: v_sub_u32_e32 v13, 0x7f, v6
-; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, 0, v9, vcc
-; GFX9-NEXT: v_sub_u32_e32 v11, 64, v13
-; GFX9-NEXT: v_or_b32_e32 v8, v25, v27
-; GFX9-NEXT: v_or_b32_e32 v7, v24, v26
-; GFX9-NEXT: v_lshlrev_b64 v[9:10], v13, v[0:1]
-; GFX9-NEXT: v_lshrrev_b64 v[11:12], v11, v[2:3]
-; GFX9-NEXT: v_sub_u32_e32 v6, 63, v6
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[7:8]
-; GFX9-NEXT: v_lshlrev_b64 v[6:7], v6, v[2:3]
-; GFX9-NEXT: v_or_b32_e32 v8, v10, v12
-; GFX9-NEXT: v_or_b32_e32 v9, v9, v11
-; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v13
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v13
-; GFX9-NEXT: v_lshlrev_b64 v[12:13], v13, v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v8, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v8, 0
-; GFX9-NEXT: v_mov_b32_e32 v10, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v1, s[6:7]
-; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v0, s[6:7]
-; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, v13, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v9, 0
-; GFX9-NEXT: v_mov_b32_e32 v11, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, v12, s[4:5]
-; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX9-NEXT: s_xor_b64 s[6:7], exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execz .LBB2_5
-; GFX9-NEXT: ; %bb.2: ; %udiv-preheader
-; GFX9-NEXT: v_sub_u32_e32 v10, 64, v24
-; GFX9-NEXT: v_lshrrev_b64 v[8:9], v24, v[2:3]
-; GFX9-NEXT: v_lshlrev_b64 v[10:11], v10, v[0:1]
-; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v24
-; GFX9-NEXT: v_or_b32_e32 v10, v8, v10
-; GFX9-NEXT: v_subrev_u32_e32 v8, 64, v24
-; GFX9-NEXT: v_or_b32_e32 v11, v9, v11
-; GFX9-NEXT: v_lshrrev_b64 v[8:9], v8, v[0:1]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v24
-; GFX9-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v15, v9, v3, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e32 v10, v8, v10, vcc
-; GFX9-NEXT: v_lshrrev_b64 v[8:9], v24, v[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v14, v10, v2, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e32 v17, 0, v9, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v16, 0, v8, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v28, vcc, -1, v23
-; GFX9-NEXT: v_addc_co_u32_e32 v29, vcc, -1, v21, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v30, vcc, -1, v4, vcc
-; GFX9-NEXT: v_mov_b32_e32 v18, 0
-; GFX9-NEXT: v_mov_b32_e32 v10, 0
-; GFX9-NEXT: v_addc_co_u32_e32 v31, vcc, -1, v5, vcc
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: v_mov_b32_e32 v19, 0
-; GFX9-NEXT: v_mov_b32_e32 v11, 0
-; GFX9-NEXT: v_mov_b32_e32 v9, 0
-; GFX9-NEXT: .LBB2_3: ; %udiv-do-while
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_lshrrev_b32_e32 v32, 31, v15
-; GFX9-NEXT: v_lshlrev_b64 v[14:15], 1, v[14:15]
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 31, v7
-; GFX9-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
-; GFX9-NEXT: v_lshrrev_b32_e32 v8, 31, v13
-; GFX9-NEXT: v_lshlrev_b64 v[16:17], 1, v[16:17]
-; GFX9-NEXT: v_or_b32_e32 v14, v14, v33
-; GFX9-NEXT: v_or3_b32 v6, v6, v8, v10
-; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v28, v14
-; GFX9-NEXT: v_or_b32_e32 v16, v16, v32
-; GFX9-NEXT: v_subb_co_u32_e32 v8, vcc, v29, v15, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v8, vcc, v30, v16, vcc
-; GFX9-NEXT: v_lshlrev_b64 v[12:13], 1, v[12:13]
-; GFX9-NEXT: v_subb_co_u32_e32 v8, vcc, v31, v17, vcc
-; GFX9-NEXT: v_ashrrev_i32_e32 v8, 31, v8
-; GFX9-NEXT: v_or_b32_e32 v12, v18, v12
-; GFX9-NEXT: v_and_b32_e32 v18, v8, v23
-; GFX9-NEXT: v_or_b32_e32 v13, v19, v13
-; GFX9-NEXT: v_and_b32_e32 v19, v8, v21
-; GFX9-NEXT: v_sub_co_u32_e32 v14, vcc, v14, v18
-; GFX9-NEXT: v_and_b32_e32 v32, v8, v4
-; GFX9-NEXT: v_subb_co_u32_e32 v15, vcc, v15, v19, vcc
-; GFX9-NEXT: v_and_b32_e32 v33, v8, v5
-; GFX9-NEXT: v_subb_co_u32_e32 v16, vcc, v16, v32, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v17, vcc, v17, v33, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v24, vcc, -1, v24
-; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, -1, v25, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v26, vcc, -1, v26, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, -1, v27, vcc
-; GFX9-NEXT: v_or_b32_e32 v18, v24, v26
-; GFX9-NEXT: v_or_b32_e32 v19, v25, v27
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
-; GFX9-NEXT: v_and_b32_e32 v8, 1, v8
-; GFX9-NEXT: v_mov_b32_e32 v19, v9
-; GFX9-NEXT: v_or3_b32 v7, v7, 0, v11
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v18, v8
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB2_3
-; GFX9-NEXT: ; %bb.4: ; %Flow
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: .LBB2_5: ; %Flow2
-; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX9-NEXT: v_lshlrev_b64 v[14:15], 1, v[12:13]
-; GFX9-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
-; GFX9-NEXT: v_lshrrev_b32_e32 v12, 31, v13
-; GFX9-NEXT: v_or3_b32 v11, v7, 0, v11
-; GFX9-NEXT: v_or3_b32 v12, v6, v12, v10
-; GFX9-NEXT: v_or_b32_e32 v10, v9, v15
-; GFX9-NEXT: v_or_b32_e32 v13, v8, v14
-; GFX9-NEXT: .LBB2_6: ; %Flow3
-; GFX9-NEXT: s_or_b64 exec, exec, s[8:9]
-; GFX9-NEXT: v_mul_lo_u32 v16, v13, v5
-; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v23, v13, 0
-; GFX9-NEXT: v_mov_b32_e32 v15, 0
-; GFX9-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v13, v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v14, v6
-; GFX9-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v21, v13, v[14:15]
-; GFX9-NEXT: v_mul_lo_u32 v9, v10, v4
-; GFX9-NEXT: v_mul_lo_u32 v11, v11, v23
-; GFX9-NEXT: v_mov_b32_e32 v4, v14
-; GFX9-NEXT: v_mov_b32_e32 v14, v15
-; GFX9-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v23, v10, v[13:14]
-; GFX9-NEXT: v_add3_u32 v8, v8, v16, v9
-; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v12, v23, v[7:8]
-; GFX9-NEXT: v_mov_b32_e32 v8, v14
-; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v4, v8
-; GFX9-NEXT: v_addc_co_u32_e64 v9, s[4:5], 0, 0, vcc
-; GFX9-NEXT: v_mul_lo_u32 v12, v12, v21
-; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v21, v10, v[8:9]
-; GFX9-NEXT: v_add3_u32 v4, v11, v7, v12
-; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v8, v6
-; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v9, v4, vcc
-; GFX9-NEXT: v_mov_b32_e32 v7, v13
-; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, v2, v5
-; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v7, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v4, vcc
-; GFX9-NEXT: v_xor_b32_e32 v5, v0, v20
-; GFX9-NEXT: v_xor_b32_e32 v0, v2, v20
-; GFX9-NEXT: v_xor_b32_e32 v4, v1, v22
-; GFX9-NEXT: v_xor_b32_e32 v1, v3, v22
-; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v20
-; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v22, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v5, v20, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v4, v22, vcc
-; GFX9-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-O0-LABEL: v_srem_i128_vv:
-; GFX9-O0: ; %bb.0: ; %_udiv-special-cases
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: ; implicit-def: $vgpr8 : SGPR spill to VGPR lane
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v0
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
-; GFX9-O0-NEXT: s_waitcnt vmcnt(1)
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v1
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v14
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15
-; GFX9-O0-NEXT: v_ashrrev_i64 v[12:13], s4, v[6:7]
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v13
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
-; GFX9-O0-NEXT: v_ashrrev_i64 v[6:7], s4, v[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v13
-; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v10
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14
-; GFX9-O0-NEXT: v_xor_b32_e64 v13, v11, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
-; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v10
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_xor_b32_e64 v15, v4, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v7
-; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_xor_b32_e64 v7, v5, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
-; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v4
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_xor_b32_e64 v2, v2, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v16
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v14
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v9, vcc, v9, v12
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v10, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v13, vcc, v11, v12, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v10, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
-; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v8
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v1, vcc, v1, v6
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v4, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v5, v6, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v4, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v2
-; GFX9-O0-NEXT: v_or_b32_e64 v3, v8, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
-; GFX9-O0-NEXT: v_or_b32_e64 v1, v5, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 0
-; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 1
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[1:2], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: v_or_b32_e64 v15, v4, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
-; GFX9-O0-NEXT: v_or_b32_e64 v9, v3, v1
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v15
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], s[6:7]
-; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v6
-; GFX9-O0-NEXT: s_mov_b32 s9, 32
-; GFX9-O0-NEXT: v_add_u32_e64 v6, v6, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v7, v7
-; GFX9-O0-NEXT: v_min_u32_e64 v6, v6, v7
-; GFX9-O0-NEXT: s_mov_b32 s8, 0
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v7
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v5
-; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v8, v8
-; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
-; GFX9-O0-NEXT: s_mov_b64 s[10:11], 64
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
-; GFX9-O0-NEXT: s_mov_b32 s12, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
-; GFX9-O0-NEXT: s_mov_b32 s14, s11
-; GFX9-O0-NEXT: v_add_co_u32_e64 v8, s[12:13], v8, s12
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, s14
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[12:13], v5, v9, s[12:13]
-; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[12:13], v[11:12], s[6:7]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v10, s[12:13]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, v6, v7, s[12:13]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v1
-; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v2
-; GFX9-O0-NEXT: v_min_u32_e64 v6, v5, v6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v3
-; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v11, v4
-; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v11
-; GFX9-O0-NEXT: ; implicit-def: $sgpr9
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v15
-; GFX9-O0-NEXT: s_mov_b32 s8, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
-; GFX9-O0-NEXT: s_mov_b32 s10, s11
-; GFX9-O0-NEXT: v_add_co_u32_e64 v11, s[8:9], v11, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s10
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[8:9], v5, v12, s[8:9]
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[8:9], v[13:14], s[6:7]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v8, s[8:9]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[8:9]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
-; GFX9-O0-NEXT: s_mov_b32 s10, s6
-; GFX9-O0-NEXT: s_mov_b32 s11, s7
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v5, vcc, v5, v8
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v9, vcc, v6, v7, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, s10
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v8, vcc, v6, v7, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, s11
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, s11
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v6, v7, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v9
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], s[8:9]
-; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13]
-; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[5:6], s[14:15]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v10, 0, 1, s[14:15]
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7]
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[8:9], s[14:15]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[14:15]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v7, v10, s[8:9]
-; GFX9-O0-NEXT: v_and_b32_e64 v7, 1, v7
-; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[8:9], v7, 1
-; GFX9-O0-NEXT: s_or_b64 s[8:9], s[4:5], s[8:9]
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], -1
-; GFX9-O0-NEXT: s_xor_b64 s[4:5], s[8:9], s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: s_mov_b32 s14, s13
-; GFX9-O0-NEXT: v_xor_b32_e64 v7, v7, s14
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
-; GFX9-O0-NEXT: v_xor_b32_e64 v5, v5, s12
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v9
-; GFX9-O0-NEXT: v_or_b32_e64 v7, v7, v10
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[6:7], v[5:6], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v2, v5, s[8:9]
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s10
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[8:9]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v4, v5, s[8:9]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s10
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[8:9]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v5
-; GFX9-O0-NEXT: s_and_b64 s[6:7], s[4:5], s[6:7]
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], exec
-; GFX9-O0-NEXT: v_writelane_b32 v0, s4, 2
-; GFX9-O0-NEXT: v_writelane_b32 v0, s5, 3
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: s_cbranch_execz .LBB2_3
-; GFX9-O0-NEXT: s_branch .LBB2_8
-; GFX9-O0-NEXT: .LBB2_1: ; %Flow
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s4, v0, 4
-; GFX9-O0-NEXT: v_readlane_b32 s5, v0, 5
-; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: ; %bb.2: ; %Flow
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(6)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB2_5
-; GFX9-O0-NEXT: .LBB2_3: ; %Flow2
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s4, v4, 2
-; GFX9-O0-NEXT: v_readlane_b32 s5, v4, 3
-; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB2_9
-; GFX9-O0-NEXT: .LBB2_4: ; %udiv-loop-exit
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b32 s4, 1
-; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
-; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[0:1]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_lshlrev_b64 v[9:10], s4, v[9:10]
-; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v8
-; GFX9-O0-NEXT: v_or3_b32 v4, v4, v11, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v9
-; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
-; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v4
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB2_3
-; GFX9-O0-NEXT: .LBB2_5: ; %Flow1
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s4, v8, 6
-; GFX9-O0-NEXT: v_readlane_b32 s5, v8, 7
-; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB2_4
-; GFX9-O0-NEXT: .LBB2_6: ; %udiv-do-while
-; GFX9-O0-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s6, v16, 8
-; GFX9-O0-NEXT: v_readlane_b32 s7, v16, 9
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: s_waitcnt vmcnt(16)
-; GFX9-O0-NEXT: v_lshrrev_b64 v[29:30], s4, v[2:3]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v30
-; GFX9-O0-NEXT: s_mov_b32 s5, 1
-; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], s5, v[23:24]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v24
-; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v29
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v23
-; GFX9-O0-NEXT: v_or_b32_e64 v23, v5, v10
-; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v24, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[2:3]
-; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], s4, v[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v30
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v29
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_or_b32_e64 v4, v3, v4
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
-; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s5, v[0:1]
-; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[6:7]
-; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v30
-; GFX9-O0-NEXT: s_waitcnt vmcnt(10)
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v28
-; GFX9-O0-NEXT: v_or3_b32 v6, v6, v7, v10
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v29
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v27
-; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
-; GFX9-O0-NEXT: s_waitcnt vmcnt(8)
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v26
-; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v25
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v13, vcc, v13, v6
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v12, vcc, v12, v10, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v11, v4, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v5, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
-; GFX9-O0-NEXT: v_ashrrev_i64 v[13:14], s4, v[11:12]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v14
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], 1
-; GFX9-O0-NEXT: s_mov_b32 s8, s5
-; GFX9-O0-NEXT: v_and_b32_e64 v12, v7, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v13
-; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
-; GFX9-O0-NEXT: v_and_b32_e64 v14, v11, s4
-; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v23, v22
-; GFX9-O0-NEXT: v_and_b32_e64 v23, v7, v23
-; GFX9-O0-NEXT: v_and_b32_e64 v21, v11, v21
-; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v22, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v23, v20
-; GFX9-O0-NEXT: v_and_b32_e64 v7, v7, v23
-; GFX9-O0-NEXT: v_and_b32_e64 v23, v11, v19
-; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v24, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v24
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v22
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v6, vcc, v6, v20
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v10, vcc, v10, v19, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v4, vcc, v4, v11, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v5, v7, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v10
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v8
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 killed $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
-; GFX9-O0-NEXT: s_mov_b32 s5, s8
-; GFX9-O0-NEXT: s_mov_b32 s4, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
-; GFX9-O0-NEXT: v_add_co_u32_e32 v20, vcc, v11, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v9, vcc, v9, v11, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v10, v11, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v8, vcc, v8, v10, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr20 killed $vgpr20 def $vgpr20_vgpr21 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v21, v9
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v20
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
-; GFX9-O0-NEXT: v_mov_b32_e32 v22, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v21
-; GFX9-O0-NEXT: v_or_b32_e64 v19, v19, v22
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v20
-; GFX9-O0-NEXT: v_or_b32_e64 v17, v17, v18
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v19
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[17:18], v[12:13]
-; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v2
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v0
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v14
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
-; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 4
-; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 5
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
-; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 8
-; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 9
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: s_cbranch_execnz .LBB2_6
-; GFX9-O0-NEXT: s_branch .LBB2_1
-; GFX9-O0-NEXT: .LBB2_7: ; %udiv-preheader
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(9)
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], v4, v[21:22]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
-; GFX9-O0-NEXT: s_mov_b32 s6, 64
-; GFX9-O0-NEXT: v_sub_u32_e64 v12, s6, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], v12, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v24
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v23
-; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
-; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v4, s6
-; GFX9-O0-NEXT: v_sub_u32_e64 v5, v4, s6
-; GFX9-O0-NEXT: v_lshrrev_b64 v[23:24], v5, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[4:5]
-; GFX9-O0-NEXT: s_mov_b32 s6, 0
-; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v4, s6
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v22
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v23
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v21
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[6:7]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], v4, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v5
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-O0-NEXT: s_mov_b32 s8, s7
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v12, v12, v15, s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v4
-; GFX9-O0-NEXT: s_mov_b32 s8, s6
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v5, s[4:5]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v14
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
-; GFX9-O0-NEXT: s_mov_b32 s5, s8
-; GFX9-O0-NEXT: s_mov_b32 s4, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
-; GFX9-O0-NEXT: v_add_co_u32_e32 v12, vcc, v12, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v15, v17, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, s5
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v15, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v13, vcc, v13, v15, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v13
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v17
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, s7
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
-; GFX9-O0-NEXT: v_writelane_b32 v16, s4, 8
-; GFX9-O0-NEXT: v_writelane_b32 v16, s5, 9
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB2_6
-; GFX9-O0-NEXT: .LBB2_8: ; %udiv-bb1
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1
-; GFX9-O0-NEXT: s_mov_b32 s5, s6
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
-; GFX9-O0-NEXT: s_mov_b32 s4, s7
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-O0-NEXT: s_mov_b32 s8, s6
-; GFX9-O0-NEXT: s_mov_b32 s9, s7
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-O0-NEXT: v_add_co_u32_e32 v9, vcc, v4, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v5, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s9
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b32 s4, 0x7f
-; GFX9-O0-NEXT: v_sub_u32_e64 v3, s4, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[5:6], v3, v[11:12]
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v6
-; GFX9-O0-NEXT: s_mov_b32 s4, 64
-; GFX9-O0-NEXT: v_sub_u32_e64 v14, s4, v3
-; GFX9-O0-NEXT: v_lshrrev_b64 v[14:15], v14, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v15
-; GFX9-O0-NEXT: v_or_b32_e64 v13, v13, v16
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v14
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v6
-; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v3, s4
-; GFX9-O0-NEXT: s_mov_b32 s10, 63
-; GFX9-O0-NEXT: v_sub_u32_e64 v4, s10, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[13:14], v4, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[4:5]
-; GFX9-O0-NEXT: s_mov_b32 s10, 0
-; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[10:11], v3, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[10:11]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v13
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[10:11]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[7:8], v3, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, s9
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[4:5]
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v4, v7, s[4:5]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v3
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v10
-; GFX9-O0-NEXT: v_or_b32_e64 v3, v3, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v9
-; GFX9-O0-NEXT: v_or_b32_e64 v1, v1, v2
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[1:2], s[6:7]
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], exec
-; GFX9-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
-; GFX9-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
-; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 6
-; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 7
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: s_cbranch_execz .LBB2_5
-; GFX9-O0-NEXT: s_branch .LBB2_7
-; GFX9-O0-NEXT: .LBB2_9: ; %udiv-end
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b32 s4, 32
-; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
-; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[5:6]
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v17
-; GFX9-O0-NEXT: v_mul_lo_u32 v3, v1, v0
-; GFX9-O0-NEXT: v_lshrrev_b64 v[17:18], s4, v[17:18]
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v17
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mul_lo_u32 v2, v5, v2
-; GFX9-O0-NEXT: v_mad_u64_u32 v[17:18], s[6:7], v5, v0, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v18
-; GFX9-O0-NEXT: v_add3_u32 v2, v0, v2, v3
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v0
-; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[2:3]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 killed $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: s_mov_b32 s5, 0
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v18
-; GFX9-O0-NEXT: v_or_b32_e64 v0, v0, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v17
-; GFX9-O0-NEXT: v_or_b32_e64 v17, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v0
-; GFX9-O0-NEXT: v_lshrrev_b64 v[2:3], s4, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v11
-; GFX9-O0-NEXT: v_mul_lo_u32 v3, v2, v6
-; GFX9-O0-NEXT: v_lshrrev_b64 v[11:12], s4, v[11:12]
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v19
-; GFX9-O0-NEXT: v_mul_lo_u32 v11, v11, v0
-; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v2, v0, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v20
-; GFX9-O0-NEXT: v_add3_u32 v2, v2, v3, v11
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[2:3]
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 killed $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v20
-; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v19
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v18
-; GFX9-O0-NEXT: v_add_co_u32_e64 v17, s[6:7], v11, v12
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v2
-; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v6, v1, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v19
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v20
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v12
-; GFX9-O0-NEXT: v_lshlrev_b64 v[19:20], s4, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v20
-; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v19
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v6, v5, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v19
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v20
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v21, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
-; GFX9-O0-NEXT: v_lshlrev_b64 v[19:20], s4, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v21, v20
-; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v21
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v19
-; GFX9-O0-NEXT: v_or_b32_e64 v23, v11, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v24, v6
-; GFX9-O0-NEXT: v_mad_u64_u32 v[11:12], s[6:7], v0, v5, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v21, v12
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v22, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v24
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v22
-; GFX9-O0-NEXT: v_add_co_u32_e64 v5, s[6:7], v5, v20
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v19, s[6:7], v6, v19, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v19
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v6
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0xffffffff
-; GFX9-O0-NEXT: s_mov_b32 s8, s7
-; GFX9-O0-NEXT: v_and_b32_e64 v19, v19, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
-; GFX9-O0-NEXT: ; kill: def $sgpr6 killed $sgpr6 killed $sgpr6_sgpr7
-; GFX9-O0-NEXT: v_and_b32_e64 v21, v20, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v22, v19
-; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v0, v1, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v23, v19
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v24, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v24
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v20
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v1
-; GFX9-O0-NEXT: v_lshlrev_b64 v[19:20], s4, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v20
-; GFX9-O0-NEXT: v_or_b32_e64 v0, v0, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v23
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 killed $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_or_b32_e64 v23, v1, v19
-; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v24, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v24
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v22
-; GFX9-O0-NEXT: v_add_co_u32_e64 v0, s[6:7], v0, v20
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v19, s[6:7], v1, v19, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v19
-; GFX9-O0-NEXT: v_lshrrev_b64 v[21:22], s4, v[0:1]
-; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v22
-; GFX9-O0-NEXT: v_add_co_u32_e64 v19, s[6:7], v19, v20
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[6:7], v5, v6, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v19
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v20
-; GFX9-O0-NEXT: v_add_co_u32_e64 v19, s[6:7], v5, v6
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v19
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v20
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v18
-; GFX9-O0-NEXT: v_add_co_u32_e64 v2, s[6:7], v2, v6
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[6:7], v3, v5, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-O0-NEXT: v_lshlrev_b64 v[0:1], s4, v[0:1]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v11
-; GFX9-O0-NEXT: v_or_b32_e64 v0, v0, v1
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v16
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v14
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v12
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v11, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v6, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v2, vcc, v1, v2, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v10
-; GFX9-O0-NEXT: v_xor_b32_e64 v3, v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT: v_xor_b32_e64 v9, v6, v5
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
-; GFX9-O0-NEXT: v_xor_b32_e64 v3, v3, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 killed $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
-; GFX9-O0-NEXT: v_xor_b32_e64 v0, v0, v8
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v10
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v7, vcc, v7, v8
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v5, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: v_lshrrev_b64 v[7:8], s4, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
-; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-O0-NEXT: ; kill: killed $vgpr4
-; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_nop 0
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
- %div = srem i128 %lhs, %rhs
- ret i128 %div
-}
-
-define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) {
-; GFX9-LABEL: v_urem_i128_vv:
-; GFX9: ; %bb.0: ; %_udiv-special-cases
-; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_or_b32_e32 v9, v5, v7
-; GFX9-NEXT: v_or_b32_e32 v8, v4, v6
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GFX9-NEXT: v_or_b32_e32 v9, v1, v3
-; GFX9-NEXT: v_or_b32_e32 v8, v0, v2
-; GFX9-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[8:9]
-; GFX9-NEXT: v_ffbh_u32_e32 v8, v6
-; GFX9-NEXT: v_add_u32_e32 v8, 32, v8
-; GFX9-NEXT: v_ffbh_u32_e32 v9, v7
-; GFX9-NEXT: v_min_u32_e32 v8, v8, v9
-; GFX9-NEXT: v_ffbh_u32_e32 v9, v4
-; GFX9-NEXT: v_add_u32_e32 v9, 32, v9
-; GFX9-NEXT: v_ffbh_u32_e32 v10, v5
-; GFX9-NEXT: v_min_u32_e32 v9, v9, v10
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, 64, v9
-; GFX9-NEXT: v_addc_co_u32_e64 v10, s[6:7], 0, 0, vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GFX9-NEXT: v_ffbh_u32_e32 v11, v3
-; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
-; GFX9-NEXT: v_ffbh_u32_e32 v9, v2
-; GFX9-NEXT: v_add_u32_e32 v9, 32, v9
-; GFX9-NEXT: v_min_u32_e32 v9, v9, v11
-; GFX9-NEXT: v_ffbh_u32_e32 v11, v0
-; GFX9-NEXT: v_add_u32_e32 v11, 32, v11
-; GFX9-NEXT: v_ffbh_u32_e32 v12, v1
-; GFX9-NEXT: v_min_u32_e32 v11, v11, v12
-; GFX9-NEXT: v_cndmask_b32_e64 v10, v10, 0, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, 64, v11
-; GFX9-NEXT: v_addc_co_u32_e64 v12, s[6:7], 0, 0, vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
-; GFX9-NEXT: s_mov_b64 s[6:7], 0x7f
-; GFX9-NEXT: v_cndmask_b32_e32 v9, v11, v9, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, 0, vcc
-; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v8, v9
-; GFX9-NEXT: v_subb_co_u32_e32 v9, vcc, v10, v12, vcc
-; GFX9-NEXT: v_mov_b32_e32 v11, 0
-; GFX9-NEXT: v_subbrev_co_u32_e32 v10, vcc, 0, v11, vcc
-; GFX9-NEXT: v_subbrev_co_u32_e32 v11, vcc, 0, v11, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[8:9]
-; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
-; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
-; GFX9-NEXT: v_cndmask_b32_e32 v12, v13, v12, vcc
-; GFX9-NEXT: v_and_b32_e32 v12, 1, v12
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v12
-; GFX9-NEXT: v_xor_b32_e32 v12, 0x7f, v8
-; GFX9-NEXT: v_or_b32_e32 v13, v9, v11
-; GFX9-NEXT: v_or_b32_e32 v12, v12, v10
-; GFX9-NEXT: s_or_b64 s[4:5], s[4:5], vcc
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13]
-; GFX9-NEXT: s_xor_b64 s[6:7], s[4:5], -1
-; GFX9-NEXT: v_cndmask_b32_e64 v15, v3, 0, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v14, v2, 0, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v13, v1, 0, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v12, v0, 0, s[4:5]
-; GFX9-NEXT: s_and_b64 s[4:5], s[6:7], vcc
-; GFX9-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
-; GFX9-NEXT: s_cbranch_execz .LBB3_6
-; GFX9-NEXT: ; %bb.1: ; %udiv-bb1
-; GFX9-NEXT: v_add_co_u32_e32 v22, vcc, 1, v8
-; GFX9-NEXT: v_addc_co_u32_e32 v23, vcc, 0, v9, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v24, vcc, 0, v10, vcc
-; GFX9-NEXT: v_sub_u32_e32 v15, 0x7f, v8
-; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, 0, v11, vcc
-; GFX9-NEXT: v_sub_u32_e32 v13, 64, v15
-; GFX9-NEXT: v_or_b32_e32 v10, v23, v25
-; GFX9-NEXT: v_or_b32_e32 v9, v22, v24
-; GFX9-NEXT: v_lshlrev_b64 v[11:12], v15, v[2:3]
-; GFX9-NEXT: v_lshrrev_b64 v[13:14], v13, v[0:1]
-; GFX9-NEXT: v_sub_u32_e32 v8, 63, v8
-; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[9:10]
-; GFX9-NEXT: v_lshlrev_b64 v[8:9], v8, v[0:1]
-; GFX9-NEXT: v_or_b32_e32 v10, v12, v14
-; GFX9-NEXT: v_or_b32_e32 v11, v11, v13
-; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v15
-; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v11, s[4:5]
-; GFX9-NEXT: v_lshlrev_b64 v[10:11], v15, v[0:1]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v15
-; GFX9-NEXT: v_mov_b32_e32 v12, 0
-; GFX9-NEXT: v_mov_b32_e32 v14, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v3, s[6:7]
-; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v2, s[6:7]
-; GFX9-NEXT: v_cndmask_b32_e64 v11, 0, v11, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v13, 0
-; GFX9-NEXT: v_mov_b32_e32 v15, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, v10, s[4:5]
-; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GFX9-NEXT: s_xor_b64 s[6:7], exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execz .LBB3_5
-; GFX9-NEXT: ; %bb.2: ; %udiv-preheader
-; GFX9-NEXT: v_sub_u32_e32 v14, 64, v22
-; GFX9-NEXT: v_lshrrev_b64 v[12:13], v22, v[0:1]
-; GFX9-NEXT: v_lshlrev_b64 v[14:15], v14, v[2:3]
-; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v22
-; GFX9-NEXT: v_or_b32_e32 v14, v12, v14
-; GFX9-NEXT: v_subrev_u32_e32 v12, 64, v22
-; GFX9-NEXT: v_or_b32_e32 v15, v13, v15
-; GFX9-NEXT: v_lshrrev_b64 v[12:13], v12, v[2:3]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v22
-; GFX9-NEXT: v_cndmask_b32_e32 v13, v13, v15, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v17, v13, v1, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e32 v14, v12, v14, vcc
-; GFX9-NEXT: v_lshrrev_b64 v[12:13], v22, v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e64 v16, v14, v0, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e32 v19, 0, v13, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v18, 0, v12, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v26, vcc, -1, v4
-; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, -1, v5, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v28, vcc, -1, v6, vcc
-; GFX9-NEXT: v_mov_b32_e32 v20, 0
-; GFX9-NEXT: v_mov_b32_e32 v14, 0
-; GFX9-NEXT: v_addc_co_u32_e32 v29, vcc, -1, v7, vcc
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: v_mov_b32_e32 v21, 0
-; GFX9-NEXT: v_mov_b32_e32 v15, 0
-; GFX9-NEXT: v_mov_b32_e32 v13, 0
-; GFX9-NEXT: .LBB3_3: ; %udiv-do-while
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_lshrrev_b32_e32 v12, 31, v11
-; GFX9-NEXT: v_lshlrev_b64 v[10:11], 1, v[10:11]
-; GFX9-NEXT: v_lshlrev_b64 v[18:19], 1, v[18:19]
-; GFX9-NEXT: v_or_b32_e32 v10, v20, v10
-; GFX9-NEXT: v_lshrrev_b32_e32 v20, 31, v17
-; GFX9-NEXT: v_lshlrev_b64 v[16:17], 1, v[16:17]
-; GFX9-NEXT: v_or_b32_e32 v18, v18, v20
-; GFX9-NEXT: v_lshrrev_b32_e32 v20, 31, v9
-; GFX9-NEXT: v_or_b32_e32 v16, v16, v20
-; GFX9-NEXT: v_sub_co_u32_e32 v20, vcc, v26, v16
-; GFX9-NEXT: v_subb_co_u32_e32 v20, vcc, v27, v17, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v20, vcc, v28, v18, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v20, vcc, v29, v19, vcc
-; GFX9-NEXT: v_ashrrev_i32_e32 v30, 31, v20
-; GFX9-NEXT: v_and_b32_e32 v20, v30, v4
-; GFX9-NEXT: v_sub_co_u32_e32 v16, vcc, v16, v20
-; GFX9-NEXT: v_and_b32_e32 v20, v30, v5
-; GFX9-NEXT: v_subb_co_u32_e32 v17, vcc, v17, v20, vcc
-; GFX9-NEXT: v_and_b32_e32 v20, v30, v6
-; GFX9-NEXT: v_subb_co_u32_e32 v18, vcc, v18, v20, vcc
-; GFX9-NEXT: v_and_b32_e32 v20, v30, v7
-; GFX9-NEXT: v_subb_co_u32_e32 v19, vcc, v19, v20, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v22, vcc, -1, v22
-; GFX9-NEXT: v_addc_co_u32_e32 v23, vcc, -1, v23, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v24, vcc, -1, v24, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, -1, v25, vcc
-; GFX9-NEXT: v_or_b32_e32 v11, v21, v11
-; GFX9-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
-; GFX9-NEXT: v_or_b32_e32 v20, v22, v24
-; GFX9-NEXT: v_or_b32_e32 v21, v23, v25
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[20:21]
-; GFX9-NEXT: v_or3_b32 v8, v8, v12, v14
-; GFX9-NEXT: v_and_b32_e32 v12, 1, v30
-; GFX9-NEXT: v_mov_b32_e32 v21, v13
-; GFX9-NEXT: v_or3_b32 v9, v9, 0, v15
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v20, v12
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB3_3
-; GFX9-NEXT: ; %bb.4: ; %Flow
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: .LBB3_5: ; %Flow2
-; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX9-NEXT: v_lshlrev_b64 v[16:17], 1, v[10:11]
-; GFX9-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
-; GFX9-NEXT: v_lshrrev_b32_e32 v10, 31, v11
-; GFX9-NEXT: v_or3_b32 v15, v9, 0, v15
-; GFX9-NEXT: v_or3_b32 v14, v8, v10, v14
-; GFX9-NEXT: v_or_b32_e32 v13, v13, v17
-; GFX9-NEXT: v_or_b32_e32 v12, v12, v16
-; GFX9-NEXT: .LBB3_6: ; %Flow3
-; GFX9-NEXT: s_or_b64 exec, exec, s[8:9]
-; GFX9-NEXT: v_mul_lo_u32 v19, v12, v7
-; GFX9-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v4, v12, 0
-; GFX9-NEXT: v_mov_b32_e32 v17, 0
-; GFX9-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v12, v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v16, v8
-; GFX9-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v5, v12, v[16:17]
-; GFX9-NEXT: v_mul_lo_u32 v18, v13, v6
-; GFX9-NEXT: v_mul_lo_u32 v16, v15, v4
-; GFX9-NEXT: v_mov_b32_e32 v6, v12
-; GFX9-NEXT: v_mov_b32_e32 v12, v17
-; GFX9-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v4, v13, v[11:12]
-; GFX9-NEXT: v_add3_u32 v10, v10, v19, v18
-; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v14, v4, v[9:10]
-; GFX9-NEXT: v_mov_b32_e32 v4, v12
-; GFX9-NEXT: v_mul_lo_u32 v10, v14, v5
-; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, v6, v4
-; GFX9-NEXT: v_addc_co_u32_e64 v15, s[4:5], 0, 0, vcc
-; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v5, v13, v[14:15]
-; GFX9-NEXT: v_add3_u32 v6, v16, v9, v10
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8
-; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v6, vcc
-; GFX9-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v7
-; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v6, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v4, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v5, vcc
-; GFX9-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-O0-LABEL: v_urem_i128_vv:
-; GFX9-O0: ; %bb.0: ; %_udiv-special-cases
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: ; implicit-def: $vgpr8 : SGPR spill to VGPR lane
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v6
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v0
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v3
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v2
-; GFX9-O0-NEXT: v_or_b32_e64 v3, v8, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
-; GFX9-O0-NEXT: v_or_b32_e64 v1, v5, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 0
-; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 1
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[1:2], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: v_or_b32_e64 v15, v4, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
-; GFX9-O0-NEXT: v_or_b32_e64 v9, v3, v1
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v15
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], s[6:7]
-; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v6
-; GFX9-O0-NEXT: s_mov_b32 s9, 32
-; GFX9-O0-NEXT: v_add_u32_e64 v6, v6, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v7, v7
-; GFX9-O0-NEXT: v_min_u32_e64 v6, v6, v7
-; GFX9-O0-NEXT: s_mov_b32 s8, 0
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v7
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v5
-; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v8, v8
-; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
-; GFX9-O0-NEXT: s_mov_b64 s[10:11], 64
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
-; GFX9-O0-NEXT: s_mov_b32 s12, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
-; GFX9-O0-NEXT: s_mov_b32 s14, s11
-; GFX9-O0-NEXT: v_add_co_u32_e64 v8, s[12:13], v8, s12
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, s14
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[12:13], v5, v9, s[12:13]
-; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT: s_mov_b64 s[12:13], s[6:7]
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[12:13], v[11:12], s[12:13]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v10, s[12:13]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, v6, v7, s[12:13]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v1
-; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v2
-; GFX9-O0-NEXT: v_min_u32_e64 v6, v5, v6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v3
-; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
-; GFX9-O0-NEXT: v_ffbh_u32_e64 v11, v4
-; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v11
-; GFX9-O0-NEXT: ; implicit-def: $sgpr9
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v15
-; GFX9-O0-NEXT: s_mov_b32 s8, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
-; GFX9-O0-NEXT: s_mov_b32 s10, s11
-; GFX9-O0-NEXT: v_add_co_u32_e64 v11, s[8:9], v11, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s10
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[8:9], v5, v12, s[8:9]
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[8:9], v[13:14], s[8:9]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v8, s[8:9]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[8:9]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
-; GFX9-O0-NEXT: s_mov_b32 s10, s6
-; GFX9-O0-NEXT: s_mov_b32 s11, s7
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v5, vcc, v5, v8
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v9, vcc, v6, v7, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, s10
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v8, vcc, v6, v7, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, s11
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, s11
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v6, v7, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v9
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], s[8:9]
-; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13]
-; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[5:6], s[14:15]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v10, 0, 1, s[14:15]
-; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7]
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[8:9], s[14:15]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[14:15]
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v7, v10, s[8:9]
-; GFX9-O0-NEXT: v_and_b32_e64 v7, 1, v7
-; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[8:9], v7, 1
-; GFX9-O0-NEXT: s_or_b64 s[8:9], s[4:5], s[8:9]
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], -1
-; GFX9-O0-NEXT: s_xor_b64 s[4:5], s[8:9], s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: s_mov_b32 s14, s13
-; GFX9-O0-NEXT: v_xor_b32_e64 v7, v7, s14
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
-; GFX9-O0-NEXT: v_xor_b32_e64 v5, v5, s12
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v9
-; GFX9-O0-NEXT: v_or_b32_e64 v7, v7, v10
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[6:7], v[5:6], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v2, v5, s[8:9]
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s10
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[8:9]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; implicit-def: $sgpr12
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v4, v5, s[8:9]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s10
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[8:9]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; implicit-def: $sgpr8
-; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v5
-; GFX9-O0-NEXT: s_and_b64 s[6:7], s[4:5], s[6:7]
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], exec
-; GFX9-O0-NEXT: v_writelane_b32 v0, s4, 2
-; GFX9-O0-NEXT: v_writelane_b32 v0, s5, 3
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: s_cbranch_execz .LBB3_3
-; GFX9-O0-NEXT: s_branch .LBB3_8
-; GFX9-O0-NEXT: .LBB3_1: ; %Flow
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s4, v0, 4
-; GFX9-O0-NEXT: v_readlane_b32 s5, v0, 5
-; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: ; %bb.2: ; %Flow
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(6)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB3_5
-; GFX9-O0-NEXT: .LBB3_3: ; %Flow2
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s4, v4, 2
-; GFX9-O0-NEXT: v_readlane_b32 s5, v4, 3
-; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB3_9
-; GFX9-O0-NEXT: .LBB3_4: ; %udiv-loop-exit
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b32 s4, 1
-; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
-; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[0:1]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_lshlrev_b64 v[9:10], s4, v[9:10]
-; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v8
-; GFX9-O0-NEXT: v_or3_b32 v4, v4, v11, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v9
-; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
-; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v4
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB3_3
-; GFX9-O0-NEXT: .LBB3_5: ; %Flow1
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s4, v8, 6
-; GFX9-O0-NEXT: v_readlane_b32 s5, v8, 7
-; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB3_4
-; GFX9-O0-NEXT: .LBB3_6: ; %udiv-do-while
-; GFX9-O0-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_readlane_b32 s6, v16, 8
-; GFX9-O0-NEXT: v_readlane_b32 s7, v16, 9
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: s_waitcnt vmcnt(16)
-; GFX9-O0-NEXT: v_lshrrev_b64 v[29:30], s4, v[2:3]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v30
-; GFX9-O0-NEXT: s_mov_b32 s5, 1
-; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], s5, v[23:24]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v24
-; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v29
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v23
-; GFX9-O0-NEXT: v_or_b32_e64 v23, v5, v10
-; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v24, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[2:3]
-; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], s4, v[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v30
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v29
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_or_b32_e64 v4, v3, v4
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
-; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s5, v[0:1]
-; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[6:7]
-; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v30
-; GFX9-O0-NEXT: s_waitcnt vmcnt(10)
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v28
-; GFX9-O0-NEXT: v_or3_b32 v6, v6, v7, v10
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v29
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v27
-; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
-; GFX9-O0-NEXT: s_waitcnt vmcnt(8)
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v26
-; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v25
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v13, vcc, v13, v6
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v12, vcc, v12, v10, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v11, v4, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v5, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
-; GFX9-O0-NEXT: v_ashrrev_i64 v[13:14], s4, v[11:12]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v14
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], 1
-; GFX9-O0-NEXT: s_mov_b32 s8, s5
-; GFX9-O0-NEXT: v_and_b32_e64 v12, v7, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v13
-; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
-; GFX9-O0-NEXT: v_and_b32_e64 v14, v11, s4
-; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v23, v22
-; GFX9-O0-NEXT: v_and_b32_e64 v23, v7, v23
-; GFX9-O0-NEXT: v_and_b32_e64 v21, v11, v21
-; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v22, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v23, v20
-; GFX9-O0-NEXT: v_and_b32_e64 v7, v7, v23
-; GFX9-O0-NEXT: v_and_b32_e64 v23, v11, v19
-; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v24, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v23
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v24
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v22
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v6, vcc, v6, v20
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v10, vcc, v10, v19, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v4, vcc, v4, v11, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v5, v7, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v10
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v8
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 killed $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
-; GFX9-O0-NEXT: s_mov_b32 s5, s8
-; GFX9-O0-NEXT: s_mov_b32 s4, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
-; GFX9-O0-NEXT: v_add_co_u32_e32 v20, vcc, v11, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v9, vcc, v9, v11, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v10, v11, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v8, vcc, v8, v10, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr20 killed $vgpr20 def $vgpr20_vgpr21 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v21, v9
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v20
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
-; GFX9-O0-NEXT: v_mov_b32_e32 v22, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v21
-; GFX9-O0-NEXT: v_or_b32_e64 v19, v19, v22
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v20
-; GFX9-O0-NEXT: v_or_b32_e64 v17, v17, v18
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v19
-; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[17:18], v[12:13]
-; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v2
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v0
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v14
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12
-; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
-; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 4
-; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 5
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
-; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 8
-; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 9
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-O0-NEXT: s_cbranch_execnz .LBB3_6
-; GFX9-O0-NEXT: s_branch .LBB3_1
-; GFX9-O0-NEXT: .LBB3_7: ; %udiv-preheader
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_waitcnt vmcnt(9)
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], v4, v[21:22]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
-; GFX9-O0-NEXT: s_mov_b32 s6, 64
-; GFX9-O0-NEXT: v_sub_u32_e64 v12, s6, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], v12, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v24
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v23
-; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
-; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v4, s6
-; GFX9-O0-NEXT: v_sub_u32_e64 v5, v4, s6
-; GFX9-O0-NEXT: v_lshrrev_b64 v[23:24], v5, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[4:5]
-; GFX9-O0-NEXT: s_mov_b32 s6, 0
-; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v4, s6
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v22
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v23
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v21
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[6:7]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], v4, v[19:20]
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v5
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-O0-NEXT: s_mov_b32 s8, s7
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v12, v12, v15, s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v4
-; GFX9-O0-NEXT: s_mov_b32 s8, s6
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v5, s[4:5]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v14
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
-; GFX9-O0-NEXT: s_mov_b32 s5, s8
-; GFX9-O0-NEXT: s_mov_b32 s4, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v18
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
-; GFX9-O0-NEXT: v_add_co_u32_e32 v12, vcc, v12, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v15, v17, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, s5
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v15, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v13, vcc, v13, v15, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v13
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v17
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, s7
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
-; GFX9-O0-NEXT: v_writelane_b32 v16, s4, 8
-; GFX9-O0-NEXT: v_writelane_b32 v16, s5, 9
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_branch .LBB3_6
-; GFX9-O0-NEXT: .LBB3_8: ; %udiv-bb1
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1
-; GFX9-O0-NEXT: s_mov_b32 s5, s6
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
-; GFX9-O0-NEXT: s_mov_b32 s4, s7
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-O0-NEXT: s_mov_b32 s8, s6
-; GFX9-O0-NEXT: s_mov_b32 s9, s7
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-O0-NEXT: v_add_co_u32_e32 v9, vcc, v4, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v5, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s9
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b32 s4, 0x7f
-; GFX9-O0-NEXT: v_sub_u32_e64 v3, s4, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[5:6], v3, v[11:12]
-; GFX9-O0-NEXT: v_mov_b32_e32 v13, v6
-; GFX9-O0-NEXT: s_mov_b32 s4, 64
-; GFX9-O0-NEXT: v_sub_u32_e64 v14, s4, v3
-; GFX9-O0-NEXT: v_lshrrev_b64 v[14:15], v14, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v15
-; GFX9-O0-NEXT: v_or_b32_e64 v13, v13, v16
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v14
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v6
-; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v3, s4
-; GFX9-O0-NEXT: s_mov_b32 s10, 63
-; GFX9-O0-NEXT: v_sub_u32_e64 v4, s10, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[13:14], v4, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[4:5]
-; GFX9-O0-NEXT: s_mov_b32 s10, 0
-; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[10:11], v3, s10
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[10:11]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v13
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[10:11]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: ; implicit-def: $sgpr10
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-O0-NEXT: v_lshlrev_b64 v[7:8], v3, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, s9
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[4:5]
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
-; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v4, v7, s[4:5]
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v3
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v10
-; GFX9-O0-NEXT: v_or_b32_e64 v3, v3, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v9
-; GFX9-O0-NEXT: v_or_b32_e64 v1, v1, v2
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[1:2], s[6:7]
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s9
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
-; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], exec
-; GFX9-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
-; GFX9-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
-; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 6
-; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 7
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: s_cbranch_execz .LBB3_5
-; GFX9-O0-NEXT: s_branch .LBB3_7
-; GFX9-O0-NEXT: .LBB3_9: ; %udiv-end
-; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
-; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b32 s4, 32
-; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
-; GFX9-O0-NEXT: v_lshrrev_b64 v[2:3], s4, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v13
-; GFX9-O0-NEXT: v_mul_lo_u32 v5, v6, v2
-; GFX9-O0-NEXT: v_lshrrev_b64 v[13:14], s4, v[13:14]
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mul_lo_u32 v3, v7, v3
-; GFX9-O0-NEXT: v_mad_u64_u32 v[13:14], s[6:7], v7, v2, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
-; GFX9-O0-NEXT: v_add3_u32 v2, v2, v3, v5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-O0-NEXT: v_lshlrev_b64 v[17:18], s4, v[2:3]
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v18
-; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 killed $vgpr13_vgpr14 killed $exec
-; GFX9-O0-NEXT: s_mov_b32 s5, 0
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
-; GFX9-O0-NEXT: v_or_b32_e64 v13, v3, v5
-; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
-; GFX9-O0-NEXT: v_lshrrev_b64 v[2:3], s4, v[15:16]
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v11
-; GFX9-O0-NEXT: v_mul_lo_u32 v3, v2, v8
-; GFX9-O0-NEXT: v_lshrrev_b64 v[11:12], s4, v[11:12]
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v15
-; GFX9-O0-NEXT: v_mul_lo_u32 v11, v11, v5
-; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v2, v5, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v16
-; GFX9-O0-NEXT: v_add3_u32 v2, v2, v3, v11
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[2:3]
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 killed $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v16
-; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v15
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v14
-; GFX9-O0-NEXT: v_add_co_u32_e64 v13, s[6:7], v11, v12
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
-; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v8, v6, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v15
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v16
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v12
-; GFX9-O0-NEXT: v_lshlrev_b64 v[15:16], s4, v[15:16]
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v16
-; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v15
-; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
-; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v8, v7, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v11, v15
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v12
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v16
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
-; GFX9-O0-NEXT: v_lshlrev_b64 v[15:16], s4, v[15:16]
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v16
-; GFX9-O0-NEXT: v_or_b32_e64 v8, v8, v17
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v15
-; GFX9-O0-NEXT: v_or_b32_e64 v19, v11, v12
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v8
-; GFX9-O0-NEXT: v_mad_u64_u32 v[11:12], s[6:7], v5, v7, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v19
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v20
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v18
-; GFX9-O0-NEXT: v_add_co_u32_e64 v7, s[6:7], v7, v16
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v15, s[6:7], v8, v15, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v8
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0xffffffff
-; GFX9-O0-NEXT: s_mov_b32 s8, s7
-; GFX9-O0-NEXT: v_and_b32_e64 v15, v15, s8
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v7
-; GFX9-O0-NEXT: ; kill: def $sgpr6 killed $sgpr6 killed $sgpr6_sgpr7
-; GFX9-O0-NEXT: v_and_b32_e64 v17, v16, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15
-; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v5, v6, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v19, v15
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v20
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v16
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr7
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v6
-; GFX9-O0-NEXT: v_lshlrev_b64 v[15:16], s4, v[15:16]
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v16
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v19
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 killed $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_or_b32_e64 v19, v6, v15
-; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v19
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v20
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v18
-; GFX9-O0-NEXT: v_add_co_u32_e64 v5, s[6:7], v5, v16
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v15, s[6:7], v6, v15, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v15
-; GFX9-O0-NEXT: v_lshrrev_b64 v[17:18], s4, v[5:6]
-; GFX9-O0-NEXT: v_lshrrev_b64 v[7:8], s4, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v15, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v18
-; GFX9-O0-NEXT: v_add_co_u32_e64 v15, s[6:7], v15, v16
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v7, s[6:7], v7, v8, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v16
-; GFX9-O0-NEXT: v_add_co_u32_e64 v15, s[6:7], v7, v8
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v16, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v15
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v13
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v16
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v14
-; GFX9-O0-NEXT: v_add_co_u32_e64 v2, s[6:7], v2, v8
-; GFX9-O0-NEXT: v_addc_co_u32_e64 v7, s[6:7], v3, v7, s[6:7]
-; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v7
-; GFX9-O0-NEXT: v_lshlrev_b64 v[6:7], s4, v[5:6]
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: ; implicit-def: $sgpr6
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
-; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
-; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
-; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v10
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v7, vcc, v7, v8
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v5, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v8, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: v_lshrrev_b64 v[7:8], s4, v[7:8]
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
-; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-O0-NEXT: ; kill: killed $vgpr4
-; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_nop 0
-; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
- %div = urem i128 %lhs, %rhs
- ret i128 %div
-}
-
define i128 @v_sdiv_i128_v_pow2k(i128 %lhs) {
; GFX9-LABEL: v_sdiv_i128_v_pow2k:
; GFX9: ; %bb.0:
@@ -5246,106 +2392,6 @@ define i128 @v_sdiv_i128_v_pow2k(i128 %lhs) {
ret i128 %div
}
-define i128 @v_srem_i128_v_pow2k(i128 %lhs) {
-; GFX9-LABEL: v_srem_i128_v_pow2k:
-; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_ashrrev_i32_e32 v4, 31, v3
-; GFX9-NEXT: v_mov_b32_e32 v5, v4
-; GFX9-NEXT: v_lshrrev_b64 v[4:5], 31, v[4:5]
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v0, v4
-; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v1, v5, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v2, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v3, vcc
-; GFX9-NEXT: v_and_b32_e32 v4, -2, v4
-; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, 0, v0
-; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v4, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v5, vcc
-; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v6, vcc
-; GFX9-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-O0-LABEL: v_srem_i128_v_pow2k:
-; GFX9-O0: ; %bb.0:
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
-; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: v_ashrrev_i64 v[6:7], s4, v[6:7]
-; GFX9-O0-NEXT: s_mov_b32 s4, 31
-; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], s4, v[6:7]
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v7
-; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-O0-NEXT: s_mov_b32 s5, s6
-; GFX9-O0-NEXT: s_mov_b32 s4, s7
-; GFX9-O0-NEXT: v_add_co_u32_e32 v6, vcc, v5, v4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v4, vcc, v0, v2, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s5
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v8, vcc, v3, v2, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v1, v2, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v7
-; GFX9-O0-NEXT: s_mov_b32 s6, -2
-; GFX9-O0-NEXT: s_mov_b32 s4, 0
-; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5
-; GFX9-O0-NEXT: s_mov_b32 s5, s6
-; GFX9-O0-NEXT: s_mov_b32 s6, s5
-; GFX9-O0-NEXT: v_and_b32_e64 v4, v4, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
-; GFX9-O0-NEXT: v_and_b32_e64 v9, v6, s4
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v8
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v9
-; GFX9-O0-NEXT: v_sub_co_u32_e32 v5, vcc, v5, v7
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v4, vcc
-; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v5
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: s_mov_b32 s4, 32
-; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
-; GFX9-O0-NEXT: v_lshrrev_b64 v[3:4], s4, v[3:4]
-; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr3_vgpr4 killed $exec
-; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
- %div = srem i128 %lhs, 8589934592
- ret i128 %div
-}
-
define i128 @v_udiv_i128_v_pow2k(i128 %lhs) {
; GFX9-LABEL: v_udiv_i128_v_pow2k:
; GFX9: ; %bb.0:
@@ -5392,55 +2438,6 @@ define i128 @v_udiv_i128_v_pow2k(i128 %lhs) {
ret i128 %div
}
-define i128 @v_urem_i128_v_pow2k(i128 %lhs) {
-; GFX9-LABEL: v_urem_i128_v_pow2k:
-; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_and_b32_e32 v1, 1, v1
-; GFX9-NEXT: v_mov_b32_e32 v2, 0
-; GFX9-NEXT: v_mov_b32_e32 v3, 0
-; GFX9-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-O0-LABEL: v_urem_i128_v_pow2k:
-; GFX9-O0: ; %bb.0:
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr1 killed $exec
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-O0-NEXT: s_mov_b32 s6, 1
-; GFX9-O0-NEXT: s_mov_b32 s4, -1
-; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5
-; GFX9-O0-NEXT: s_mov_b32 s5, s6
-; GFX9-O0-NEXT: s_mov_b32 s6, s5
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-O0-NEXT: v_and_b32_e64 v3, v2, s6
-; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 killed $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_and_b32_e64 v1, v0, s4
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: s_mov_b32 s4, 32
-; GFX9-O0-NEXT: v_lshrrev_b64 v[1:2], s4, v[1:2]
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, 0
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
-; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
- %div = urem i128 %lhs, 8589934592
- ret i128 %div
-}
-
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX9-SDAG: {{.*}}
; GFX9-SDAG-O0: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/fmaxnum.ll b/llvm/test/CodeGen/AMDGPU/fmaxnum.ll
index 09898f1..38640a1 100644
--- a/llvm/test/CodeGen/AMDGPU/fmaxnum.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmaxnum.ll
@@ -152,7 +152,7 @@ define amdgpu_kernel void @constant_fold_fmax_f32_p0_n0(ptr addrspace(1) %out) #
; GCN-LABEL: {{^}}constant_fold_fmax_f32_n0_p0:
; GCN-NOT: v_max_f32_e32
-; GCN: v_bfrev_b32_e32 [[REG:v[0-9]+]], 1{{$}}
+; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword [[REG]]
define amdgpu_kernel void @constant_fold_fmax_f32_n0_p0(ptr addrspace(1) %out) #0 {
%val = call float @llvm.maxnum.f32(float -0.0, float 0.0)
diff --git a/llvm/test/CodeGen/AMDGPU/fminnum.ll b/llvm/test/CodeGen/AMDGPU/fminnum.ll
index 844d26a..65b3118 100644
--- a/llvm/test/CodeGen/AMDGPU/fminnum.ll
+++ b/llvm/test/CodeGen/AMDGPU/fminnum.ll
@@ -150,7 +150,7 @@ define amdgpu_kernel void @constant_fold_fmin_f32_p0_p0(ptr addrspace(1) %out) #
; GCN-LABEL: {{^}}constant_fold_fmin_f32_p0_n0:
; GCN-NOT: v_min_f32_e32
-; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0
+; GCN: v_bfrev_b32_e32 [[REG:v[0-9]+]], 1{{$}}
; GCN: buffer_store_dword [[REG]]
define amdgpu_kernel void @constant_fold_fmin_f32_p0_n0(ptr addrspace(1) %out) #0 {
%val = call float @llvm.minnum.f32(float 0.0, float -0.0)
diff --git a/llvm/test/CodeGen/AMDGPU/rem_i128.ll b/llvm/test/CodeGen/AMDGPU/rem_i128.ll
new file mode 100644
index 0000000..6ba66cc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/rem_i128.ll
@@ -0,0 +1,3014 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9,GFX9-SDAG %s
+; RUN: llc -O0 -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-O0,GFX9-SDAG-O0 %s
+
+; FIXME: GlobalISel missing the power-of-2 cases in legalization. https://github.com/llvm/llvm-project/issues/80671
+; xUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9,GFX9 %s
+; xUN: llc -O0 -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-O0,GFX9-O0 %s
+
+define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) {
+; GFX9-LABEL: v_srem_i128_vv:
+; GFX9: ; %bb.0: ; %_udiv-special-cases
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_ashrrev_i32_e32 v20, 31, v3
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v20
+; GFX9-NEXT: v_xor_b32_e32 v10, v2, v20
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v20
+; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v20
+; GFX9-NEXT: v_xor_b32_e32 v9, v3, v20
+; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v20, vcc
+; GFX9-NEXT: v_ashrrev_i32_e32 v8, 31, v7
+; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v10, v20, vcc
+; GFX9-NEXT: v_xor_b32_e32 v4, v4, v8
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v9, v20, vcc
+; GFX9-NEXT: v_xor_b32_e32 v5, v5, v8
+; GFX9-NEXT: v_sub_co_u32_e32 v23, vcc, v4, v8
+; GFX9-NEXT: v_xor_b32_e32 v6, v6, v8
+; GFX9-NEXT: v_subb_co_u32_e32 v21, vcc, v5, v8, vcc
+; GFX9-NEXT: v_xor_b32_e32 v7, v7, v8
+; GFX9-NEXT: v_subb_co_u32_e32 v4, vcc, v6, v8, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v8, vcc
+; GFX9-NEXT: v_or_b32_e32 v7, v21, v5
+; GFX9-NEXT: v_or_b32_e32 v6, v23, v4
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GFX9-NEXT: v_or_b32_e32 v7, v3, v1
+; GFX9-NEXT: v_or_b32_e32 v6, v2, v0
+; GFX9-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[6:7]
+; GFX9-NEXT: v_ffbh_u32_e32 v6, v4
+; GFX9-NEXT: v_add_u32_e32 v6, 32, v6
+; GFX9-NEXT: v_ffbh_u32_e32 v7, v5
+; GFX9-NEXT: v_min_u32_e32 v6, v6, v7
+; GFX9-NEXT: v_ffbh_u32_e32 v7, v23
+; GFX9-NEXT: v_add_u32_e32 v7, 32, v7
+; GFX9-NEXT: v_ffbh_u32_e32 v8, v21
+; GFX9-NEXT: v_min_u32_e32 v7, v7, v8
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, 64, v7
+; GFX9-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, 0, vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; GFX9-NEXT: v_ffbh_u32_e32 v9, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v6, vcc
+; GFX9-NEXT: v_ffbh_u32_e32 v7, v0
+; GFX9-NEXT: v_add_u32_e32 v7, 32, v7
+; GFX9-NEXT: v_min_u32_e32 v7, v7, v9
+; GFX9-NEXT: v_ffbh_u32_e32 v9, v2
+; GFX9-NEXT: v_add_u32_e32 v9, 32, v9
+; GFX9-NEXT: v_ffbh_u32_e32 v10, v3
+; GFX9-NEXT: v_min_u32_e32 v9, v9, v10
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, 0, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, 64, v9
+; GFX9-NEXT: v_addc_co_u32_e64 v10, s[6:7], 0, 0, vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GFX9-NEXT: s_mov_b64 s[6:7], 0x7f
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v9, v7, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v10, v10, 0, vcc
+; GFX9-NEXT: v_sub_co_u32_e32 v6, vcc, v6, v7
+; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v8, v10, vcc
+; GFX9-NEXT: v_mov_b32_e32 v9, 0
+; GFX9-NEXT: v_subbrev_co_u32_e32 v8, vcc, 0, v9, vcc
+; GFX9-NEXT: v_subbrev_co_u32_e32 v9, vcc, 0, v9, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[6:7]
+; GFX9-NEXT: v_or_b32_e32 v13, v7, v9
+; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GFX9-NEXT: v_mov_b32_e32 v22, v20
+; GFX9-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v11, v10, vcc
+; GFX9-NEXT: v_and_b32_e32 v10, 1, v10
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v10
+; GFX9-NEXT: v_xor_b32_e32 v10, 0x7f, v6
+; GFX9-NEXT: v_or_b32_e32 v12, v10, v8
+; GFX9-NEXT: s_or_b64 s[4:5], s[4:5], vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13]
+; GFX9-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; GFX9-NEXT: v_cndmask_b32_e64 v11, v1, 0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v0, 0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v10, v3, 0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[4:5]
+; GFX9-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GFX9-NEXT: s_cbranch_execz .LBB0_6
+; GFX9-NEXT: ; %bb.1: ; %udiv-bb1
+; GFX9-NEXT: v_add_co_u32_e32 v24, vcc, 1, v6
+; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, 0, v7, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v26, vcc, 0, v8, vcc
+; GFX9-NEXT: v_sub_u32_e32 v13, 0x7f, v6
+; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, 0, v9, vcc
+; GFX9-NEXT: v_sub_u32_e32 v11, 64, v13
+; GFX9-NEXT: v_or_b32_e32 v8, v25, v27
+; GFX9-NEXT: v_or_b32_e32 v7, v24, v26
+; GFX9-NEXT: v_lshlrev_b64 v[9:10], v13, v[0:1]
+; GFX9-NEXT: v_lshrrev_b64 v[11:12], v11, v[2:3]
+; GFX9-NEXT: v_sub_u32_e32 v6, 63, v6
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GFX9-NEXT: v_lshlrev_b64 v[6:7], v6, v[2:3]
+; GFX9-NEXT: v_or_b32_e32 v8, v10, v12
+; GFX9-NEXT: v_or_b32_e32 v9, v9, v11
+; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v13
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v13
+; GFX9-NEXT: v_lshlrev_b64 v[12:13], v13, v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v8, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0
+; GFX9-NEXT: v_mov_b32_e32 v10, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v1, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v0, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, v13, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v9, 0
+; GFX9-NEXT: v_mov_b32_e32 v11, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, v12, s[4:5]
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_xor_b64 s[6:7], exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execz .LBB0_5
+; GFX9-NEXT: ; %bb.2: ; %udiv-preheader
+; GFX9-NEXT: v_sub_u32_e32 v10, 64, v24
+; GFX9-NEXT: v_lshrrev_b64 v[8:9], v24, v[2:3]
+; GFX9-NEXT: v_lshlrev_b64 v[10:11], v10, v[0:1]
+; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v24
+; GFX9-NEXT: v_or_b32_e32 v10, v8, v10
+; GFX9-NEXT: v_subrev_u32_e32 v8, 64, v24
+; GFX9-NEXT: v_or_b32_e32 v11, v9, v11
+; GFX9-NEXT: v_lshrrev_b64 v[8:9], v8, v[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v24
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v15, v9, v3, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v8, v10, vcc
+; GFX9-NEXT: v_lshrrev_b64 v[8:9], v24, v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v14, v10, v2, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e32 v17, 0, v9, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v16, 0, v8, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v28, vcc, -1, v23
+; GFX9-NEXT: v_addc_co_u32_e32 v29, vcc, -1, v21, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v30, vcc, -1, v4, vcc
+; GFX9-NEXT: v_mov_b32_e32 v18, 0
+; GFX9-NEXT: v_mov_b32_e32 v10, 0
+; GFX9-NEXT: v_addc_co_u32_e32 v31, vcc, -1, v5, vcc
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v19, 0
+; GFX9-NEXT: v_mov_b32_e32 v11, 0
+; GFX9-NEXT: v_mov_b32_e32 v9, 0
+; GFX9-NEXT: .LBB0_3: ; %udiv-do-while
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_lshrrev_b32_e32 v32, 31, v15
+; GFX9-NEXT: v_lshlrev_b64 v[14:15], 1, v[14:15]
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 31, v7
+; GFX9-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
+; GFX9-NEXT: v_lshrrev_b32_e32 v8, 31, v13
+; GFX9-NEXT: v_lshlrev_b64 v[16:17], 1, v[16:17]
+; GFX9-NEXT: v_or_b32_e32 v14, v14, v33
+; GFX9-NEXT: v_or3_b32 v6, v6, v8, v10
+; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v28, v14
+; GFX9-NEXT: v_or_b32_e32 v16, v16, v32
+; GFX9-NEXT: v_subb_co_u32_e32 v8, vcc, v29, v15, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v8, vcc, v30, v16, vcc
+; GFX9-NEXT: v_lshlrev_b64 v[12:13], 1, v[12:13]
+; GFX9-NEXT: v_subb_co_u32_e32 v8, vcc, v31, v17, vcc
+; GFX9-NEXT: v_ashrrev_i32_e32 v8, 31, v8
+; GFX9-NEXT: v_or_b32_e32 v12, v18, v12
+; GFX9-NEXT: v_and_b32_e32 v18, v8, v23
+; GFX9-NEXT: v_or_b32_e32 v13, v19, v13
+; GFX9-NEXT: v_and_b32_e32 v19, v8, v21
+; GFX9-NEXT: v_sub_co_u32_e32 v14, vcc, v14, v18
+; GFX9-NEXT: v_and_b32_e32 v32, v8, v4
+; GFX9-NEXT: v_subb_co_u32_e32 v15, vcc, v15, v19, vcc
+; GFX9-NEXT: v_and_b32_e32 v33, v8, v5
+; GFX9-NEXT: v_subb_co_u32_e32 v16, vcc, v16, v32, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v17, vcc, v17, v33, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v24, vcc, -1, v24
+; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, -1, v25, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v26, vcc, -1, v26, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, -1, v27, vcc
+; GFX9-NEXT: v_or_b32_e32 v18, v24, v26
+; GFX9-NEXT: v_or_b32_e32 v19, v25, v27
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[18:19]
+; GFX9-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX9-NEXT: v_mov_b32_e32 v19, v9
+; GFX9-NEXT: v_or3_b32 v7, v7, 0, v11
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v18, v8
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB0_3
+; GFX9-NEXT: ; %bb.4: ; %Flow
+; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT: .LBB0_5: ; %Flow2
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_lshlrev_b64 v[14:15], 1, v[12:13]
+; GFX9-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
+; GFX9-NEXT: v_lshrrev_b32_e32 v12, 31, v13
+; GFX9-NEXT: v_or3_b32 v11, v7, 0, v11
+; GFX9-NEXT: v_or3_b32 v12, v6, v12, v10
+; GFX9-NEXT: v_or_b32_e32 v10, v9, v15
+; GFX9-NEXT: v_or_b32_e32 v13, v8, v14
+; GFX9-NEXT: .LBB0_6: ; %Flow3
+; GFX9-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX9-NEXT: v_mul_lo_u32 v16, v13, v5
+; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v23, v13, 0
+; GFX9-NEXT: v_mov_b32_e32 v15, 0
+; GFX9-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v13, v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v21, v13, v[14:15]
+; GFX9-NEXT: v_mul_lo_u32 v9, v10, v4
+; GFX9-NEXT: v_mul_lo_u32 v11, v11, v23
+; GFX9-NEXT: v_mov_b32_e32 v4, v14
+; GFX9-NEXT: v_mov_b32_e32 v14, v15
+; GFX9-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v23, v10, v[13:14]
+; GFX9-NEXT: v_add3_u32 v8, v8, v16, v9
+; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v12, v23, v[7:8]
+; GFX9-NEXT: v_mov_b32_e32 v8, v14
+; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v4, v8
+; GFX9-NEXT: v_addc_co_u32_e64 v9, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mul_lo_u32 v12, v12, v21
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v21, v10, v[8:9]
+; GFX9-NEXT: v_add3_u32 v4, v11, v7, v12
+; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v8, v6
+; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v9, v4, vcc
+; GFX9-NEXT: v_mov_b32_e32 v7, v13
+; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, v2, v5
+; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v7, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v4, vcc
+; GFX9-NEXT: v_xor_b32_e32 v5, v0, v20
+; GFX9-NEXT: v_xor_b32_e32 v0, v2, v20
+; GFX9-NEXT: v_xor_b32_e32 v4, v1, v22
+; GFX9-NEXT: v_xor_b32_e32 v1, v3, v22
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v20
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v22, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v5, v20, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v4, v22, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-O0-LABEL: v_srem_i128_vv:
+; GFX9-O0: ; %bb.0: ; %_udiv-special-cases
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: ; implicit-def: $vgpr8 : SGPR spill to VGPR lane
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v0
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
+; GFX9-O0-NEXT: s_waitcnt vmcnt(1)
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v1
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: s_mov_b32 s4, 63
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v14
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15
+; GFX9-O0-NEXT: v_ashrrev_i64 v[12:13], s4, v[6:7]
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v13
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
+; GFX9-O0-NEXT: v_ashrrev_i64 v[6:7], s4, v[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v13
+; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-O0-NEXT: v_xor_b32_e64 v13, v11, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v10
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_xor_b32_e64 v15, v4, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v7
+; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_xor_b32_e64 v7, v5, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
+; GFX9-O0-NEXT: v_xor_b32_e64 v1, v1, v4
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_xor_b32_e64 v2, v2, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v16
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v14
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v9, vcc, v9, v12
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v10, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v13, vcc, v11, v12, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v10, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v8
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v1, vcc, v1, v6
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v4, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v5, v6, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v4, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v2
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v2
+; GFX9-O0-NEXT: v_or_b32_e64 v3, v8, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
+; GFX9-O0-NEXT: v_or_b32_e64 v1, v5, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 0
+; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 1
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[1:2], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: v_or_b32_e64 v15, v4, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-O0-NEXT: v_or_b32_e64 v9, v3, v1
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v15
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], s[6:7]
+; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-O0-NEXT: s_mov_b32 s9, 32
+; GFX9-O0-NEXT: v_add_u32_e64 v6, v6, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-O0-NEXT: v_min_u32_e64 v6, v6, v7
+; GFX9-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v7
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v8, v8
+; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
+; GFX9-O0-NEXT: s_mov_b64 s[10:11], 64
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
+; GFX9-O0-NEXT: s_mov_b32 s12, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
+; GFX9-O0-NEXT: s_mov_b32 s14, s11
+; GFX9-O0-NEXT: v_add_co_u32_e64 v8, s[12:13], v8, s12
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, s14
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[12:13], v5, v9, s[12:13]
+; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[12:13], v[11:12], s[6:7]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v10, s[12:13]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, v6, v7, s[12:13]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v1
+; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v2
+; GFX9-O0-NEXT: v_min_u32_e64 v6, v5, v6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v3
+; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v11, v4
+; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v11
+; GFX9-O0-NEXT: ; implicit-def: $sgpr9
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v15
+; GFX9-O0-NEXT: s_mov_b32 s8, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
+; GFX9-O0-NEXT: s_mov_b32 s10, s11
+; GFX9-O0-NEXT: v_add_co_u32_e64 v11, s[8:9], v11, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s10
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[8:9], v5, v12, s[8:9]
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[8:9], v[13:14], s[6:7]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v8, s[8:9]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[8:9]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-O0-NEXT: s_mov_b32 s10, s6
+; GFX9-O0-NEXT: s_mov_b32 s11, s7
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v5, vcc, v5, v8
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v9, vcc, v6, v7, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v8, vcc, v6, v7, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, s11
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, s11
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v6, v7, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v9
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], s[8:9]
+; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f
+; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13]
+; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[5:6], s[14:15]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v10, 0, 1, s[14:15]
+; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7]
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[8:9], s[14:15]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[14:15]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v7, v10, s[8:9]
+; GFX9-O0-NEXT: v_and_b32_e64 v7, 1, v7
+; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[8:9], v7, 1
+; GFX9-O0-NEXT: s_or_b64 s[8:9], s[4:5], s[8:9]
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-O0-NEXT: s_xor_b64 s[4:5], s[8:9], s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: s_mov_b32 s14, s13
+; GFX9-O0-NEXT: v_xor_b32_e64 v7, v7, s14
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
+; GFX9-O0-NEXT: v_xor_b32_e64 v5, v5, s12
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v9
+; GFX9-O0-NEXT: v_or_b32_e64 v7, v7, v10
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[6:7], v[5:6], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v2, v5, s[8:9]
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, s10
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[8:9]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v4, v5, s[8:9]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s10
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[8:9]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-O0-NEXT: s_and_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], exec
+; GFX9-O0-NEXT: v_writelane_b32 v0, s4, 2
+; GFX9-O0-NEXT: v_writelane_b32 v0, s5, 3
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: s_cbranch_execz .LBB0_3
+; GFX9-O0-NEXT: s_branch .LBB0_8
+; GFX9-O0-NEXT: .LBB0_1: ; %Flow
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s4, v0, 4
+; GFX9-O0-NEXT: v_readlane_b32 s5, v0, 5
+; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: ; %bb.2: ; %Flow
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(6)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB0_5
+; GFX9-O0-NEXT: .LBB0_3: ; %Flow2
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s4, v4, 2
+; GFX9-O0-NEXT: v_readlane_b32 s5, v4, 3
+; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB0_9
+; GFX9-O0-NEXT: .LBB0_4: ; %udiv-loop-exit
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
+; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[0:1]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_lshlrev_b64 v[9:10], s4, v[9:10]
+; GFX9-O0-NEXT: s_mov_b32 s4, 63
+; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v8
+; GFX9-O0-NEXT: v_or3_b32 v4, v4, v11, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v9
+; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
+; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v4
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB0_3
+; GFX9-O0-NEXT: .LBB0_5: ; %Flow1
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s4, v8, 6
+; GFX9-O0-NEXT: v_readlane_b32 s5, v8, 7
+; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB0_4
+; GFX9-O0-NEXT: .LBB0_6: ; %udiv-do-while
+; GFX9-O0-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s6, v16, 8
+; GFX9-O0-NEXT: v_readlane_b32 s7, v16, 9
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b32 s4, 63
+; GFX9-O0-NEXT: s_waitcnt vmcnt(16)
+; GFX9-O0-NEXT: v_lshrrev_b64 v[29:30], s4, v[2:3]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v30
+; GFX9-O0-NEXT: s_mov_b32 s5, 1
+; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], s5, v[23:24]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v24
+; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v29
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v23
+; GFX9-O0-NEXT: v_or_b32_e64 v23, v5, v10
+; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v24, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[2:3]
+; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], s4, v[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v30
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v29
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_or_b32_e64 v4, v3, v4
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s5, v[0:1]
+; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[6:7]
+; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v30
+; GFX9-O0-NEXT: s_waitcnt vmcnt(10)
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v28
+; GFX9-O0-NEXT: v_or3_b32 v6, v6, v7, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v29
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v27
+; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-O0-NEXT: s_waitcnt vmcnt(8)
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v26
+; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v25
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v13, vcc, v13, v6
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v12, vcc, v12, v10, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v11, v4, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v5, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
+; GFX9-O0-NEXT: v_ashrrev_i64 v[13:14], s4, v[11:12]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], 1
+; GFX9-O0-NEXT: s_mov_b32 s8, s5
+; GFX9-O0-NEXT: v_and_b32_e64 v12, v7, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v13
+; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
+; GFX9-O0-NEXT: v_and_b32_e64 v14, v11, s4
+; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v23, v22
+; GFX9-O0-NEXT: v_and_b32_e64 v23, v7, v23
+; GFX9-O0-NEXT: v_and_b32_e64 v21, v11, v21
+; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v22, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v23, v20
+; GFX9-O0-NEXT: v_and_b32_e64 v7, v7, v23
+; GFX9-O0-NEXT: v_and_b32_e64 v23, v11, v19
+; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v24, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v24
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v22
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v6, vcc, v6, v20
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v10, vcc, v10, v19, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v4, vcc, v4, v11, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v5, v7, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v8
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 killed $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
+; GFX9-O0-NEXT: s_mov_b32 s5, s8
+; GFX9-O0-NEXT: s_mov_b32 s4, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
+; GFX9-O0-NEXT: v_add_co_u32_e32 v20, vcc, v11, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v9, vcc, v9, v11, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v10, v11, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v8, vcc, v8, v10, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr20 killed $vgpr20 def $vgpr20_vgpr21 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v21, v9
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v20
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
+; GFX9-O0-NEXT: v_mov_b32_e32 v22, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v21
+; GFX9-O0-NEXT: v_or_b32_e64 v19, v19, v22
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v20
+; GFX9-O0-NEXT: v_or_b32_e64 v17, v17, v18
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v19
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[17:18], v[12:13]
+; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v2
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v0
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v14
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 4
+; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 5
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 8
+; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 9
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: s_cbranch_execnz .LBB0_6
+; GFX9-O0-NEXT: s_branch .LBB0_1
+; GFX9-O0-NEXT: .LBB0_7: ; %udiv-preheader
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(9)
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], v4, v[21:22]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-O0-NEXT: s_mov_b32 s6, 64
+; GFX9-O0-NEXT: v_sub_u32_e64 v12, s6, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], v12, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v24
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v23
+; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
+; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v4, s6
+; GFX9-O0-NEXT: v_sub_u32_e64 v5, v4, s6
+; GFX9-O0-NEXT: v_lshrrev_b64 v[23:24], v5, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[4:5]
+; GFX9-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v4, s6
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v22
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v23
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v21
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[6:7]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], v4, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v5
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-O0-NEXT: s_mov_b32 s8, s7
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v12, v12, v15, s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v4
+; GFX9-O0-NEXT: s_mov_b32 s8, s6
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v5, s[4:5]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v14
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
+; GFX9-O0-NEXT: s_mov_b32 s5, s8
+; GFX9-O0-NEXT: s_mov_b32 s4, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
+; GFX9-O0-NEXT: v_add_co_u32_e32 v12, vcc, v12, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v15, v17, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v15, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v13, vcc, v13, v15, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v13
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v17
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, s7
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
+; GFX9-O0-NEXT: v_writelane_b32 v16, s4, 8
+; GFX9-O0-NEXT: v_writelane_b32 v16, s5, 9
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB0_6
+; GFX9-O0-NEXT: .LBB0_8: ; %udiv-bb1
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1
+; GFX9-O0-NEXT: s_mov_b32 s5, s6
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
+; GFX9-O0-NEXT: s_mov_b32 s4, s7
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-O0-NEXT: s_mov_b32 s8, s6
+; GFX9-O0-NEXT: s_mov_b32 s9, s7
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-O0-NEXT: v_add_co_u32_e32 v9, vcc, v4, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v5, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s9
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v2
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b32 s4, 0x7f
+; GFX9-O0-NEXT: v_sub_u32_e64 v3, s4, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[5:6], v3, v[11:12]
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v6
+; GFX9-O0-NEXT: s_mov_b32 s4, 64
+; GFX9-O0-NEXT: v_sub_u32_e64 v14, s4, v3
+; GFX9-O0-NEXT: v_lshrrev_b64 v[14:15], v14, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v15
+; GFX9-O0-NEXT: v_or_b32_e64 v13, v13, v16
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v14
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v6
+; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v3, s4
+; GFX9-O0-NEXT: s_mov_b32 s10, 63
+; GFX9-O0-NEXT: v_sub_u32_e64 v4, s10, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[13:14], v4, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[4:5]
+; GFX9-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[10:11], v3, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[10:11]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v13
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[10:11]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[7:8], v3, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, s9
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[4:5]
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v4, v7, s[4:5]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v3
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v10
+; GFX9-O0-NEXT: v_or_b32_e64 v3, v3, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v9
+; GFX9-O0-NEXT: v_or_b32_e64 v1, v1, v2
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[1:2], s[6:7]
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], exec
+; GFX9-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 6
+; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 7
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: s_cbranch_execz .LBB0_5
+; GFX9-O0-NEXT: s_branch .LBB0_7
+; GFX9-O0-NEXT: .LBB0_9: ; %udiv-end
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b32 s4, 32
+; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
+; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[5:6]
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v17
+; GFX9-O0-NEXT: v_mul_lo_u32 v3, v1, v0
+; GFX9-O0-NEXT: v_lshrrev_b64 v[17:18], s4, v[17:18]
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v17
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mul_lo_u32 v2, v5, v2
+; GFX9-O0-NEXT: v_mad_u64_u32 v[17:18], s[6:7], v5, v0, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v18
+; GFX9-O0-NEXT: v_add3_u32 v2, v0, v2, v3
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[2:3]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 killed $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: s_mov_b32 s5, 0
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v18
+; GFX9-O0-NEXT: v_or_b32_e64 v0, v0, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v17
+; GFX9-O0-NEXT: v_or_b32_e64 v17, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v0
+; GFX9-O0-NEXT: v_lshrrev_b64 v[2:3], s4, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v11
+; GFX9-O0-NEXT: v_mul_lo_u32 v3, v2, v6
+; GFX9-O0-NEXT: v_lshrrev_b64 v[11:12], s4, v[11:12]
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v19
+; GFX9-O0-NEXT: v_mul_lo_u32 v11, v11, v0
+; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v2, v0, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v20
+; GFX9-O0-NEXT: v_add3_u32 v2, v2, v3, v11
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[2:3]
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 killed $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v20
+; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v19
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v18
+; GFX9-O0-NEXT: v_add_co_u32_e64 v17, s[6:7], v11, v12
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v2
+; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v6, v1, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v19
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v20
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v12
+; GFX9-O0-NEXT: v_lshlrev_b64 v[19:20], s4, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v20
+; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v19
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v6, v5, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v19
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v20
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v21, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
+; GFX9-O0-NEXT: v_lshlrev_b64 v[19:20], s4, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v21, v20
+; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v21
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v19
+; GFX9-O0-NEXT: v_or_b32_e64 v23, v11, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v24, v6
+; GFX9-O0-NEXT: v_mad_u64_u32 v[11:12], s[6:7], v0, v5, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v21, v12
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v22, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v24
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v22
+; GFX9-O0-NEXT: v_add_co_u32_e64 v5, s[6:7], v5, v20
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v19, s[6:7], v6, v19, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v19
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v6
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0xffffffff
+; GFX9-O0-NEXT: s_mov_b32 s8, s7
+; GFX9-O0-NEXT: v_and_b32_e64 v19, v19, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
+; GFX9-O0-NEXT: ; kill: def $sgpr6 killed $sgpr6 killed $sgpr6_sgpr7
+; GFX9-O0-NEXT: v_and_b32_e64 v21, v20, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v22, v19
+; GFX9-O0-NEXT: v_mad_u64_u32 v[19:20], s[6:7], v0, v1, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v23, v19
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v24, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v24
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v20
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v1
+; GFX9-O0-NEXT: v_lshlrev_b64 v[19:20], s4, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v20
+; GFX9-O0-NEXT: v_or_b32_e64 v0, v0, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v23
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 killed $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_or_b32_e64 v23, v1, v19
+; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v24, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v24
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v22
+; GFX9-O0-NEXT: v_add_co_u32_e64 v0, s[6:7], v0, v20
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v19, s[6:7], v1, v19, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v19
+; GFX9-O0-NEXT: v_lshrrev_b64 v[21:22], s4, v[0:1]
+; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v21
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v22
+; GFX9-O0-NEXT: v_add_co_u32_e64 v19, s[6:7], v19, v20
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[6:7], v5, v6, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v19
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v20
+; GFX9-O0-NEXT: v_add_co_u32_e64 v19, s[6:7], v5, v6
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v19
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v20
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v18
+; GFX9-O0-NEXT: v_add_co_u32_e64 v2, s[6:7], v2, v6
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[6:7], v3, v5, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-O0-NEXT: v_lshlrev_b64 v[0:1], s4, v[0:1]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v11
+; GFX9-O0-NEXT: v_or_b32_e64 v0, v0, v1
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v16
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v14
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v12
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v11, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v6, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v2, vcc, v1, v2, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v10
+; GFX9-O0-NEXT: v_xor_b32_e64 v3, v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-O0-NEXT: v_xor_b32_e64 v9, v6, v5
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-O0-NEXT: v_xor_b32_e64 v3, v3, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 killed $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-O0-NEXT: v_xor_b32_e64 v0, v0, v8
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v10
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v7, vcc, v7, v8
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v5, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: v_lshrrev_b64 v[7:8], s4, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
+; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-O0-NEXT: ; kill: killed $vgpr4
+; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_nop 0
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+ %div = srem i128 %lhs, %rhs
+ ret i128 %div
+}
+
+define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) {
+; GFX9-LABEL: v_urem_i128_vv:
+; GFX9: ; %bb.0: ; %_udiv-special-cases
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_or_b32_e32 v9, v5, v7
+; GFX9-NEXT: v_or_b32_e32 v8, v4, v6
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX9-NEXT: v_or_b32_e32 v9, v1, v3
+; GFX9-NEXT: v_or_b32_e32 v8, v0, v2
+; GFX9-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[8:9]
+; GFX9-NEXT: v_ffbh_u32_e32 v8, v6
+; GFX9-NEXT: v_add_u32_e32 v8, 32, v8
+; GFX9-NEXT: v_ffbh_u32_e32 v9, v7
+; GFX9-NEXT: v_min_u32_e32 v8, v8, v9
+; GFX9-NEXT: v_ffbh_u32_e32 v9, v4
+; GFX9-NEXT: v_add_u32_e32 v9, 32, v9
+; GFX9-NEXT: v_ffbh_u32_e32 v10, v5
+; GFX9-NEXT: v_min_u32_e32 v9, v9, v10
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, 64, v9
+; GFX9-NEXT: v_addc_co_u32_e64 v10, s[6:7], 0, 0, vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GFX9-NEXT: v_ffbh_u32_e32 v11, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; GFX9-NEXT: v_ffbh_u32_e32 v9, v2
+; GFX9-NEXT: v_add_u32_e32 v9, 32, v9
+; GFX9-NEXT: v_min_u32_e32 v9, v9, v11
+; GFX9-NEXT: v_ffbh_u32_e32 v11, v0
+; GFX9-NEXT: v_add_u32_e32 v11, 32, v11
+; GFX9-NEXT: v_ffbh_u32_e32 v12, v1
+; GFX9-NEXT: v_min_u32_e32 v11, v11, v12
+; GFX9-NEXT: v_cndmask_b32_e64 v10, v10, 0, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, 64, v11
+; GFX9-NEXT: v_addc_co_u32_e64 v12, s[6:7], 0, 0, vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GFX9-NEXT: s_mov_b64 s[6:7], 0x7f
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v11, v9, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, 0, vcc
+; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v8, v9
+; GFX9-NEXT: v_subb_co_u32_e32 v9, vcc, v10, v12, vcc
+; GFX9-NEXT: v_mov_b32_e32 v11, 0
+; GFX9-NEXT: v_subbrev_co_u32_e32 v10, vcc, 0, v11, vcc
+; GFX9-NEXT: v_subbrev_co_u32_e32 v11, vcc, 0, v11, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[8:9]
+; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GFX9-NEXT: v_cndmask_b32_e32 v12, v13, v12, vcc
+; GFX9-NEXT: v_and_b32_e32 v12, 1, v12
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v12
+; GFX9-NEXT: v_xor_b32_e32 v12, 0x7f, v8
+; GFX9-NEXT: v_or_b32_e32 v13, v9, v11
+; GFX9-NEXT: v_or_b32_e32 v12, v12, v10
+; GFX9-NEXT: s_or_b64 s[4:5], s[4:5], vcc
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13]
+; GFX9-NEXT: s_xor_b64 s[6:7], s[4:5], -1
+; GFX9-NEXT: v_cndmask_b32_e64 v15, v3, 0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v14, v2, 0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v13, v1, 0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v0, 0, s[4:5]
+; GFX9-NEXT: s_and_b64 s[4:5], s[6:7], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GFX9-NEXT: s_cbranch_execz .LBB1_6
+; GFX9-NEXT: ; %bb.1: ; %udiv-bb1
+; GFX9-NEXT: v_add_co_u32_e32 v22, vcc, 1, v8
+; GFX9-NEXT: v_addc_co_u32_e32 v23, vcc, 0, v9, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v24, vcc, 0, v10, vcc
+; GFX9-NEXT: v_sub_u32_e32 v15, 0x7f, v8
+; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, 0, v11, vcc
+; GFX9-NEXT: v_sub_u32_e32 v13, 64, v15
+; GFX9-NEXT: v_or_b32_e32 v10, v23, v25
+; GFX9-NEXT: v_or_b32_e32 v9, v22, v24
+; GFX9-NEXT: v_lshlrev_b64 v[11:12], v15, v[2:3]
+; GFX9-NEXT: v_lshrrev_b64 v[13:14], v13, v[0:1]
+; GFX9-NEXT: v_sub_u32_e32 v8, 63, v8
+; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[9:10]
+; GFX9-NEXT: v_lshlrev_b64 v[8:9], v8, v[0:1]
+; GFX9-NEXT: v_or_b32_e32 v10, v12, v14
+; GFX9-NEXT: v_or_b32_e32 v11, v11, v13
+; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v15
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v11, s[4:5]
+; GFX9-NEXT: v_lshlrev_b64 v[10:11], v15, v[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v15
+; GFX9-NEXT: v_mov_b32_e32 v12, 0
+; GFX9-NEXT: v_mov_b32_e32 v14, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v3, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v2, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v11, 0, v11, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v13, 0
+; GFX9-NEXT: v_mov_b32_e32 v15, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, v10, s[4:5]
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_xor_b64 s[6:7], exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execz .LBB1_5
+; GFX9-NEXT: ; %bb.2: ; %udiv-preheader
+; GFX9-NEXT: v_sub_u32_e32 v14, 64, v22
+; GFX9-NEXT: v_lshrrev_b64 v[12:13], v22, v[0:1]
+; GFX9-NEXT: v_lshlrev_b64 v[14:15], v14, v[2:3]
+; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v22
+; GFX9-NEXT: v_or_b32_e32 v14, v12, v14
+; GFX9-NEXT: v_subrev_u32_e32 v12, 64, v22
+; GFX9-NEXT: v_or_b32_e32 v15, v13, v15
+; GFX9-NEXT: v_lshrrev_b64 v[12:13], v12, v[2:3]
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v22
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v13, v15, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v17, v13, v1, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e32 v14, v12, v14, vcc
+; GFX9-NEXT: v_lshrrev_b64 v[12:13], v22, v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v16, v14, v0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e32 v19, 0, v13, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v18, 0, v12, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v26, vcc, -1, v4
+; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, -1, v5, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v28, vcc, -1, v6, vcc
+; GFX9-NEXT: v_mov_b32_e32 v20, 0
+; GFX9-NEXT: v_mov_b32_e32 v14, 0
+; GFX9-NEXT: v_addc_co_u32_e32 v29, vcc, -1, v7, vcc
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v21, 0
+; GFX9-NEXT: v_mov_b32_e32 v15, 0
+; GFX9-NEXT: v_mov_b32_e32 v13, 0
+; GFX9-NEXT: .LBB1_3: ; %udiv-do-while
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_lshrrev_b32_e32 v12, 31, v11
+; GFX9-NEXT: v_lshlrev_b64 v[10:11], 1, v[10:11]
+; GFX9-NEXT: v_lshlrev_b64 v[18:19], 1, v[18:19]
+; GFX9-NEXT: v_or_b32_e32 v10, v20, v10
+; GFX9-NEXT: v_lshrrev_b32_e32 v20, 31, v17
+; GFX9-NEXT: v_lshlrev_b64 v[16:17], 1, v[16:17]
+; GFX9-NEXT: v_or_b32_e32 v18, v18, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v20, 31, v9
+; GFX9-NEXT: v_or_b32_e32 v16, v16, v20
+; GFX9-NEXT: v_sub_co_u32_e32 v20, vcc, v26, v16
+; GFX9-NEXT: v_subb_co_u32_e32 v20, vcc, v27, v17, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v20, vcc, v28, v18, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v20, vcc, v29, v19, vcc
+; GFX9-NEXT: v_ashrrev_i32_e32 v30, 31, v20
+; GFX9-NEXT: v_and_b32_e32 v20, v30, v4
+; GFX9-NEXT: v_sub_co_u32_e32 v16, vcc, v16, v20
+; GFX9-NEXT: v_and_b32_e32 v20, v30, v5
+; GFX9-NEXT: v_subb_co_u32_e32 v17, vcc, v17, v20, vcc
+; GFX9-NEXT: v_and_b32_e32 v20, v30, v6
+; GFX9-NEXT: v_subb_co_u32_e32 v18, vcc, v18, v20, vcc
+; GFX9-NEXT: v_and_b32_e32 v20, v30, v7
+; GFX9-NEXT: v_subb_co_u32_e32 v19, vcc, v19, v20, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v22, vcc, -1, v22
+; GFX9-NEXT: v_addc_co_u32_e32 v23, vcc, -1, v23, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v24, vcc, -1, v24, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, -1, v25, vcc
+; GFX9-NEXT: v_or_b32_e32 v11, v21, v11
+; GFX9-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-NEXT: v_or_b32_e32 v20, v22, v24
+; GFX9-NEXT: v_or_b32_e32 v21, v23, v25
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[20:21]
+; GFX9-NEXT: v_or3_b32 v8, v8, v12, v14
+; GFX9-NEXT: v_and_b32_e32 v12, 1, v30
+; GFX9-NEXT: v_mov_b32_e32 v21, v13
+; GFX9-NEXT: v_or3_b32 v9, v9, 0, v15
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v20, v12
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB1_3
+; GFX9-NEXT: ; %bb.4: ; %Flow
+; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT: .LBB1_5: ; %Flow2
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_lshlrev_b64 v[16:17], 1, v[10:11]
+; GFX9-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-NEXT: v_lshrrev_b32_e32 v10, 31, v11
+; GFX9-NEXT: v_or3_b32 v15, v9, 0, v15
+; GFX9-NEXT: v_or3_b32 v14, v8, v10, v14
+; GFX9-NEXT: v_or_b32_e32 v13, v13, v17
+; GFX9-NEXT: v_or_b32_e32 v12, v12, v16
+; GFX9-NEXT: .LBB1_6: ; %Flow3
+; GFX9-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX9-NEXT: v_mul_lo_u32 v19, v12, v7
+; GFX9-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v4, v12, 0
+; GFX9-NEXT: v_mov_b32_e32 v17, 0
+; GFX9-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v12, v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v16, v8
+; GFX9-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v5, v12, v[16:17]
+; GFX9-NEXT: v_mul_lo_u32 v18, v13, v6
+; GFX9-NEXT: v_mul_lo_u32 v16, v15, v4
+; GFX9-NEXT: v_mov_b32_e32 v6, v12
+; GFX9-NEXT: v_mov_b32_e32 v12, v17
+; GFX9-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v4, v13, v[11:12]
+; GFX9-NEXT: v_add3_u32 v10, v10, v19, v18
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v14, v4, v[9:10]
+; GFX9-NEXT: v_mov_b32_e32 v4, v12
+; GFX9-NEXT: v_mul_lo_u32 v10, v14, v5
+; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, v6, v4
+; GFX9-NEXT: v_addc_co_u32_e64 v15, s[4:5], 0, 0, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v5, v13, v[14:15]
+; GFX9-NEXT: v_add3_u32 v6, v16, v9, v10
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v6, vcc
+; GFX9-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v7
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v6, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v4, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v5, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-O0-LABEL: v_urem_i128_vv:
+; GFX9-O0: ; %bb.0: ; %_udiv-special-cases
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: ; implicit-def: $vgpr8 : SGPR spill to VGPR lane
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v0
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v2
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v3
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v12
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v2
+; GFX9-O0-NEXT: v_or_b32_e64 v3, v8, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
+; GFX9-O0-NEXT: v_or_b32_e64 v1, v5, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 0
+; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 1
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[1:2], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: v_or_b32_e64 v15, v4, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-O0-NEXT: v_or_b32_e64 v9, v3, v1
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v15
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], s[6:7]
+; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-O0-NEXT: s_mov_b32 s9, 32
+; GFX9-O0-NEXT: v_add_u32_e64 v6, v6, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-O0-NEXT: v_min_u32_e64 v6, v6, v7
+; GFX9-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v7
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v8, v8
+; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
+; GFX9-O0-NEXT: s_mov_b64 s[10:11], 64
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
+; GFX9-O0-NEXT: s_mov_b32 s12, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
+; GFX9-O0-NEXT: s_mov_b32 s14, s11
+; GFX9-O0-NEXT: v_add_co_u32_e64 v8, s[12:13], v8, s12
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, s14
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[12:13], v5, v9, s[12:13]
+; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-O0-NEXT: s_mov_b64 s[12:13], s[6:7]
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[12:13], v[11:12], s[12:13]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v10, s[12:13]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v9, v6, v7, s[12:13]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v1
+; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v6, v2
+; GFX9-O0-NEXT: v_min_u32_e64 v6, v5, v6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v5, v3
+; GFX9-O0-NEXT: v_add_u32_e64 v5, v5, s9
+; GFX9-O0-NEXT: v_ffbh_u32_e64 v11, v4
+; GFX9-O0-NEXT: v_min_u32_e64 v15, v5, v11
+; GFX9-O0-NEXT: ; implicit-def: $sgpr9
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v15
+; GFX9-O0-NEXT: s_mov_b32 s8, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v16
+; GFX9-O0-NEXT: s_mov_b32 s10, s11
+; GFX9-O0-NEXT: v_add_co_u32_e64 v11, s[8:9], v11, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s10
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v5, s[8:9], v5, v12, s[8:9]
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[8:9], v[13:14], s[8:9]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v8, s[8:9]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[8:9]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-O0-NEXT: s_mov_b32 s10, s6
+; GFX9-O0-NEXT: s_mov_b32 s11, s7
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v5, vcc, v5, v8
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v9, vcc, v6, v7, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v8, vcc, v6, v7, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, s11
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, s11
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v6, v7, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v9
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v8
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], s[8:9]
+; GFX9-O0-NEXT: s_mov_b64 s[12:13], 0x7f
+; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[12:13]
+; GFX9-O0-NEXT: v_cmp_gt_u64_e64 s[14:15], v[5:6], s[14:15]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v10, 0, 1, s[14:15]
+; GFX9-O0-NEXT: s_mov_b64 s[14:15], s[6:7]
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[14:15], v[8:9], s[14:15]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[14:15]
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v7, v10, s[8:9]
+; GFX9-O0-NEXT: v_and_b32_e64 v7, 1, v7
+; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[8:9], v7, 1
+; GFX9-O0-NEXT: s_or_b64 s[8:9], s[4:5], s[8:9]
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-O0-NEXT: s_xor_b64 s[4:5], s[8:9], s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: s_mov_b32 s14, s13
+; GFX9-O0-NEXT: v_xor_b32_e64 v7, v7, s14
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
+; GFX9-O0-NEXT: v_xor_b32_e64 v5, v5, s12
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v9
+; GFX9-O0-NEXT: v_or_b32_e64 v7, v7, v10
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[6:7], v[5:6], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v2, v5, s[8:9]
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, s10
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[8:9]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; implicit-def: $sgpr12
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v4, v5, s[8:9]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s10
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[8:9]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; implicit-def: $sgpr8
+; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-O0-NEXT: s_and_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], exec
+; GFX9-O0-NEXT: v_writelane_b32 v0, s4, 2
+; GFX9-O0-NEXT: v_writelane_b32 v0, s5, 3
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: s_cbranch_execz .LBB1_3
+; GFX9-O0-NEXT: s_branch .LBB1_8
+; GFX9-O0-NEXT: .LBB1_1: ; %Flow
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s4, v0, 4
+; GFX9-O0-NEXT: v_readlane_b32 s5, v0, 5
+; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: ; %bb.2: ; %Flow
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(6)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB1_5
+; GFX9-O0-NEXT: .LBB1_3: ; %Flow2
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s4, v4, 2
+; GFX9-O0-NEXT: v_readlane_b32 s5, v4, 3
+; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB1_9
+; GFX9-O0-NEXT: .LBB1_4: ; %udiv-loop-exit
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
+; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[0:1]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_lshlrev_b64 v[9:10], s4, v[9:10]
+; GFX9-O0-NEXT: s_mov_b32 s4, 63
+; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v8
+; GFX9-O0-NEXT: v_or3_b32 v4, v4, v11, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v9
+; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
+; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v4
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB1_3
+; GFX9-O0-NEXT: .LBB1_5: ; %Flow1
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s4, v8, 6
+; GFX9-O0-NEXT: v_readlane_b32 s5, v8, 7
+; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB1_4
+; GFX9-O0-NEXT: .LBB1_6: ; %udiv-do-while
+; GFX9-O0-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_readlane_b32 s6, v16, 8
+; GFX9-O0-NEXT: v_readlane_b32 s7, v16, 9
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b32 s4, 63
+; GFX9-O0-NEXT: s_waitcnt vmcnt(16)
+; GFX9-O0-NEXT: v_lshrrev_b64 v[29:30], s4, v[2:3]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v30
+; GFX9-O0-NEXT: s_mov_b32 s5, 1
+; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], s5, v[23:24]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v24
+; GFX9-O0-NEXT: v_or_b32_e64 v4, v4, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v29
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v23
+; GFX9-O0-NEXT: v_or_b32_e64 v23, v5, v10
+; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v24, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[2:3]
+; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], s4, v[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v30
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v29
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_or_b32_e64 v4, v3, v4
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s5, v[0:1]
+; GFX9-O0-NEXT: v_lshlrev_b64 v[29:30], s5, v[6:7]
+; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v30
+; GFX9-O0-NEXT: s_waitcnt vmcnt(10)
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v28
+; GFX9-O0-NEXT: v_or3_b32 v6, v6, v7, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v29
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v27
+; GFX9-O0-NEXT: v_or3_b32 v0, v0, v1, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-O0-NEXT: s_waitcnt vmcnt(8)
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v26
+; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v25
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v15
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v13, vcc, v13, v6
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v12, vcc, v12, v10, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v11, vcc, v11, v4, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v5, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
+; GFX9-O0-NEXT: v_ashrrev_i64 v[13:14], s4, v[11:12]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], 1
+; GFX9-O0-NEXT: s_mov_b32 s8, s5
+; GFX9-O0-NEXT: v_and_b32_e64 v12, v7, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v13
+; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
+; GFX9-O0-NEXT: v_and_b32_e64 v14, v11, s4
+; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v23, v22
+; GFX9-O0-NEXT: v_and_b32_e64 v23, v7, v23
+; GFX9-O0-NEXT: v_and_b32_e64 v21, v11, v21
+; GFX9-O0-NEXT: ; kill: def $vgpr21 killed $vgpr21 def $vgpr21_vgpr22 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v22, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v23, v20
+; GFX9-O0-NEXT: v_and_b32_e64 v7, v7, v23
+; GFX9-O0-NEXT: v_and_b32_e64 v23, v11, v19
+; GFX9-O0-NEXT: ; kill: def $vgpr23 killed $vgpr23 def $vgpr23_vgpr24 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v24, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v23
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v24
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v22
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v6, vcc, v6, v20
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v10, vcc, v10, v19, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v4, vcc, v4, v11, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v7, vcc, v5, v7, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v8
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 killed $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
+; GFX9-O0-NEXT: s_mov_b32 s5, s8
+; GFX9-O0-NEXT: s_mov_b32 s4, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
+; GFX9-O0-NEXT: v_add_co_u32_e32 v20, vcc, v11, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v9, vcc, v9, v11, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v10, v11, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v8, vcc, v8, v10, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr20 killed $vgpr20 def $vgpr20_vgpr21 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v21, v9
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v20
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v21
+; GFX9-O0-NEXT: v_mov_b32_e32 v22, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v21
+; GFX9-O0-NEXT: v_or_b32_e64 v19, v19, v22
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v20
+; GFX9-O0-NEXT: v_or_b32_e64 v17, v17, v18
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v19
+; GFX9-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[17:18], v[12:13]
+; GFX9-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v2
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v0
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v14
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12
+; GFX9-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 4
+; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 5
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-O0-NEXT: v_writelane_b32 v16, s6, 8
+; GFX9-O0-NEXT: v_writelane_b32 v16, s7, 9
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-O0-NEXT: s_cbranch_execnz .LBB1_6
+; GFX9-O0-NEXT: s_branch .LBB1_1
+; GFX9-O0-NEXT: .LBB1_7: ; %udiv-preheader
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_waitcnt vmcnt(9)
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], v4, v[21:22]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-O0-NEXT: s_mov_b32 s6, 64
+; GFX9-O0-NEXT: v_sub_u32_e64 v12, s6, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[23:24], v12, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v24
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v23
+; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v7
+; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v4, s6
+; GFX9-O0-NEXT: v_sub_u32_e64 v5, v4, s6
+; GFX9-O0-NEXT: v_lshrrev_b64 v[23:24], v5, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v24
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[4:5]
+; GFX9-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v4, s6
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v22
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v12, s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v23
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v21
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[6:7]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_lshrrev_b64 v[4:5], v4, v[19:20]
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v5
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-O0-NEXT: s_mov_b32 s8, s7
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v12, v12, v15, s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v4
+; GFX9-O0-NEXT: s_mov_b32 s8, s6
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v5, s[4:5]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v14
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], -1
+; GFX9-O0-NEXT: s_mov_b32 s5, s8
+; GFX9-O0-NEXT: s_mov_b32 s4, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v18
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, s5
+; GFX9-O0-NEXT: v_add_co_u32_e32 v12, vcc, v12, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v17, vcc, v15, v17, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v15, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v13, vcc, v13, v15, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v13
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v17
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, s7
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
+; GFX9-O0-NEXT: v_writelane_b32 v16, s4, 8
+; GFX9-O0-NEXT: v_writelane_b32 v16, s5, 9
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_branch .LBB1_6
+; GFX9-O0-NEXT: .LBB1_8: ; %udiv-bb1
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 1
+; GFX9-O0-NEXT: s_mov_b32 s5, s6
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
+; GFX9-O0-NEXT: s_mov_b32 s4, s7
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-O0-NEXT: s_mov_b32 s8, s6
+; GFX9-O0-NEXT: s_mov_b32 s9, s7
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-O0-NEXT: v_add_co_u32_e32 v9, vcc, v4, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v5, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s8
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s9
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v2
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b32 s4, 0x7f
+; GFX9-O0-NEXT: v_sub_u32_e64 v3, s4, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[5:6], v3, v[11:12]
+; GFX9-O0-NEXT: v_mov_b32_e32 v13, v6
+; GFX9-O0-NEXT: s_mov_b32 s4, 64
+; GFX9-O0-NEXT: v_sub_u32_e64 v14, s4, v3
+; GFX9-O0-NEXT: v_lshrrev_b64 v[14:15], v14, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v15
+; GFX9-O0-NEXT: v_or_b32_e64 v13, v13, v16
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v14
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v6
+; GFX9-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v3, s4
+; GFX9-O0-NEXT: s_mov_b32 s10, 63
+; GFX9-O0-NEXT: v_sub_u32_e64 v4, s10, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[13:14], v4, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v14
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[4:5]
+; GFX9-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[10:11], v3, s10
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v12
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v4, v15, s[10:11]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v13
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[4:5]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[10:11]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: ; implicit-def: $sgpr10
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-O0-NEXT: v_lshlrev_b64 v[7:8], v3, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, s9
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[4:5]
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s8
+; GFX9-O0-NEXT: v_cndmask_b32_e64 v7, v4, v7, s[4:5]
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v3
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v10
+; GFX9-O0-NEXT: v_or_b32_e64 v3, v3, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v9
+; GFX9-O0-NEXT: v_or_b32_e64 v1, v1, v2
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[1:2], s[6:7]
+; GFX9-O0-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s9
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
+; GFX9-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], exec
+; GFX9-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-O0-NEXT: v_writelane_b32 v0, s6, 6
+; GFX9-O0-NEXT: v_writelane_b32 v0, s7, 7
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: s_cbranch_execz .LBB1_5
+; GFX9-O0-NEXT: s_branch .LBB1_7
+; GFX9-O0-NEXT: .LBB1_9: ; %udiv-end
+; GFX9-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b32 s4, 32
+; GFX9-O0-NEXT: s_waitcnt vmcnt(2)
+; GFX9-O0-NEXT: v_lshrrev_b64 v[2:3], s4, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v13
+; GFX9-O0-NEXT: v_mul_lo_u32 v5, v6, v2
+; GFX9-O0-NEXT: v_lshrrev_b64 v[13:14], s4, v[13:14]
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mul_lo_u32 v3, v7, v3
+; GFX9-O0-NEXT: v_mad_u64_u32 v[13:14], s[6:7], v7, v2, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
+; GFX9-O0-NEXT: v_add3_u32 v2, v2, v3, v5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-O0-NEXT: v_lshlrev_b64 v[17:18], s4, v[2:3]
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v18
+; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 killed $vgpr13_vgpr14 killed $exec
+; GFX9-O0-NEXT: s_mov_b32 s5, 0
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v14
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-O0-NEXT: v_or_b32_e64 v13, v3, v5
+; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
+; GFX9-O0-NEXT: v_lshrrev_b64 v[2:3], s4, v[15:16]
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v11
+; GFX9-O0-NEXT: v_mul_lo_u32 v3, v2, v8
+; GFX9-O0-NEXT: v_lshrrev_b64 v[11:12], s4, v[11:12]
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v15
+; GFX9-O0-NEXT: v_mul_lo_u32 v11, v11, v5
+; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v2, v5, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v16
+; GFX9-O0-NEXT: v_add3_u32 v2, v2, v3, v11
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_lshlrev_b64 v[2:3], s4, v[2:3]
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 killed $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v16
+; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v15
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v14
+; GFX9-O0-NEXT: v_add_co_u32_e64 v13, s[6:7], v11, v12
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v14, v2
+; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v8, v6, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v15
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v16
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v12
+; GFX9-O0-NEXT: v_lshlrev_b64 v[15:16], s4, v[15:16]
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v16
+; GFX9-O0-NEXT: v_or_b32_e64 v11, v11, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v15
+; GFX9-O0-NEXT: v_or_b32_e64 v2, v2, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v11
+; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v8, v7, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v15
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v12
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v16
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
+; GFX9-O0-NEXT: v_lshlrev_b64 v[15:16], s4, v[15:16]
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v16
+; GFX9-O0-NEXT: v_or_b32_e64 v8, v8, v17
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v15
+; GFX9-O0-NEXT: v_or_b32_e64 v19, v11, v12
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v8
+; GFX9-O0-NEXT: v_mad_u64_u32 v[11:12], s[6:7], v5, v7, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v17, v12
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v19
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v20
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v18
+; GFX9-O0-NEXT: v_add_co_u32_e64 v7, s[6:7], v7, v16
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v15, s[6:7], v8, v15, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v8
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0xffffffff
+; GFX9-O0-NEXT: s_mov_b32 s8, s7
+; GFX9-O0-NEXT: v_and_b32_e64 v15, v15, s8
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v7
+; GFX9-O0-NEXT: ; kill: def $sgpr6 killed $sgpr6 killed $sgpr6_sgpr7
+; GFX9-O0-NEXT: v_and_b32_e64 v17, v16, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v18, v15
+; GFX9-O0-NEXT: v_mad_u64_u32 v[15:16], s[6:7], v5, v6, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v19, v15
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v20
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v16
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: ; implicit-def: $sgpr7
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v6
+; GFX9-O0-NEXT: v_lshlrev_b64 v[15:16], s4, v[15:16]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v16
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v19
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 killed $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_or_b32_e64 v19, v6, v15
+; GFX9-O0-NEXT: ; kill: def $vgpr19 killed $vgpr19 def $vgpr19_vgpr20 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v20, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v19
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v20
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v18
+; GFX9-O0-NEXT: v_add_co_u32_e64 v5, s[6:7], v5, v16
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v15, s[6:7], v6, v15, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v15
+; GFX9-O0-NEXT: v_lshrrev_b64 v[17:18], s4, v[5:6]
+; GFX9-O0-NEXT: v_lshrrev_b64 v[7:8], s4, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v15, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v17
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v18
+; GFX9-O0-NEXT: v_add_co_u32_e64 v15, s[6:7], v15, v16
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v7, s[6:7], v7, v8, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v16
+; GFX9-O0-NEXT: v_add_co_u32_e64 v15, s[6:7], v7, v8
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v2, s[6:7], v2, v3, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr15 killed $vgpr15 def $vgpr15_vgpr16 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v15
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v13
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v16
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-O0-NEXT: v_add_co_u32_e64 v2, s[6:7], v2, v8
+; GFX9-O0-NEXT: v_addc_co_u32_e64 v7, s[6:7], v3, v7, s[6:7]
+; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v7
+; GFX9-O0-NEXT: v_lshlrev_b64 v[6:7], s4, v[5:6]
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 killed $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: ; implicit-def: $sgpr6
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-O0-NEXT: ; kill: def $vgpr11 killed $vgpr11 def $vgpr11_vgpr12 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v12, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
+; GFX9-O0-NEXT: v_or_b32_e64 v5, v5, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT: v_or_b32_e64 v6, v6, v7
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v10
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v7, vcc, v7, v8
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v5, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v8, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: v_lshrrev_b64 v[7:8], s4, v[7:8]
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
+; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-O0-NEXT: ; kill: killed $vgpr4
+; GFX9-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_nop 0
+; GFX9-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+ %div = urem i128 %lhs, %rhs
+ ret i128 %div
+}
+
+define i128 @v_srem_i128_v_pow2k(i128 %lhs) {
+; GFX9-LABEL: v_srem_i128_v_pow2k:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_ashrrev_i32_e32 v4, 31, v3
+; GFX9-NEXT: v_mov_b32_e32 v5, v4
+; GFX9-NEXT: v_lshrrev_b64 v[4:5], 31, v[4:5]
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v0, v4
+; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v1, v5, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v2, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v3, vcc
+; GFX9-NEXT: v_and_b32_e32 v4, -2, v4
+; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, 0, v0
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v4, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v5, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v6, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-O0-LABEL: v_srem_i128_v_pow2k:
+; GFX9-O0: ; %bb.0:
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
+; GFX9-O0-NEXT: s_mov_b32 s4, 63
+; GFX9-O0-NEXT: v_ashrrev_i64 v[6:7], s4, v[6:7]
+; GFX9-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], s4, v[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-O0-NEXT: s_mov_b32 s5, s6
+; GFX9-O0-NEXT: s_mov_b32 s4, s7
+; GFX9-O0-NEXT: v_add_co_u32_e32 v6, vcc, v5, v4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v4, vcc, v0, v2, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, s5
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v8, vcc, v3, v2, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v1, v2, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v7
+; GFX9-O0-NEXT: s_mov_b32 s6, -2
+; GFX9-O0-NEXT: s_mov_b32 s4, 0
+; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5
+; GFX9-O0-NEXT: s_mov_b32 s5, s6
+; GFX9-O0-NEXT: s_mov_b32 s6, s5
+; GFX9-O0-NEXT: v_and_b32_e64 v4, v4, s6
+; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
+; GFX9-O0-NEXT: v_and_b32_e64 v9, v6, s4
+; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v8
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v9
+; GFX9-O0-NEXT: v_sub_co_u32_e32 v5, vcc, v5, v7
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v6, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v4, vcc
+; GFX9-O0-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v0
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: s_mov_b32 s4, 32
+; GFX9-O0-NEXT: v_lshrrev_b64 v[5:6], s4, v[5:6]
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-O0-NEXT: v_lshrrev_b64 v[3:4], s4, v[3:4]
+; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr3_vgpr4 killed $exec
+; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+ %div = srem i128 %lhs, 8589934592
+ ret i128 %div
+}
+
+define i128 @v_urem_i128_v_pow2k(i128 %lhs) {
+; GFX9-LABEL: v_urem_i128_v_pow2k:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v1, 1, v1
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-O0-LABEL: v_urem_i128_v_pow2k:
+; GFX9-O0: ; %bb.0:
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr1 killed $exec
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX9-O0-NEXT: s_mov_b32 s6, 1
+; GFX9-O0-NEXT: s_mov_b32 s4, -1
+; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5
+; GFX9-O0-NEXT: s_mov_b32 s5, s6
+; GFX9-O0-NEXT: s_mov_b32 s6, s5
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT: v_and_b32_e64 v3, v2, s6
+; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 killed $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_and_b32_e64 v1, v0, s4
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: s_mov_b32 s4, 32
+; GFX9-O0-NEXT: v_lshrrev_b64 v[1:2], s4, v[1:2]
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+ %div = urem i128 %lhs, 8589934592
+ ret i128 %div
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX9-SDAG: {{.*}}
+; GFX9-SDAG-O0: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir b/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir
index d006aa9..2bc4288 100644
--- a/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir
+++ b/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir
@@ -89,7 +89,7 @@ body: |
# CHECK: $sp = t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, $r4, $r5, $r6, undef $r7, $r8, $r9, $r10, $r11
# CHECK-NEXT: $r0 = t2BICri $r0, 1, 14 /* CC::al */, $noreg, $noreg
# CHECK-NEXT: $sp = tSUBspi $sp, 34, 14 /* CC::al */, $noreg
-# CHECK-NEXT: VLSTM $sp, 14 /* CC::al */, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit undef $vpr, implicit undef $fpscr, implicit undef $fpscr_nzcv, implicit undef $d0, implicit undef $d1, implicit undef $d2, implicit undef $d3, implicit undef $d4, implicit undef $d5, implicit undef $d6, implicit undef $d7, implicit $d8, implicit $d9, implicit $d10, implicit $d11, implicit $d12, implicit $d13, implicit $d14, implicit $d15
+# CHECK-NEXT: VLSTM $sp, 14 /* CC::al */, $noreg, implicit undef $vpr, implicit undef $fpscr, implicit undef $fpscr_nzcv, implicit undef $q0, implicit undef $q1, implicit undef $q2, implicit undef $q3, implicit undef $q4, implicit undef $q5, implicit undef $q6, implicit undef $q7
# CHECK-NEXT: $r1 = tMOVr $r0, 14 /* CC::al */, $noreg
# CHECK-NEXT: $r2 = tMOVr $r0, 14 /* CC::al */, $noreg
# CHECK-NEXT: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
@@ -105,7 +105,7 @@ body: |
# CHECK-NEXT: t2MSR_M 3072, $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
# CHECK-NEXT: tBLXNSr 14 /* CC::al */, $noreg, killed $r0, csr_aapcs, implicit-def $lr, implicit $sp, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $s0
# CHECK-NEXT: $r12 = VMOVRS $s0, 14 /* CC::al */, $noreg
-# CHECK-NEXT: VLLDM $sp, 14 /* CC::al */, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit-def $d0, implicit-def $d1, implicit-def $d2, implicit-def $d3, implicit-def $d4, implicit-def $d5, implicit-def $d6, implicit-def $d7, implicit-def $d8, implicit-def $d9, implicit-def $d10, implicit-def $d11, implicit-def $d12, implicit-def $d13, implicit-def $d14, implicit-def $d15
+# CHECK-NEXT: VLLDM $sp, 14 /* CC::al */, $noreg, implicit-def $q0, implicit-def $q1, implicit-def $q2, implicit-def $q3, implicit-def $q4, implicit-def $q5, implicit-def $q6, implicit-def $q7, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv
# CHECK-NEXT: $s0 = VMOVSR $r12, 14 /* CC::al */, $noreg
# CHECK-NEXT: $sp = tADDspi $sp, 34, 14 /* CC::al */, $noreg
# CHECK-NEXT: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11
diff --git a/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir b/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir
index ad53add..8c49a53 100644
--- a/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir
+++ b/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir
@@ -2,7 +2,7 @@
--- |
target triple = "thumbv8m.main-arm-none-eabi"
- define hidden void @foo(void ()* nocapture %baz) local_unnamed_addr #0 {
+ define hidden void @foo(ptr nocapture %baz) local_unnamed_addr #0 {
entry:
%call = call i32 @bar() #0
%tobool = icmp eq i32 %call, 0
@@ -55,13 +55,14 @@ body: |
tBL 14, $noreg, @bar, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def dead $r0
bb.2.land.end:
- liveins: $r4, $vpr, $fpscr, $fpscr_nzcv, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7, $d8, $d9, $d10, $d11, $d12, $d13, $d14, $d15
+ liveins: $r4
+
$sp = t2STMDB_UPD $sp, 14, $noreg, $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $r11
$r4 = t2BICri $r4, 1, 14, $noreg, $noreg
$sp = tSUBspi $sp, 34, 14, $noreg
- VLSTM $sp, 14, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit $vpr, implicit $fpscr, implicit $fpscr_nzcv, implicit $d0, implicit $d1, implicit $d2, implicit $d3, implicit $d4, implicit $d5, implicit $d6, implicit $d7, implicit $d8, implicit $d9, implicit $d10, implicit $d11, implicit $d12, implicit $d13, implicit $d14, implicit $d15
+ VLSTM $sp, 14, $noreg
tBLXNSr 14, $noreg, killed $r4, csr_aapcs, implicit-def $lr, implicit $sp, implicit-def dead $lr, implicit $sp, implicit-def $sp
- VLLDM $sp, 14, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit-def $d0, implicit-def $d1, implicit-def $d2, implicit-def $d3, implicit-def $d4, implicit-def $d5, implicit-def $d6, implicit-def $d7, implicit-def $d8, implicit-def $d9, implicit-def $d10, implicit-def $d11, implicit-def $d12, implicit-def $d13, implicit-def $d14, implicit-def $d15
+ VLLDM $sp, 14, $noreg, implicit-def $q0, implicit-def $q1, implicit-def $q2, implicit-def $q3, implicit-def $q4, implicit-def $q5, implicit-def $q6, implicit-def $q7, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv
$sp = tADDspi $sp, 34, 14, $noreg
$sp = t2LDMIA_UPD $sp, 14, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11
$sp = t2LDMIA_RET $sp, 14, $noreg, def $r4, def $pc
diff --git a/llvm/test/CodeGen/PowerPC/crsave.ll b/llvm/test/CodeGen/PowerPC/crsave.ll
index 81e7a0a..bde49d0 100644
--- a/llvm/test/CodeGen/PowerPC/crsave.ll
+++ b/llvm/test/CodeGen/PowerPC/crsave.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -O0 -frame-pointer=all -mtriple=powerpc-unknown-linux-gnu -mcpu=g5 < %s | FileCheck %s -check-prefix=PPC32
; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu -mcpu=g5 < %s | FileCheck %s -check-prefix=PPC64
; RUN: llc -O0 -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs < %s | FileCheck %s -check-prefix=PPC64-ELFv2
@@ -5,6 +6,101 @@
declare void @foo()
define i32 @test_cr2() nounwind uwtable {
+; PPC32-LABEL: test_cr2:
+; PPC32: # %bb.0: # %entry
+; PPC32-NEXT: mflr 0
+; PPC32-NEXT: stwu 1, -32(1)
+; PPC32-NEXT: stw 31, 28(1)
+; PPC32-NEXT: stw 0, 36(1)
+; PPC32-NEXT: .cfi_def_cfa_offset 32
+; PPC32-NEXT: .cfi_offset r31, -4
+; PPC32-NEXT: .cfi_offset lr, 4
+; PPC32-NEXT: mr 31, 1
+; PPC32-NEXT: .cfi_def_cfa_register r31
+; PPC32-NEXT: mfcr 12
+; PPC32-NEXT: stw 12, 24(31)
+; PPC32-NEXT: li 3, 1
+; PPC32-NEXT: li 4, 2
+; PPC32-NEXT: li 5, 3
+; PPC32-NEXT: li 6, 0
+; PPC32-NEXT: #APP
+; PPC32-EMPTY:
+; PPC32-NEXT: mtcr 6
+; PPC32-NEXT: cmpw 2, 4, 3
+; PPC32-NEXT: mfcr 3
+; PPC32-NEXT: #NO_APP
+; PPC32-NEXT: stw 3, 20(31)
+; PPC32-NEXT: bl foo
+; PPC32-NEXT: lwz 3, 20(31)
+; PPC32-NEXT: lwz 12, 24(31)
+; PPC32-NEXT: mtocrf 32, 12
+; PPC32-NEXT: lwz 0, 36(1)
+; PPC32-NEXT: lwz 31, 28(1)
+; PPC32-NEXT: addi 1, 1, 32
+; PPC32-NEXT: mtlr 0
+; PPC32-NEXT: blr
+;
+; PPC64-LABEL: test_cr2:
+; PPC64: # %bb.0: # %entry
+; PPC64-NEXT: mflr 0
+; PPC64-NEXT: mfcr 12
+; PPC64-NEXT: stw 12, 8(1)
+; PPC64-NEXT: stdu 1, -128(1)
+; PPC64-NEXT: std 0, 144(1)
+; PPC64-NEXT: .cfi_def_cfa_offset 128
+; PPC64-NEXT: .cfi_offset lr, 16
+; PPC64-NEXT: .cfi_offset cr2, 8
+; PPC64-NEXT: li 3, 1
+; PPC64-NEXT: li 4, 2
+; PPC64-NEXT: li 5, 3
+; PPC64-NEXT: li 6, 0
+; PPC64-NEXT: #APP
+; PPC64-EMPTY:
+; PPC64-NEXT: mtcr 6
+; PPC64-NEXT: cmpw 2, 4, 3
+; PPC64-NEXT: mfcr 3
+; PPC64-NEXT: #NO_APP
+; PPC64-NEXT: stw 3, 124(1)
+; PPC64-NEXT: bl foo
+; PPC64-NEXT: nop
+; PPC64-NEXT: lwz 3, 124(1)
+; PPC64-NEXT: addi 1, 1, 128
+; PPC64-NEXT: ld 0, 16(1)
+; PPC64-NEXT: lwz 12, 8(1)
+; PPC64-NEXT: mtocrf 32, 12
+; PPC64-NEXT: mtlr 0
+; PPC64-NEXT: blr
+;
+; PPC64-ELFv2-LABEL: test_cr2:
+; PPC64-ELFv2: # %bb.0: # %entry
+; PPC64-ELFv2-NEXT: mflr 0
+; PPC64-ELFv2-NEXT: mfocrf 12, 32
+; PPC64-ELFv2-NEXT: stw 12, 8(1)
+; PPC64-ELFv2-NEXT: stdu 1, -112(1)
+; PPC64-ELFv2-NEXT: std 0, 128(1)
+; PPC64-ELFv2-NEXT: .cfi_def_cfa_offset 112
+; PPC64-ELFv2-NEXT: .cfi_offset lr, 16
+; PPC64-ELFv2-NEXT: .cfi_offset cr2, 8
+; PPC64-ELFv2-NEXT: li 3, 1
+; PPC64-ELFv2-NEXT: li 4, 2
+; PPC64-ELFv2-NEXT: li 5, 3
+; PPC64-ELFv2-NEXT: li 6, 0
+; PPC64-ELFv2-NEXT: #APP
+; PPC64-ELFv2-EMPTY:
+; PPC64-ELFv2-NEXT: mtcr 6
+; PPC64-ELFv2-NEXT: cmpw 2, 4, 3
+; PPC64-ELFv2-NEXT: mfcr 3
+; PPC64-ELFv2-NEXT: #NO_APP
+; PPC64-ELFv2-NEXT: stw 3, 108(1)
+; PPC64-ELFv2-NEXT: bl foo
+; PPC64-ELFv2-NEXT: nop
+; PPC64-ELFv2-NEXT: lwz 3, 108(1)
+; PPC64-ELFv2-NEXT: addi 1, 1, 112
+; PPC64-ELFv2-NEXT: ld 0, 16(1)
+; PPC64-ELFv2-NEXT: lwz 12, 8(1)
+; PPC64-ELFv2-NEXT: mtocrf 32, 12
+; PPC64-ELFv2-NEXT: mtlr 0
+; PPC64-ELFv2-NEXT: blr
entry:
%ret = alloca i32, align 4
%0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmpw 2,$2,$1\0A\09mfcr $0", "=r,r,r,r,r,~{cr2}"(i32 1, i32 2, i32 3, i32 0) nounwind
@@ -14,27 +110,104 @@ entry:
ret i32 %1
}
-; PPC32-LABEL: test_cr2:
-; PPC32: stwu 1, -32(1)
-; PPC32: stw 31, 28(1)
-; PPC32: mfcr 12
-; PPC32-NEXT: stw 12, 24(31)
-; PPC32: lwz 12, 24(31)
-; PPC32-NEXT: mtocrf 32, 12
-
-; PPC64: .cfi_startproc
-; PPC64: mfcr 12
-; PPC64: stw 12, 8(1)
-; PPC64: stdu 1, -[[AMT:[0-9]+]](1)
-; PPC64: .cfi_def_cfa_offset 128
-; PPC64: .cfi_offset lr, 16
-; PPC64: .cfi_offset cr2, 8
-; PPC64: addi 1, 1, [[AMT]]
-; PPC64: lwz 12, 8(1)
-; PPC64: mtocrf 32, 12
-; PPC64: .cfi_endproc
-
define i32 @test_cr234() nounwind {
+; PPC32-LABEL: test_cr234:
+; PPC32: # %bb.0: # %entry
+; PPC32-NEXT: mflr 0
+; PPC32-NEXT: stwu 1, -32(1)
+; PPC32-NEXT: stw 31, 28(1)
+; PPC32-NEXT: stw 0, 36(1)
+; PPC32-NEXT: mr 31, 1
+; PPC32-NEXT: mfcr 12
+; PPC32-NEXT: stw 12, 24(31)
+; PPC32-NEXT: li 3, 1
+; PPC32-NEXT: li 4, 2
+; PPC32-NEXT: li 5, 3
+; PPC32-NEXT: li 6, 0
+; PPC32-NEXT: #APP
+; PPC32-EMPTY:
+; PPC32-NEXT: mtcr 6
+; PPC32-NEXT: cmpw 2, 4, 3
+; PPC32-NEXT: cmpw 3, 4, 4
+; PPC32-NEXT: cmpw 4, 4, 5
+; PPC32-NEXT: mfcr 3
+; PPC32-NEXT: #NO_APP
+; PPC32-NEXT: stw 3, 20(31)
+; PPC32-NEXT: bl foo
+; PPC32-NEXT: lwz 3, 20(31)
+; PPC32-NEXT: lwz 12, 24(31)
+; PPC32-NEXT: mtocrf 32, 12
+; PPC32-NEXT: mtocrf 16, 12
+; PPC32-NEXT: mtocrf 8, 12
+; PPC32-NEXT: lwz 0, 36(1)
+; PPC32-NEXT: lwz 31, 28(1)
+; PPC32-NEXT: addi 1, 1, 32
+; PPC32-NEXT: mtlr 0
+; PPC32-NEXT: blr
+;
+; PPC64-LABEL: test_cr234:
+; PPC64: # %bb.0: # %entry
+; PPC64-NEXT: mflr 0
+; PPC64-NEXT: mfcr 12
+; PPC64-NEXT: stw 12, 8(1)
+; PPC64-NEXT: stdu 1, -128(1)
+; PPC64-NEXT: std 0, 144(1)
+; PPC64-NEXT: li 3, 1
+; PPC64-NEXT: li 4, 2
+; PPC64-NEXT: li 5, 3
+; PPC64-NEXT: li 6, 0
+; PPC64-NEXT: #APP
+; PPC64-EMPTY:
+; PPC64-NEXT: mtcr 6
+; PPC64-NEXT: cmpw 2, 4, 3
+; PPC64-NEXT: cmpw 3, 4, 4
+; PPC64-NEXT: cmpw 4, 4, 5
+; PPC64-NEXT: mfcr 3
+; PPC64-NEXT: #NO_APP
+; PPC64-NEXT: stw 3, 124(1)
+; PPC64-NEXT: bl foo
+; PPC64-NEXT: nop
+; PPC64-NEXT: lwz 3, 124(1)
+; PPC64-NEXT: addi 1, 1, 128
+; PPC64-NEXT: ld 0, 16(1)
+; PPC64-NEXT: lwz 12, 8(1)
+; PPC64-NEXT: mtocrf 32, 12
+; PPC64-NEXT: mtocrf 16, 12
+; PPC64-NEXT: mtocrf 8, 12
+; PPC64-NEXT: mtlr 0
+; PPC64-NEXT: blr
+;
+; PPC64-ELFv2-LABEL: test_cr234:
+; PPC64-ELFv2: # %bb.0: # %entry
+; PPC64-ELFv2-NEXT: mflr 0
+; PPC64-ELFv2-NEXT: mfcr 12
+; PPC64-ELFv2-NEXT: stw 12, 8(1)
+; PPC64-ELFv2-NEXT: stdu 1, -112(1)
+; PPC64-ELFv2-NEXT: std 0, 128(1)
+; PPC64-ELFv2-NEXT: li 3, 1
+; PPC64-ELFv2-NEXT: li 4, 2
+; PPC64-ELFv2-NEXT: li 5, 3
+; PPC64-ELFv2-NEXT: li 6, 0
+; PPC64-ELFv2-NEXT: #APP
+; PPC64-ELFv2-EMPTY:
+; PPC64-ELFv2-NEXT: mtcr 6
+; PPC64-ELFv2-NEXT: cmpw 2, 4, 3
+; PPC64-ELFv2-NEXT: cmpw 3, 4, 4
+; PPC64-ELFv2-NEXT: cmpw 4, 4, 5
+; PPC64-ELFv2-NEXT: mfcr 3
+; PPC64-ELFv2-NEXT: #NO_APP
+; PPC64-ELFv2-NEXT: stw 3, 108(1)
+; PPC64-ELFv2-NEXT: bl foo
+; PPC64-ELFv2-NEXT: nop
+; PPC64-ELFv2-NEXT: lwz 3, 108(1)
+; PPC64-ELFv2-NEXT: addi 1, 1, 112
+; PPC64-ELFv2-NEXT: ld 0, 16(1)
+; PPC64-ELFv2-NEXT: lwz 12, 8(1)
+; PPC64-ELFv2-NEXT: mtocrf 32, 12
+; PPC64-ELFv2-NEXT: mtocrf 16, 12
+; PPC64-ELFv2-NEXT: mtocrf 8, 12
+; PPC64-ELFv2-NEXT: mtlr 0
+; PPC64-ELFv2-NEXT: blr
entry:
%ret = alloca i32, align 4
%0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmpw 2,$2,$1\0A\09cmpw 3,$2,$2\0A\09cmpw 4,$2,$3\0A\09mfcr $0", "=r,r,r,r,r,~{cr2},~{cr3},~{cr4}"(i32 1, i32 2, i32 3, i32 0) nounwind
@@ -44,41 +217,102 @@ entry:
ret i32 %1
}
-; PPC32-LABEL: test_cr234:
-; PPC32: stwu 1, -32(1)
-; PPC32: stw 31, 28(1)
-; PPC32: mfcr 12
-; PPC32-NEXT: stw 12, 24(31)
-; PPC32: lwz 12, 24(31)
-; PPC32-NEXT: mtocrf 32, 12
-; PPC32-NEXT: mtocrf 16, 12
-; PPC32-NEXT: mtocrf 8, 12
-
-; PPC64: mfcr 12
-; PPC64: stw 12, 8(1)
-; PPC64: stdu 1, -[[AMT:[0-9]+]](1)
-; PPC64: addi 1, 1, [[AMT]]
-; PPC64: lwz 12, 8(1)
-; PPC64: mtocrf 32, 12
-; PPC64: mtocrf 16, 12
-; PPC64: mtocrf 8, 12
-
; Generate mfocrf in prologue when we need to save 1 nonvolatile CR field
define void @cloberOneNvCrField() {
+; PPC32-LABEL: cloberOneNvCrField:
+; PPC32: # %bb.0: # %entry
+; PPC32-NEXT: stwu 1, -32(1)
+; PPC32-NEXT: stw 31, 28(1)
+; PPC32-NEXT: .cfi_def_cfa_offset 32
+; PPC32-NEXT: .cfi_offset r31, -4
+; PPC32-NEXT: mr 31, 1
+; PPC32-NEXT: .cfi_def_cfa_register r31
+; PPC32-NEXT: mfcr 12
+; PPC32-NEXT: stw 12, 24(31)
+; PPC32-NEXT: #APP
+; PPC32-NEXT: # clobbers
+; PPC32-NEXT: #NO_APP
+; PPC32-NEXT: lwz 12, 24(31)
+; PPC32-NEXT: mtocrf 32, 12
+; PPC32-NEXT: lwz 31, 28(1)
+; PPC32-NEXT: addi 1, 1, 32
+; PPC32-NEXT: blr
+;
+; PPC64-LABEL: cloberOneNvCrField:
+; PPC64: # %bb.0: # %entry
+; PPC64-NEXT: mfcr 12
+; PPC64-NEXT: stw 12, 8(1)
+; PPC64-NEXT: #APP
+; PPC64-NEXT: # clobbers
+; PPC64-NEXT: #NO_APP
+; PPC64-NEXT: lwz 12, 8(1)
+; PPC64-NEXT: mtocrf 32, 12
+; PPC64-NEXT: blr
+;
+; PPC64-ELFv2-LABEL: cloberOneNvCrField:
+; PPC64-ELFv2: # %bb.0: # %entry
+; PPC64-ELFv2-NEXT: mfocrf 12, 32
+; PPC64-ELFv2-NEXT: stw 12, 8(1)
+; PPC64-ELFv2-NEXT: #APP
+; PPC64-ELFv2-NEXT: # clobbers
+; PPC64-ELFv2-NEXT: #NO_APP
+; PPC64-ELFv2-NEXT: lwz 12, 8(1)
+; PPC64-ELFv2-NEXT: mtocrf 32, 12
+; PPC64-ELFv2-NEXT: blr
entry:
tail call void asm sideeffect "# clobbers", "~{cr2}"()
ret void
-
-; PPC64-ELFv2-LABEL: @cloberOneNvCrField
-; PPC64-ELFv2: mfocrf [[REG1:[0-9]+]], 32
}
; Generate mfcr in prologue when we need to save all nonvolatile CR fields
define void @cloberAllNvCrField() {
+; PPC32-LABEL: cloberAllNvCrField:
+; PPC32: # %bb.0: # %entry
+; PPC32-NEXT: stwu 1, -32(1)
+; PPC32-NEXT: stw 31, 28(1)
+; PPC32-NEXT: .cfi_def_cfa_offset 32
+; PPC32-NEXT: .cfi_offset r31, -4
+; PPC32-NEXT: mr 31, 1
+; PPC32-NEXT: .cfi_def_cfa_register r31
+; PPC32-NEXT: mfcr 12
+; PPC32-NEXT: stw 12, 24(31)
+; PPC32-NEXT: #APP
+; PPC32-NEXT: # clobbers
+; PPC32-NEXT: #NO_APP
+; PPC32-NEXT: lwz 12, 24(31)
+; PPC32-NEXT: mtocrf 32, 12
+; PPC32-NEXT: mtocrf 16, 12
+; PPC32-NEXT: mtocrf 8, 12
+; PPC32-NEXT: lwz 31, 28(1)
+; PPC32-NEXT: addi 1, 1, 32
+; PPC32-NEXT: blr
+;
+; PPC64-LABEL: cloberAllNvCrField:
+; PPC64: # %bb.0: # %entry
+; PPC64-NEXT: mfcr 12
+; PPC64-NEXT: stw 12, 8(1)
+; PPC64-NEXT: #APP
+; PPC64-NEXT: # clobbers
+; PPC64-NEXT: #NO_APP
+; PPC64-NEXT: lwz 12, 8(1)
+; PPC64-NEXT: mtocrf 32, 12
+; PPC64-NEXT: mtocrf 16, 12
+; PPC64-NEXT: mtocrf 8, 12
+; PPC64-NEXT: blr
+;
+; PPC64-ELFv2-LABEL: cloberAllNvCrField:
+; PPC64-ELFv2: # %bb.0: # %entry
+; PPC64-ELFv2-NEXT: mfcr 12
+; PPC64-ELFv2-NEXT: stw 12, 8(1)
+; PPC64-ELFv2-NEXT: #APP
+; PPC64-ELFv2-NEXT: # clobbers
+; PPC64-ELFv2-NEXT: #NO_APP
+; PPC64-ELFv2-NEXT: lwz 12, 8(1)
+; PPC64-ELFv2-NEXT: mtocrf 32, 12
+; PPC64-ELFv2-NEXT: mtocrf 16, 12
+; PPC64-ELFv2-NEXT: mtocrf 8, 12
+; PPC64-ELFv2-NEXT: blr
entry:
tail call void asm sideeffect "# clobbers", "~{cr2},~{cr3},~{cr4}"()
ret void
-
-; PPC64-ELFv2-LABEL: @cloberAllNvCrField
-; PPC64-ELFv2: mfcr [[REG1:[0-9]+]]
}
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
index 90d7877..18b6649 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
@@ -1,13 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=NOZACAS,RV32IA %s
-; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=ZACAS,RV32IA-ZACAS %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=NOZACAS,RV64IA %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=ZACAS,RV64IA-ZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zacas,+experimental-zabha -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zacas,+experimental-zabha -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=ZACAS,RV64IA-ZABHA %s
; Test cmpxchg followed by a branch on the cmpxchg success value to see if the
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
index 8df37bf..394dffa 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
@@ -3,25 +3,25 @@
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-WMO %s
-; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-ZACAS,RV32IA-WMO-ZACAS %s
; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-ztso -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-TSO %s
-; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-ztso,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-ztso,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-ZACAS,RV32IA-TSO-ZACAS %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-WMO %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZACAS,RV64IA-WMO-ZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zacas,+experimental-zabha -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zacas,+experimental-zabha -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZABHA,RV64IA-WMO-ZABHA %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-TSO %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZACAS,RV64IA-TSO-ZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+experimental-zacas,+experimental-zabha -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+zacas,+experimental-zabha -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZABHA,RV64IA-TSO-ZABHA %s
define void @cmpxchg_i8_monotonic_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index ee80250..fe53001 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -12,22 +12,22 @@
; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-NOZACAS,RV64IA-TSO,RV64IA-TSO-NOZACAS %s
-; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-ZACAS,RV32IA-WMO,RV32IA-WMO-ZACAS %s
-; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-ztso,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-ztso,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-ZACAS,RV32IA-TSO,RV32IA-TSO-ZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZACAS,RV64IA-WMO,RV64IA-WMO-ZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZACAS,RV64IA-TSO,RV64IA-TSO-ZACAS %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zabha -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-WMO,RV64IA-WMO-ZABHA,RV64IA-WMO-ZABHA-NOZACAS %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+experimental-zabha -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-TSO,RV64IA-TSO-ZABHA,RV64IA-TSO-ZABHA-NOZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zabha,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zabha,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-WMO,RV64IA-WMO-ZABHA,RV64IA-WMO-ZABHA-ZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+experimental-zabha,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-ztso,+experimental-zabha,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-TSO,RV64IA-TSO-ZABHA,RV64IA-TSO-ZABHA-ZACAS %s
define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll
index 47807f7..bdf3b28 100644
--- a/llvm/test/CodeGen/RISCV/atomic-signext.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll
@@ -3,13 +3,13 @@
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-NOZACAS %s
-; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-ZACAS %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-NOZACAS %s
-; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zacas -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZACAS %s
define signext i8 @atomic_load_i8_unordered(ptr %a) nounwind {
diff --git a/llvm/test/CodeGen/RISCV/attributes.ll b/llvm/test/CodeGen/RISCV/attributes.ll
index 13635a9..561b0f2 100644
--- a/llvm/test/CodeGen/RISCV/attributes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes.ll
@@ -111,7 +111,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zvfbfmin %s -o - | FileCheck --check-prefixes=CHECK,RV32ZVFBFMIN %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zvfbfwma %s -o - | FileCheck --check-prefixes=CHECK,RV32ZVFBFWMA %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zaamo %s -o - | FileCheck --check-prefix=RV32ZAAMO %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zacas %s -o - | FileCheck --check-prefix=RV32ZACAS %s
+; RUN: llc -mtriple=riscv32 -mattr=+zacas %s -o - | FileCheck --check-prefix=RV32ZACAS %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zalasr %s -o - | FileCheck --check-prefix=RV32ZALASR %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zalrsc %s -o - | FileCheck --check-prefix=RV32ZALRSC %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zicfilp %s -o - | FileCheck --check-prefix=RV32ZICFILP %s
@@ -240,7 +240,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zvfbfmin %s -o - | FileCheck --check-prefixes=CHECK,RV64ZVFBFMIN %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zvfbfwma %s -o - | FileCheck --check-prefixes=CHECK,RV64ZVFBFWMA %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zaamo %s -o - | FileCheck --check-prefix=RV64ZAAMO %s
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zacas %s -o - | FileCheck --check-prefix=RV64ZACAS %s
+; RUN: llc -mtriple=riscv64 -mattr=+zacas %s -o - | FileCheck --check-prefix=RV64ZACAS %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zalasr %s -o - | FileCheck --check-prefix=RV64ZALASR %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zalrsc %s -o - | FileCheck --check-prefix=RV64ZALRSC %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicfilp %s -o - | FileCheck --check-prefix=RV64ZICFILP %s
diff --git a/llvm/test/CodeGen/RISCV/machine-combiner.ll b/llvm/test/CodeGen/RISCV/machine-combiner.ll
index 7c1792e..cfdefec 100644
--- a/llvm/test/CodeGen/RISCV/machine-combiner.ll
+++ b/llvm/test/CodeGen/RISCV/machine-combiner.ll
@@ -1096,10 +1096,10 @@ declare double @llvm.maxnum.f64(double, double)
define double @test_fmadd_strategy(double %a0, double %a1, double %a2, double %a3, i64 %flag) {
; CHECK_LOCAL-LABEL: test_fmadd_strategy:
; CHECK_LOCAL: # %bb.0: # %entry
-; CHECK_LOCAL-NEXT: fmv.d fa5, fa0
; CHECK_LOCAL-NEXT: fsub.d fa4, fa0, fa1
-; CHECK_LOCAL-NEXT: fmul.d fa0, fa4, fa2
; CHECK_LOCAL-NEXT: andi a0, a0, 1
+; CHECK_LOCAL-NEXT: fmv.d fa5, fa0
+; CHECK_LOCAL-NEXT: fmul.d fa0, fa4, fa2
; CHECK_LOCAL-NEXT: beqz a0, .LBB76_2
; CHECK_LOCAL-NEXT: # %bb.1: # %entry
; CHECK_LOCAL-NEXT: fmul.d fa4, fa5, fa1
@@ -1110,10 +1110,10 @@ define double @test_fmadd_strategy(double %a0, double %a1, double %a2, double %a
;
; CHECK_GLOBAL-LABEL: test_fmadd_strategy:
; CHECK_GLOBAL: # %bb.0: # %entry
-; CHECK_GLOBAL-NEXT: fmv.d fa5, fa0
; CHECK_GLOBAL-NEXT: fsub.d fa4, fa0, fa1
-; CHECK_GLOBAL-NEXT: fmul.d fa0, fa4, fa2
; CHECK_GLOBAL-NEXT: andi a0, a0, 1
+; CHECK_GLOBAL-NEXT: fmv.d fa5, fa0
+; CHECK_GLOBAL-NEXT: fmul.d fa0, fa4, fa2
; CHECK_GLOBAL-NEXT: beqz a0, .LBB76_2
; CHECK_GLOBAL-NEXT: # %bb.1: # %entry
; CHECK_GLOBAL-NEXT: fmul.d fa5, fa5, fa1
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 191f047..5d09c39 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -2849,6 +2849,498 @@ for.body: ; preds = %for.body.preheader,
br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}
+declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_min(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_min:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB46_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmin.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB46_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_min_commute(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_min_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB47_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmin.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB47_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_max(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_max:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB48_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmax.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB48_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_max_commute(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_max_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB49_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmax.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB49_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_umin(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_umin:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB50_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vminu.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB50_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_umin_commute(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_umin_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB51_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vminu.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB51_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_umax(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_umax:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB52_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmaxu.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB52_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.umax.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_umax_commute(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_umax_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB53_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmaxu.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB53_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.umax.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_sadd_sat(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_sadd_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a2, 1
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB54_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsadd.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a2, .LBB54_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_sadd_sat_commute(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_sadd_sat_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a2, 1
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB55_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsadd.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a2, .LBB55_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_ssub_sat(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_ssub_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB56_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB56_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_uadd_sat(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_uadd_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a2, 1
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB57_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsaddu.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a2, .LBB57_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_uadd_sat_commute(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_uadd_sat_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a2, 1
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB58_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsaddu.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a2, .LBB58_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
+
+define void @sink_splat_usub_sat(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_usub_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB59_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a2, .LBB59_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
declare <4 x i32> @llvm.vp.mul.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
define void @sink_splat_vp_mul(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
@@ -2857,7 +3349,7 @@ define void @sink_splat_vp_mul(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB46_1: # %vector.body
+; CHECK-NEXT: .LBB60_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -2865,7 +3357,7 @@ define void @sink_splat_vp_mul(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB46_1
+; CHECK-NEXT: bne a0, a3, .LBB60_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -2895,7 +3387,7 @@ define void @sink_splat_vp_add(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB47_1: # %vector.body
+; CHECK-NEXT: .LBB61_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -2903,7 +3395,7 @@ define void @sink_splat_vp_add(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB47_1
+; CHECK-NEXT: bne a0, a3, .LBB61_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -2931,7 +3423,7 @@ define void @sink_splat_vp_add_commute(ptr nocapture %a, i32 signext %x, <4 x i1
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB48_1: # %vector.body
+; CHECK-NEXT: .LBB62_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -2939,7 +3431,7 @@ define void @sink_splat_vp_add_commute(ptr nocapture %a, i32 signext %x, <4 x i1
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB48_1
+; CHECK-NEXT: bne a0, a3, .LBB62_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -2969,7 +3461,7 @@ define void @sink_splat_vp_sub(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB49_1: # %vector.body
+; CHECK-NEXT: .LBB63_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -2977,7 +3469,7 @@ define void @sink_splat_vp_sub(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB49_1
+; CHECK-NEXT: bne a0, a3, .LBB63_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3005,7 +3497,7 @@ define void @sink_splat_vp_rsub(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB50_1: # %vector.body
+; CHECK-NEXT: .LBB64_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3013,7 +3505,7 @@ define void @sink_splat_vp_rsub(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB50_1
+; CHECK-NEXT: bne a0, a3, .LBB64_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3043,7 +3535,7 @@ define void @sink_splat_vp_shl(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB51_1: # %vector.body
+; CHECK-NEXT: .LBB65_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3051,7 +3543,7 @@ define void @sink_splat_vp_shl(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB51_1
+; CHECK-NEXT: bne a0, a3, .LBB65_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3081,7 +3573,7 @@ define void @sink_splat_vp_lshr(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB52_1: # %vector.body
+; CHECK-NEXT: .LBB66_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3089,7 +3581,7 @@ define void @sink_splat_vp_lshr(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB52_1
+; CHECK-NEXT: bne a0, a3, .LBB66_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3119,7 +3611,7 @@ define void @sink_splat_vp_ashr(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB53_1: # %vector.body
+; CHECK-NEXT: .LBB67_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3127,7 +3619,7 @@ define void @sink_splat_vp_ashr(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB53_1
+; CHECK-NEXT: bne a0, a3, .LBB67_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3157,7 +3649,7 @@ define void @sink_splat_vp_fmul(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB54_1: # %vector.body
+; CHECK-NEXT: .LBB68_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -3165,7 +3657,7 @@ define void @sink_splat_vp_fmul(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB54_1
+; CHECK-NEXT: bne a0, a2, .LBB68_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3195,7 +3687,7 @@ define void @sink_splat_vp_fdiv(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB55_1: # %vector.body
+; CHECK-NEXT: .LBB69_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -3203,7 +3695,7 @@ define void @sink_splat_vp_fdiv(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB55_1
+; CHECK-NEXT: bne a0, a2, .LBB69_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3231,7 +3723,7 @@ define void @sink_splat_vp_frdiv(ptr nocapture %a, float %x, <4 x i1> %m, i32 ze
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB56_1: # %vector.body
+; CHECK-NEXT: .LBB70_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -3239,7 +3731,7 @@ define void @sink_splat_vp_frdiv(ptr nocapture %a, float %x, <4 x i1> %m, i32 ze
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB56_1
+; CHECK-NEXT: bne a0, a2, .LBB70_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3269,7 +3761,7 @@ define void @sink_splat_vp_fadd(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB57_1: # %vector.body
+; CHECK-NEXT: .LBB71_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -3277,7 +3769,7 @@ define void @sink_splat_vp_fadd(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB57_1
+; CHECK-NEXT: bne a0, a2, .LBB71_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3307,7 +3799,7 @@ define void @sink_splat_vp_fsub(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB58_1: # %vector.body
+; CHECK-NEXT: .LBB72_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -3315,7 +3807,7 @@ define void @sink_splat_vp_fsub(ptr nocapture %a, float %x, <4 x i1> %m, i32 zer
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB58_1
+; CHECK-NEXT: bne a0, a2, .LBB72_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3345,7 +3837,7 @@ define void @sink_splat_vp_frsub(ptr nocapture %a, float %x, <4 x i1> %m, i32 ze
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB59_1: # %vector.body
+; CHECK-NEXT: .LBB73_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -3353,7 +3845,7 @@ define void @sink_splat_vp_frsub(ptr nocapture %a, float %x, <4 x i1> %m, i32 ze
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB59_1
+; CHECK-NEXT: bne a0, a2, .LBB73_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3383,7 +3875,7 @@ define void @sink_splat_vp_udiv(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB60_1: # %vector.body
+; CHECK-NEXT: .LBB74_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3391,7 +3883,7 @@ define void @sink_splat_vp_udiv(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB60_1
+; CHECK-NEXT: bne a0, a3, .LBB74_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3421,7 +3913,7 @@ define void @sink_splat_vp_sdiv(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB61_1: # %vector.body
+; CHECK-NEXT: .LBB75_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3429,7 +3921,7 @@ define void @sink_splat_vp_sdiv(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB61_1
+; CHECK-NEXT: bne a0, a3, .LBB75_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3459,7 +3951,7 @@ define void @sink_splat_vp_urem(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB62_1: # %vector.body
+; CHECK-NEXT: .LBB76_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3467,7 +3959,7 @@ define void @sink_splat_vp_urem(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB62_1
+; CHECK-NEXT: bne a0, a3, .LBB76_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3497,7 +3989,7 @@ define void @sink_splat_vp_srem(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB63_1: # %vector.body
+; CHECK-NEXT: .LBB77_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3505,7 +3997,7 @@ define void @sink_splat_vp_srem(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB63_1
+; CHECK-NEXT: bne a0, a3, .LBB77_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3536,7 +4028,7 @@ define void @sink_splat_vp_srem_commute(ptr nocapture %a, i32 signext %x, <4 x i
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: lui a1, 1
; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: .LBB64_1: # %vector.body
+; CHECK-NEXT: .LBB78_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v9, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -3544,7 +4036,7 @@ define void @sink_splat_vp_srem_commute(ptr nocapture %a, i32 signext %x, <4 x i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a1, .LBB64_1
+; CHECK-NEXT: bne a0, a1, .LBB78_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3574,7 +4066,7 @@ define void @sink_splat_vp_fma(ptr noalias nocapture %a, ptr nocapture readonly
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a1, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB65_1: # %vector.body
+; CHECK-NEXT: .LBB79_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vle32.v v9, (a1)
@@ -3584,7 +4076,7 @@ define void @sink_splat_vp_fma(ptr noalias nocapture %a, ptr nocapture readonly
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a1, a3, .LBB65_1
+; CHECK-NEXT: bne a1, a3, .LBB79_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3614,7 +4106,7 @@ define void @sink_splat_vp_fma_commute(ptr noalias nocapture %a, ptr nocapture r
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a3, a1, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: .LBB66_1: # %vector.body
+; CHECK-NEXT: .LBB80_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vle32.v v9, (a1)
@@ -3624,7 +4116,7 @@ define void @sink_splat_vp_fma_commute(ptr noalias nocapture %a, ptr nocapture r
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a1, a3, .LBB66_1
+; CHECK-NEXT: bne a1, a3, .LBB80_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3655,13 +4147,13 @@ define void @sink_splat_mul_lmul2(ptr nocapture %a, i64 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: .LBB67_1: # %vector.body
+; CHECK-NEXT: .LBB81_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vmul.vx v8, v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB67_1
+; CHECK-NEXT: bne a0, a2, .LBB81_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3689,13 +4181,13 @@ define void @sink_splat_add_lmul2(ptr nocapture %a, i64 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: .LBB68_1: # %vector.body
+; CHECK-NEXT: .LBB82_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vadd.vx v8, v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB68_1
+; CHECK-NEXT: bne a0, a2, .LBB82_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3723,13 +4215,13 @@ define void @sink_splat_sub_lmul2(ptr nocapture %a, i64 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: .LBB69_1: # %vector.body
+; CHECK-NEXT: .LBB83_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vsub.vx v8, v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB69_1
+; CHECK-NEXT: bne a0, a2, .LBB83_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3757,13 +4249,13 @@ define void @sink_splat_rsub_lmul2(ptr nocapture %a, i64 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: .LBB70_1: # %vector.body
+; CHECK-NEXT: .LBB84_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vrsub.vx v8, v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB70_1
+; CHECK-NEXT: bne a0, a2, .LBB84_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3791,13 +4283,13 @@ define void @sink_splat_and_lmul2(ptr nocapture %a, i64 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: .LBB71_1: # %vector.body
+; CHECK-NEXT: .LBB85_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vand.vx v8, v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB71_1
+; CHECK-NEXT: bne a0, a2, .LBB85_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3825,13 +4317,13 @@ define void @sink_splat_or_lmul2(ptr nocapture %a, i64 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: .LBB72_1: # %vector.body
+; CHECK-NEXT: .LBB86_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vor.vx v8, v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB72_1
+; CHECK-NEXT: bne a0, a2, .LBB86_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3859,13 +4351,13 @@ define void @sink_splat_xor_lmul2(ptr nocapture %a, i64 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: .LBB73_1: # %vector.body
+; CHECK-NEXT: .LBB87_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vxor.vx v8, v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB73_1
+; CHECK-NEXT: bne a0, a2, .LBB87_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3894,13 +4386,13 @@ define void @sink_splat_mul_lmul8(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: .LBB74_1: # %vector.body
+; CHECK-NEXT: .LBB88_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmul.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB74_1
+; CHECK-NEXT: bne a0, a2, .LBB88_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3929,13 +4421,13 @@ define void @sink_splat_add_lmul8(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: .LBB75_1: # %vector.body
+; CHECK-NEXT: .LBB89_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vadd.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB75_1
+; CHECK-NEXT: bne a0, a2, .LBB89_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3964,13 +4456,13 @@ define void @sink_splat_sub_lmul8(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: .LBB76_1: # %vector.body
+; CHECK-NEXT: .LBB90_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsub.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB76_1
+; CHECK-NEXT: bne a0, a2, .LBB90_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -3999,13 +4491,13 @@ define void @sink_splat_rsub_lmul8(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: .LBB77_1: # %vector.body
+; CHECK-NEXT: .LBB91_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vrsub.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB77_1
+; CHECK-NEXT: bne a0, a2, .LBB91_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4034,13 +4526,13 @@ define void @sink_splat_and_lmul8(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: .LBB78_1: # %vector.body
+; CHECK-NEXT: .LBB92_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vand.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB78_1
+; CHECK-NEXT: bne a0, a2, .LBB92_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4069,13 +4561,13 @@ define void @sink_splat_or_lmul8(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: .LBB79_1: # %vector.body
+; CHECK-NEXT: .LBB93_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vor.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB79_1
+; CHECK-NEXT: bne a0, a2, .LBB93_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4104,13 +4596,13 @@ define void @sink_splat_xor_lmul8(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: .LBB80_1: # %vector.body
+; CHECK-NEXT: .LBB94_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vxor.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB80_1
+; CHECK-NEXT: bne a0, a2, .LBB94_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4138,13 +4630,13 @@ define void @sink_splat_mul_lmulmf2(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: .LBB81_1: # %vector.body
+; CHECK-NEXT: .LBB95_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmul.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB81_1
+; CHECK-NEXT: bne a0, a2, .LBB95_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4172,13 +4664,13 @@ define void @sink_splat_add_lmulmf2(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: .LBB82_1: # %vector.body
+; CHECK-NEXT: .LBB96_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vadd.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB82_1
+; CHECK-NEXT: bne a0, a2, .LBB96_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4206,13 +4698,13 @@ define void @sink_splat_sub_lmulmf2(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: .LBB83_1: # %vector.body
+; CHECK-NEXT: .LBB97_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsub.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB83_1
+; CHECK-NEXT: bne a0, a2, .LBB97_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4240,13 +4732,13 @@ define void @sink_splat_rsub_lmulmf2(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: .LBB84_1: # %vector.body
+; CHECK-NEXT: .LBB98_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vrsub.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB84_1
+; CHECK-NEXT: bne a0, a2, .LBB98_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4274,13 +4766,13 @@ define void @sink_splat_and_lmulmf2(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: .LBB85_1: # %vector.body
+; CHECK-NEXT: .LBB99_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vand.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB85_1
+; CHECK-NEXT: bne a0, a2, .LBB99_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4308,13 +4800,13 @@ define void @sink_splat_or_lmulmf2(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: .LBB86_1: # %vector.body
+; CHECK-NEXT: .LBB100_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vor.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB86_1
+; CHECK-NEXT: bne a0, a2, .LBB100_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4342,13 +4834,13 @@ define void @sink_splat_xor_lmulmf2(ptr nocapture %a, i32 signext %x) {
; CHECK-NEXT: lui a2, 2
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: .LBB87_1: # %vector.body
+; CHECK-NEXT: .LBB101_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vxor.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 32
-; CHECK-NEXT: bne a0, a2, .LBB87_1
+; CHECK-NEXT: bne a0, a2, .LBB101_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4380,7 +4872,7 @@ define void @sink_splat_vp_icmp(ptr nocapture %x, i32 signext %y, <4 x i1> %m, i
; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: .LBB88_1: # %vector.body
+; CHECK-NEXT: .LBB102_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v10, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
@@ -4389,7 +4881,7 @@ define void @sink_splat_vp_icmp(ptr nocapture %x, i32 signext %y, <4 x i1> %m, i
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0), v0.t
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a3, .LBB88_1
+; CHECK-NEXT: bne a0, a3, .LBB102_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4421,7 +4913,7 @@ define void @sink_splat_vp_fcmp(ptr nocapture %x, float %y, <4 x i1> %m, i32 zer
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: .LBB89_1: # %vector.body
+; CHECK-NEXT: .LBB103_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v10, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -4430,7 +4922,7 @@ define void @sink_splat_vp_fcmp(ptr nocapture %x, float %y, <4 x i1> %m, i32 zer
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0), v0.t
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB89_1
+; CHECK-NEXT: bne a0, a2, .LBB103_1
; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
@@ -4451,3 +4943,485 @@ vector.body: ; preds = %vector.body, %entry
for.cond.cleanup: ; preds = %vector.body
ret void
}
+
+declare <4 x i32> @llvm.vp.smin.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_min(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_min:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB104_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB104_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_vp_min_commute(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_min_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB105_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB105_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.vp.smax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_max(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_max:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB106_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vmax.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB106_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_vp_max_commute(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_max_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB107_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vmax.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB107_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_vp_umin_commute(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_umin_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB108_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vminu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB108_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.umin.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.vp.umax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_umax(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_umax:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB109_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vmaxu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB109_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_vp_umax_commute(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_umax_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB110_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vmaxu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB110_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_sadd_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_sadd_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB111_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB111_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_vp_sadd_sat_commute(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_sadd_sat_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB112_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB112_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_ssub_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_ssub_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a3, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB113_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a3, a3, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a3, .LBB113_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_uadd_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_uadd_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB114_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB114_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_vp_uadd_sat_commute(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_uadd_sat_commute:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB115_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a3, .LBB115_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_usub_sat(ptr nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_usub_sat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a3, 1024
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB116_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a3, a3, 4
+; CHECK-NEXT: addi a0, a0, -16
+; CHECK-NEXT: bnez a3, .LBB116_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = sub nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
index 87406f2..c0c11fe 100644
--- a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
+++ b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
@@ -813,24 +813,24 @@ define i64 @select_sll(i64 %A, i64 %B, i64 %C, i1 zeroext %cond) {
; RV32SFB-NEXT: not a7, a2
; RV32SFB-NEXT: srli a0, a0, 1
; RV32SFB-NEXT: sll t0, a1, a2
-; RV32SFB-NEXT: srl a0, a0, a7
; RV32SFB-NEXT: addi a2, a2, -32
+; RV32SFB-NEXT: srl a0, a0, a7
; RV32SFB-NEXT: mv a1, a3
-; RV32SFB-NEXT: bgez a2, .LBB20_2
+; RV32SFB-NEXT: bltz a2, .LBB20_2
; RV32SFB-NEXT: # %bb.1: # %entry
-; RV32SFB-NEXT: or a1, t0, a0
+; RV32SFB-NEXT: li a3, 0
; RV32SFB-NEXT: .LBB20_2: # %entry
-; RV32SFB-NEXT: bltz a2, .LBB20_4
+; RV32SFB-NEXT: bgez a2, .LBB20_4
; RV32SFB-NEXT: # %bb.3: # %entry
-; RV32SFB-NEXT: li a3, 0
+; RV32SFB-NEXT: or a1, t0, a0
; RV32SFB-NEXT: .LBB20_4: # %entry
; RV32SFB-NEXT: beqz a6, .LBB20_6
; RV32SFB-NEXT: # %bb.5: # %entry
-; RV32SFB-NEXT: mv a1, a5
+; RV32SFB-NEXT: mv a3, a4
; RV32SFB-NEXT: .LBB20_6: # %entry
; RV32SFB-NEXT: beqz a6, .LBB20_8
; RV32SFB-NEXT: # %bb.7: # %entry
-; RV32SFB-NEXT: mv a3, a4
+; RV32SFB-NEXT: mv a1, a5
; RV32SFB-NEXT: .LBB20_8: # %entry
; RV32SFB-NEXT: mv a0, a3
; RV32SFB-NEXT: ret
@@ -874,24 +874,24 @@ define i64 @select_srl(i64 %A, i64 %B, i64 %C, i1 zeroext %cond) {
; RV32SFB-NEXT: not a7, a2
; RV32SFB-NEXT: slli a1, a1, 1
; RV32SFB-NEXT: srl t0, a0, a2
-; RV32SFB-NEXT: sll a1, a1, a7
; RV32SFB-NEXT: addi a2, a2, -32
+; RV32SFB-NEXT: sll a1, a1, a7
; RV32SFB-NEXT: mv a0, a3
-; RV32SFB-NEXT: bgez a2, .LBB21_2
+; RV32SFB-NEXT: bltz a2, .LBB21_2
; RV32SFB-NEXT: # %bb.1: # %entry
-; RV32SFB-NEXT: or a0, t0, a1
+; RV32SFB-NEXT: li a3, 0
; RV32SFB-NEXT: .LBB21_2: # %entry
-; RV32SFB-NEXT: bltz a2, .LBB21_4
+; RV32SFB-NEXT: bgez a2, .LBB21_4
; RV32SFB-NEXT: # %bb.3: # %entry
-; RV32SFB-NEXT: li a3, 0
+; RV32SFB-NEXT: or a0, t0, a1
; RV32SFB-NEXT: .LBB21_4: # %entry
; RV32SFB-NEXT: beqz a6, .LBB21_6
; RV32SFB-NEXT: # %bb.5: # %entry
-; RV32SFB-NEXT: mv a0, a4
+; RV32SFB-NEXT: mv a3, a5
; RV32SFB-NEXT: .LBB21_6: # %entry
; RV32SFB-NEXT: beqz a6, .LBB21_8
; RV32SFB-NEXT: # %bb.7: # %entry
-; RV32SFB-NEXT: mv a3, a5
+; RV32SFB-NEXT: mv a0, a4
; RV32SFB-NEXT: .LBB21_8: # %entry
; RV32SFB-NEXT: mv a1, a3
; RV32SFB-NEXT: ret
@@ -935,24 +935,24 @@ define i64 @select_sra(i64 %A, i64 %B, i64 %C, i1 zeroext %cond) {
; RV32SFB-NEXT: not a7, a2
; RV32SFB-NEXT: slli t0, a1, 1
; RV32SFB-NEXT: srl t1, a0, a2
-; RV32SFB-NEXT: sll a7, t0, a7
; RV32SFB-NEXT: addi a2, a2, -32
+; RV32SFB-NEXT: sll a7, t0, a7
; RV32SFB-NEXT: mv a0, a3
-; RV32SFB-NEXT: bgez a2, .LBB22_2
+; RV32SFB-NEXT: bltz a2, .LBB22_2
; RV32SFB-NEXT: # %bb.1: # %entry
-; RV32SFB-NEXT: or a0, t1, a7
+; RV32SFB-NEXT: srai a3, a1, 31
; RV32SFB-NEXT: .LBB22_2: # %entry
-; RV32SFB-NEXT: bltz a2, .LBB22_4
+; RV32SFB-NEXT: bgez a2, .LBB22_4
; RV32SFB-NEXT: # %bb.3: # %entry
-; RV32SFB-NEXT: srai a3, a1, 31
+; RV32SFB-NEXT: or a0, t1, a7
; RV32SFB-NEXT: .LBB22_4: # %entry
; RV32SFB-NEXT: beqz a6, .LBB22_6
; RV32SFB-NEXT: # %bb.5: # %entry
-; RV32SFB-NEXT: mv a0, a4
+; RV32SFB-NEXT: mv a3, a5
; RV32SFB-NEXT: .LBB22_6: # %entry
; RV32SFB-NEXT: beqz a6, .LBB22_8
; RV32SFB-NEXT: # %bb.7: # %entry
-; RV32SFB-NEXT: mv a3, a5
+; RV32SFB-NEXT: mv a0, a4
; RV32SFB-NEXT: .LBB22_8: # %entry
; RV32SFB-NEXT: mv a1, a3
; RV32SFB-NEXT: ret
@@ -1088,11 +1088,11 @@ define i64 @select_andi(i64 %A, i64 %C, i1 zeroext %cond) {
; RV32SFB-NEXT: # %bb.1: # %entry
; RV32SFB-NEXT: andi a2, a0, 567
; RV32SFB-NEXT: .LBB25_2: # %entry
+; RV32SFB-NEXT: mv a0, a2
; RV32SFB-NEXT: bnez a4, .LBB25_4
; RV32SFB-NEXT: # %bb.3: # %entry
; RV32SFB-NEXT: li a1, 0
; RV32SFB-NEXT: .LBB25_4: # %entry
-; RV32SFB-NEXT: mv a0, a2
; RV32SFB-NEXT: ret
entry:
%0 = and i64 %A, 567
@@ -1130,13 +1130,13 @@ define i64 @select_ori(i64 %A, i64 %C, i1 zeroext %cond) {
;
; RV32SFB-LABEL: select_ori:
; RV32SFB: # %bb.0: # %entry
-; RV32SFB-NEXT: beqz a4, .LBB26_2
+; RV32SFB-NEXT: bnez a4, .LBB26_2
; RV32SFB-NEXT: # %bb.1: # %entry
-; RV32SFB-NEXT: mv a1, a3
+; RV32SFB-NEXT: ori a2, a0, 890
; RV32SFB-NEXT: .LBB26_2: # %entry
-; RV32SFB-NEXT: bnez a4, .LBB26_4
+; RV32SFB-NEXT: beqz a4, .LBB26_4
; RV32SFB-NEXT: # %bb.3: # %entry
-; RV32SFB-NEXT: ori a2, a0, 890
+; RV32SFB-NEXT: mv a1, a3
; RV32SFB-NEXT: .LBB26_4: # %entry
; RV32SFB-NEXT: mv a0, a2
; RV32SFB-NEXT: ret
@@ -1176,13 +1176,13 @@ define i64 @select_xori(i64 %A, i64 %C, i1 zeroext %cond) {
;
; RV32SFB-LABEL: select_xori:
; RV32SFB: # %bb.0: # %entry
-; RV32SFB-NEXT: beqz a4, .LBB27_2
+; RV32SFB-NEXT: bnez a4, .LBB27_2
; RV32SFB-NEXT: # %bb.1: # %entry
-; RV32SFB-NEXT: mv a1, a3
+; RV32SFB-NEXT: xori a2, a0, 321
; RV32SFB-NEXT: .LBB27_2: # %entry
-; RV32SFB-NEXT: bnez a4, .LBB27_4
+; RV32SFB-NEXT: beqz a4, .LBB27_4
; RV32SFB-NEXT: # %bb.3: # %entry
-; RV32SFB-NEXT: xori a2, a0, 321
+; RV32SFB-NEXT: mv a1, a3
; RV32SFB-NEXT: .LBB27_4: # %entry
; RV32SFB-NEXT: mv a0, a2
; RV32SFB-NEXT: ret
@@ -1272,11 +1272,11 @@ define i64 @select_srli(i64 %A, i64 %C, i1 zeroext %cond) {
; RV32SFB-NEXT: mv a0, a2
; RV32SFB-NEXT: bnez a4, .LBB29_2
; RV32SFB-NEXT: # %bb.1: # %entry
-; RV32SFB-NEXT: srli a0, a1, 3
+; RV32SFB-NEXT: li a3, 0
; RV32SFB-NEXT: .LBB29_2: # %entry
; RV32SFB-NEXT: bnez a4, .LBB29_4
; RV32SFB-NEXT: # %bb.3: # %entry
-; RV32SFB-NEXT: li a3, 0
+; RV32SFB-NEXT: srli a0, a1, 3
; RV32SFB-NEXT: .LBB29_4: # %entry
; RV32SFB-NEXT: mv a1, a3
; RV32SFB-NEXT: ret
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index abfe3e6..3e40bfa 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -2171,19 +2171,13 @@ define void @test_concat_v2i1(ptr %arg, ptr %arg1, ptr %arg2) nounwind {
; KNL-LABEL: test_concat_v2i1:
; KNL: ## %bb.0:
; KNL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; KNL-NEXT: vpextrw $0, %xmm0, %eax
-; KNL-NEXT: movzwl %ax, %eax
-; KNL-NEXT: vmovd %eax, %xmm1
-; KNL-NEXT: vcvtph2ps %xmm1, %xmm1
+; KNL-NEXT: vcvtph2ps %xmm0, %xmm1
; KNL-NEXT: vmovss {{.*#+}} xmm2 = [6.0E+0,0.0E+0,0.0E+0,0.0E+0]
; KNL-NEXT: vucomiss %xmm2, %xmm1
; KNL-NEXT: setb %al
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: kmovw %eax, %k0
-; KNL-NEXT: vpsrld $16, %xmm0, %xmm0
-; KNL-NEXT: vpextrw $0, %xmm0, %eax
-; KNL-NEXT: movzwl %ax, %eax
-; KNL-NEXT: vmovd %eax, %xmm0
+; KNL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; KNL-NEXT: vcvtph2ps %xmm0, %xmm0
; KNL-NEXT: vucomiss %xmm2, %xmm0
; KNL-NEXT: setb %al
@@ -2212,19 +2206,13 @@ define void @test_concat_v2i1(ptr %arg, ptr %arg1, ptr %arg2) nounwind {
; SKX-LABEL: test_concat_v2i1:
; SKX: ## %bb.0:
; SKX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; SKX-NEXT: vpsrld $16, %xmm0, %xmm1
-; SKX-NEXT: vpextrw $0, %xmm1, %eax
-; SKX-NEXT: movzwl %ax, %eax
-; SKX-NEXT: vmovd %eax, %xmm1
+; SKX-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[1,1,1,1,4,5,6,7]
; SKX-NEXT: vcvtph2ps %xmm1, %xmm1
; SKX-NEXT: vmovss {{.*#+}} xmm2 = [6.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SKX-NEXT: vucomiss %xmm2, %xmm1
; SKX-NEXT: setb %al
; SKX-NEXT: kmovd %eax, %k0
; SKX-NEXT: kshiftlb $1, %k0, %k0
-; SKX-NEXT: vpextrw $0, %xmm0, %eax
-; SKX-NEXT: movzwl %ax, %eax
-; SKX-NEXT: vmovd %eax, %xmm0
; SKX-NEXT: vcvtph2ps %xmm0, %xmm0
; SKX-NEXT: vucomiss %xmm2, %xmm0
; SKX-NEXT: setb %al
diff --git a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
index f5cca78..86ebb1e 100644
--- a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -1436,10 +1436,8 @@ define void @half_vec_compare(ptr %x, ptr %y) {
; KNL: ## %bb.0: ## %entry
; KNL-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; KNL-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x07]
-; KNL-NEXT: vpsrld $16, %xmm0, %xmm1 ## encoding: [0xc5,0xf1,0x72,0xd0,0x10]
-; KNL-NEXT: vpextrw $0, %xmm1, %eax ## encoding: [0xc5,0xf9,0xc5,0xc1,0x00]
-; KNL-NEXT: movzwl %ax, %eax ## encoding: [0x0f,0xb7,0xc0]
-; KNL-NEXT: vmovd %eax, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc8]
+; KNL-NEXT: vpshuflw $85, %xmm0, %xmm1 ## encoding: [0xc5,0xfb,0x70,0xc8,0x55]
+; KNL-NEXT: ## xmm1 = xmm0[1,1,1,1,4,5,6,7]
; KNL-NEXT: vcvtph2ps %xmm1, %xmm1 ## encoding: [0xc4,0xe2,0x79,0x13,0xc9]
; KNL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; KNL-NEXT: vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
@@ -1449,9 +1447,6 @@ define void @half_vec_compare(ptr %x, ptr %y) {
; KNL-NEXT: movl $0, %edx ## encoding: [0xba,0x00,0x00,0x00,0x00]
; KNL-NEXT: cmovnel %ecx, %edx ## encoding: [0x0f,0x45,0xd1]
; KNL-NEXT: cmovpl %ecx, %edx ## encoding: [0x0f,0x4a,0xd1]
-; KNL-NEXT: vpextrw $0, %xmm0, %edi ## encoding: [0xc5,0xf9,0xc5,0xf8,0x00]
-; KNL-NEXT: movzwl %di, %edi ## encoding: [0x0f,0xb7,0xff]
-; KNL-NEXT: vmovd %edi, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
; KNL-NEXT: vcvtph2ps %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x13,0xc0]
; KNL-NEXT: vucomiss %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc2]
; KNL-NEXT: cmovnel %ecx, %eax ## encoding: [0x0f,0x45,0xc1]
@@ -1468,10 +1463,8 @@ define void @half_vec_compare(ptr %x, ptr %y) {
; AVX512BW: ## %bb.0: ## %entry
; AVX512BW-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512BW-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x07]
-; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1 ## encoding: [0xc5,0xf1,0x72,0xd0,0x10]
-; AVX512BW-NEXT: vpextrw $0, %xmm1, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc5,0xc1,0x00]
-; AVX512BW-NEXT: movzwl %ax, %eax ## encoding: [0x0f,0xb7,0xc0]
-; AVX512BW-NEXT: vmovd %eax, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc8]
+; AVX512BW-NEXT: vpshuflw $85, %xmm0, %xmm1 ## encoding: [0xc5,0xfb,0x70,0xc8,0x55]
+; AVX512BW-NEXT: ## xmm1 = xmm0[1,1,1,1,4,5,6,7]
; AVX512BW-NEXT: vcvtph2ps %xmm1, %xmm1 ## encoding: [0xc4,0xe2,0x79,0x13,0xc9]
; AVX512BW-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX512BW-NEXT: vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
@@ -1481,9 +1474,6 @@ define void @half_vec_compare(ptr %x, ptr %y) {
; AVX512BW-NEXT: movl $0, %edx ## encoding: [0xba,0x00,0x00,0x00,0x00]
; AVX512BW-NEXT: cmovnel %ecx, %edx ## encoding: [0x0f,0x45,0xd1]
; AVX512BW-NEXT: cmovpl %ecx, %edx ## encoding: [0x0f,0x4a,0xd1]
-; AVX512BW-NEXT: vpextrw $0, %xmm0, %edi ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc5,0xf8,0x00]
-; AVX512BW-NEXT: movzwl %di, %edi ## encoding: [0x0f,0xb7,0xff]
-; AVX512BW-NEXT: vmovd %edi, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
; AVX512BW-NEXT: vcvtph2ps %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x13,0xc0]
; AVX512BW-NEXT: vucomiss %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc2]
; AVX512BW-NEXT: cmovnel %ecx, %eax ## encoding: [0x0f,0x45,0xc1]
@@ -1500,10 +1490,8 @@ define void @half_vec_compare(ptr %x, ptr %y) {
; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SKX-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x07]
-; SKX-NEXT: vpsrld $16, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x72,0xd0,0x10]
-; SKX-NEXT: vpextrw $0, %xmm1, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc5,0xc1,0x00]
-; SKX-NEXT: movzwl %ax, %eax ## encoding: [0x0f,0xb7,0xc0]
-; SKX-NEXT: vmovd %eax, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc8]
+; SKX-NEXT: vpshuflw $85, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x70,0xc8,0x55]
+; SKX-NEXT: ## xmm1 = xmm0[1,1,1,1,4,5,6,7]
; SKX-NEXT: vcvtph2ps %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0xc9]
; SKX-NEXT: vxorps %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x57,0xd2]
; SKX-NEXT: vucomiss %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xca]
@@ -1512,9 +1500,6 @@ define void @half_vec_compare(ptr %x, ptr %y) {
; SKX-NEXT: orb %al, %cl ## encoding: [0x08,0xc1]
; SKX-NEXT: testb %cl, %cl ## encoding: [0x84,0xc9]
; SKX-NEXT: setne %al ## encoding: [0x0f,0x95,0xc0]
-; SKX-NEXT: vpextrw $0, %xmm0, %ecx ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc5,0xc8,0x00]
-; SKX-NEXT: movzwl %cx, %ecx ## encoding: [0x0f,0xb7,0xc9]
-; SKX-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; SKX-NEXT: vcvtph2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0xc0]
; SKX-NEXT: vucomiss %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc2]
; SKX-NEXT: setp %cl ## encoding: [0x0f,0x9a,0xc1]
diff --git a/llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll b/llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll
index e2ea897..f6fb2fc 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll
@@ -92,7 +92,7 @@ define half @f6(half %x, i16 %y) {
define half @f7(half %x) {
; CHECK-LABEL: f7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: vmovsh {{.*#+}} xmm1 = [1.7881E-7,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vandps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast half %x to i16
@@ -106,7 +106,7 @@ define half @f7(half %x) {
define half @f8(half %x) {
; CHECK-LABEL: f8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: vmovsh {{.*#+}} xmm1 = [2.3842E-7,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vandps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast half %x to i16
@@ -171,7 +171,7 @@ define half @xor(half %x, half %y) {
define half @f7_or(half %x) {
; CHECK-LABEL: f7_or:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: vmovsh {{.*#+}} xmm1 = [1.7881E-7,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vorps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast half %x to i16
@@ -183,7 +183,7 @@ define half @f7_or(half %x) {
define half @f7_xor(half %x) {
; CHECK-LABEL: f7_xor:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: vmovsh {{.*#+}} xmm1 = [1.7881E-7,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vxorps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast half %x to i16
@@ -199,7 +199,7 @@ define half @f7_xor(half %x) {
define half @movmsk(half %x) {
; CHECK-LABEL: movmsk:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT: vmovsh {{.*#+}} xmm1 = [-0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vandps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast half %x to i16
@@ -271,7 +271,7 @@ define half @fadd_bitcast_fneg(half %x, half %y) {
define half @fsub_bitcast_fneg(half %x, half %y) {
; CHECK-LABEL: fsub_bitcast_fneg:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; CHECK-NEXT: vmovsh {{.*#+}} xmm2 = [NaN,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vxorps %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vsubsh %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/cmov-fp.ll b/llvm/test/CodeGen/X86/cmov-fp.ll
index 26e720f..77665d0 100644
--- a/llvm/test/CodeGen/X86/cmov-fp.ll
+++ b/llvm/test/CodeGen/X86/cmov-fp.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-- -mcpu pentium4 < %s | FileCheck %s -check-prefix=SSE
-; RUN: llc -mtriple=i686-- -mcpu pentium3 < %s | FileCheck %s -check-prefix=NOSSE2
-; RUN: llc -mtriple=i686-- -mcpu pentium2 < %s | FileCheck %s -check-prefix=NOSSE1
+; RUN: llc -mtriple=i686-- -mcpu pentium3 < %s | FileCheck %s -check-prefixes=NOSSE,NOSSE2
+; RUN: llc -mtriple=i686-- -mcpu pentium2 < %s | FileCheck %s -check-prefixes=NOSSE,NOSSE1
; RUN: llc -mtriple=i686-- -mcpu pentium < %s | FileCheck %s -check-prefix=NOCMOV
; PR14035
@@ -27,27 +27,16 @@ define double @test1(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test1:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovnbe %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test1:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovnbe %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test1:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovnbe %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test1:
; NOCMOV: # %bb.0:
@@ -90,27 +79,16 @@ define double @test2(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test2:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovnb %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test2:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovnb %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test2:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovnb %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test2:
; NOCMOV: # %bb.0:
@@ -153,27 +131,16 @@ define double @test3(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test3:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovb %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test3:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovb %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test3:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovb %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test3:
; NOCMOV: # %bb.0:
@@ -216,27 +183,16 @@ define double @test4(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test4:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovbe %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test4:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovbe %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test4:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovbe %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test4:
; NOCMOV: # %bb.0:
@@ -279,31 +235,18 @@ define double @test5(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test5:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setg %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test5:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setg %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test5:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setg %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test5:
; NOCMOV: # %bb.0:
@@ -346,31 +289,18 @@ define double @test6(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test6:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setge %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test6:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setge %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test6:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setge %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test6:
; NOCMOV: # %bb.0:
@@ -413,31 +343,18 @@ define double @test7(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test7:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setl %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test7:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setl %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test7:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setl %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test7:
; NOCMOV: # %bb.0:
@@ -480,31 +397,18 @@ define double @test8(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: popl %ebp
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test8:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setle %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test8:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setle %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test8:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setle %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test8:
; NOCMOV: # %bb.0:
@@ -1065,27 +969,16 @@ define x86_fp80 @test17(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test17:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovnbe %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test17:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovnbe %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test17:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovnbe %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test17:
; NOCMOV: # %bb.0:
@@ -1118,27 +1011,16 @@ define x86_fp80 @test18(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test18:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovnb %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test18:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovnb %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test18:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovnb %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test18:
; NOCMOV: # %bb.0:
@@ -1171,27 +1053,16 @@ define x86_fp80 @test19(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test19:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovb %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test19:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovb %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test19:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovb %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test19:
; NOCMOV: # %bb.0:
@@ -1224,27 +1095,16 @@ define x86_fp80 @test20(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test20:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovbe %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test20:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovbe %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test20:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovbe %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test20:
; NOCMOV: # %bb.0:
@@ -1279,31 +1139,18 @@ define x86_fp80 @test21(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test21:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setg %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test21:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setg %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test21:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setg %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test21:
; NOCMOV: # %bb.0:
@@ -1339,31 +1186,18 @@ define x86_fp80 @test22(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test22:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setge %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test22:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setge %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test22:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setge %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test22:
; NOCMOV: # %bb.0:
@@ -1398,31 +1232,18 @@ define x86_fp80 @test23(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test23:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setl %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test23:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setl %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test23:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setl %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test23:
; NOCMOV: # %bb.0:
@@ -1457,31 +1278,18 @@ define x86_fp80 @test24(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE-NEXT: fstp %st(1)
; SSE-NEXT: retl
;
-; NOSSE2-LABEL: test24:
-; NOSSE2: # %bb.0:
-; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: setle %al
-; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE2-NEXT: fxch %st(1)
-; NOSSE2-NEXT: fcmovne %st(1), %st
-; NOSSE2-NEXT: fstp %st(1)
-; NOSSE2-NEXT: retl
-;
-; NOSSE1-LABEL: test24:
-; NOSSE1: # %bb.0:
-; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
-; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: setle %al
-; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
-; NOSSE1-NEXT: fxch %st(1)
-; NOSSE1-NEXT: fcmovne %st(1), %st
-; NOSSE1-NEXT: fstp %st(1)
-; NOSSE1-NEXT: retl
+; NOSSE-LABEL: test24:
+; NOSSE: # %bb.0:
+; NOSSE-NEXT: fldt {{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: setle %al
+; NOSSE-NEXT: testb %al, %al
+; NOSSE-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; NOSSE-NEXT: fxch %st(1)
+; NOSSE-NEXT: fcmovne %st(1), %st
+; NOSSE-NEXT: fstp %st(1)
+; NOSSE-NEXT: retl
;
; NOCMOV-LABEL: test24:
; NOCMOV: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll b/llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll
index 7039e33..cbb5bd09 100644
--- a/llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll
@@ -160,6 +160,53 @@ define <16 x i8> @demandedelts_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>
ret <16 x i8> %5
}
+define <4 x float> @demandedbits_sitofp_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x i32> %a2) {
+; SSE-LABEL: demandedbits_sitofp_blendvps:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: cvtdq2ps %xmm2, %xmm0
+; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm3
+; SSE-NEXT: movaps %xmm3, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: demandedbits_sitofp_blendvps:
+; AVX: # %bb.0:
+; AVX-NEXT: vcvtdq2ps %xmm2, %xmm2
+; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %cvt = sitofp <4 x i32> %a2 to <4 x float>
+ %sel = tail call noundef <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %cvt)
+ ret <4 x float> %sel
+}
+
+define <4 x float> @demandedbits_uitofp_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x i32> %a2) {
+; SSE-LABEL: demandedbits_uitofp_blendvps:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [1258291200,1258291200,1258291200,1258291200]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
+; SSE-NEXT: psrld $16, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],mem[1],xmm2[2],mem[3],xmm2[4],mem[5],xmm2[6],mem[7]
+; SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT: addps %xmm2, %xmm0
+; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm3
+; SSE-NEXT: movaps %xmm3, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: demandedbits_uitofp_blendvps:
+; AVX: # %bb.0:
+; AVX-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0],mem[1],xmm2[2],mem[3],xmm2[4],mem[5],xmm2[6],mem[7]
+; AVX-NEXT: vpsrld $16, %xmm2, %xmm2
+; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],mem[1],xmm2[2],mem[3],xmm2[4],mem[5],xmm2[6],mem[7]
+; AVX-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX-NEXT: vaddps %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %cvt = uitofp <4 x i32> %a2 to <4 x float>
+ %sel = tail call noundef <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %cvt)
+ ret <4 x float> %sel
+}
+
define <2 x i64> @demandedbits_blendvpd(i64 %a0, i64 %a2, <2 x double> %a3) {
; SSE-LABEL: demandedbits_blendvpd:
; SSE: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/cvt16.ll b/llvm/test/CodeGen/X86/cvt16.ll
index 59097f8..c7ef353 100644
--- a/llvm/test/CodeGen/X86/cvt16.ll
+++ b/llvm/test/CodeGen/X86/cvt16.ll
@@ -89,7 +89,6 @@ define float @test3(float %src) nounwind uwtable readnone {
; F16C-LABEL: test3:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; F16C-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
index e114c20..1886e29 100644
--- a/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
@@ -18,8 +18,7 @@ define float @test_cvtsh_ss(i16 %a0) nounwind {
;
; X64-LABEL: test_cvtsh_ss:
; X64: # %bb.0:
-; X64-NEXT: movzwl %di, %eax
-; X64-NEXT: vmovd %eax, %xmm0
+; X64-NEXT: vmovd %edi, %xmm0
; X64-NEXT: vcvtph2ps %xmm0, %xmm0
; X64-NEXT: retq
%ins0 = insertelement <8 x i16> undef, i16 %a0, i32 0
@@ -41,8 +40,6 @@ define i16 @test_cvtss_sh(float %a0) nounwind {
; X86-LABEL: test_cvtss_sh:
; X86: # %bb.0:
; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X86-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X86-NEXT: vcvtps2ph $0, %xmm0, %xmm0
; X86-NEXT: vmovd %xmm0, %eax
; X86-NEXT: # kill: def $ax killed $ax killed $eax
@@ -50,8 +47,6 @@ define i16 @test_cvtss_sh(float %a0) nounwind {
;
; X64-LABEL: test_cvtss_sh:
; X64: # %bb.0:
-; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-NEXT: vcvtps2ph $0, %xmm0, %xmm0
; X64-NEXT: vmovd %xmm0, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
diff --git a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
index 5f326b6..8f875c7 100644
--- a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
+++ b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
@@ -1432,7 +1432,6 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bounds(i32 %cnt) nounwind {
; CHECK-NO-FASTFMA-NEXT: shll %cl, %eax
; CHECK-NO-FASTFMA-NEXT: vcvtusi2ss %eax, %xmm0, %xmm0
; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-NO-FASTFMA-NEXT: vcvtph2ps %xmm0, %xmm0
; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
@@ -1447,7 +1446,6 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bounds(i32 %cnt) nounwind {
; CHECK-FMA-NEXT: shlxl %edi, %eax, %eax
; CHECK-FMA-NEXT: vcvtusi2ss %eax, %xmm0, %xmm0
; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-FMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-FMA-NEXT: vcvtph2ps %xmm0, %xmm0
; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
@@ -1550,7 +1548,6 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bound2(i16 %cnt) nounwind {
; CHECK-NO-FASTFMA-NEXT: movzwl %ax, %eax
; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0
; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-NO-FASTFMA-NEXT: vcvtph2ps %xmm0, %xmm0
; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
@@ -1566,7 +1563,6 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bound2(i16 %cnt) nounwind {
; CHECK-FMA-NEXT: movzwl %ax, %eax
; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0
; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-FMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-FMA-NEXT: vcvtph2ps %xmm0, %xmm0
; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/fp-roundeven.ll b/llvm/test/CodeGen/X86/fp-roundeven.ll
index fed2060..8037c78 100644
--- a/llvm/test/CodeGen/X86/fp-roundeven.ll
+++ b/llvm/test/CodeGen/X86/fp-roundeven.ll
@@ -51,7 +51,6 @@ define half @roundeven_f16(half %h) {
; AVX512F-LABEL: roundeven_f16:
; AVX512F: ## %bb.0: ## %entry
; AVX512F-NEXT: vpextrw $0, %xmm0, %eax
-; AVX512F-NEXT: movzwl %ax, %eax
; AVX512F-NEXT: vmovd %eax, %xmm0
; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512F-NEXT: vroundss $8, %xmm0, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/fpclamptosat_vec.ll b/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
index c8708ea..6aad4c2 100644
--- a/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
@@ -698,30 +698,18 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) nounwind {
;
; AVX2-LABEL: stest_f16i32:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
-; AVX2-NEXT: vpextrw $0, %xmm1, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpextrw $0, %xmm2, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm1, %rax
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm1
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX2-NEXT: vcvttss2si %xmm1, %rcx
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm1
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm1, %rax
-; AVX2-NEXT: vmovq %rax, %xmm1
+; AVX2-NEXT: vmovq %rcx, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vcvttss2si %xmm2, %rax
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm0, %rax
@@ -848,10 +836,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) nounwind {
;
; AVX2-LABEL: utesth_f16i32:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
-; AVX2-NEXT: vpextrw $0, %xmm1, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm2
; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
@@ -862,20 +847,14 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) nounwind {
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpextrw $0, %xmm2, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
; AVX2-NEXT: vmovq %rdx, %xmm2
-; AVX2-NEXT: vpextrw $0, %xmm0, %edx
-; AVX2-NEXT: movzwl %dx, %edx
-; AVX2-NEXT: vmovd %edx, %xmm3
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
-; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm3
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: vsubss %xmm1, %xmm3, %xmm4
; AVX2-NEXT: vcvttss2si %xmm4, %rax
@@ -887,10 +866,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) nounwind {
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vsubss %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
@@ -1023,31 +999,19 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) nounwind {
;
; AVX2-LABEL: ustest_f16i32:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
-; AVX2-NEXT: vpextrw $0, %xmm1, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpextrw $0, %xmm2, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm2
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
@@ -3346,30 +3310,18 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) nounwind {
;
; AVX2-LABEL: stest_f16i32_mm:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
-; AVX2-NEXT: vpextrw $0, %xmm1, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpextrw $0, %xmm2, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm1, %rax
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm1
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX2-NEXT: vcvttss2si %xmm1, %rcx
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm1
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm1, %rax
-; AVX2-NEXT: vmovq %rax, %xmm1
+; AVX2-NEXT: vmovq %rcx, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vcvttss2si %xmm2, %rax
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm0, %rax
@@ -3494,10 +3446,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) nounwind {
;
; AVX2-LABEL: utesth_f16i32_mm:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
-; AVX2-NEXT: vpextrw $0, %xmm1, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm2
; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
@@ -3508,20 +3457,14 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) nounwind {
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpextrw $0, %xmm2, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
; AVX2-NEXT: vmovq %rdx, %xmm2
-; AVX2-NEXT: vpextrw $0, %xmm0, %edx
-; AVX2-NEXT: movzwl %dx, %edx
-; AVX2-NEXT: vmovd %edx, %xmm3
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
-; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm3
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: vsubss %xmm1, %xmm3, %xmm4
; AVX2-NEXT: vcvttss2si %xmm4, %rax
@@ -3533,10 +3476,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) nounwind {
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vsubss %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
@@ -3668,31 +3608,19 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) nounwind {
;
; AVX2-LABEL: ustest_f16i32_mm:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm1
-; AVX2-NEXT: vpextrw $0, %xmm1, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX2-NEXT: vpextrw $0, %xmm2, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm2
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX2-NEXT: vpextrw $0, %xmm0, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
diff --git a/llvm/test/CodeGen/X86/half.ll b/llvm/test/CodeGen/X86/half.ll
index d0853fdc..9f01d07 100644
--- a/llvm/test/CodeGen/X86/half.ll
+++ b/llvm/test/CodeGen/X86/half.ll
@@ -851,16 +851,14 @@ define float @test_sitofp_fadd_i32(i32 %a, ptr %b) #0 {
;
; BWON-F16C-LABEL: test_sitofp_fadd_i32:
; BWON-F16C: # %bb.0:
-; BWON-F16C-NEXT: movzwl (%rsi), %eax
; BWON-F16C-NEXT: vcvtsi2ss %edi, %xmm0, %xmm0
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; BWON-F16C-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; BWON-F16C-NEXT: movzwl (%rsi), %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm1
; BWON-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
; BWON-F16C-NEXT: vaddss %xmm0, %xmm1, %xmm0
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; BWON-F16C-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; BWON-F16C-NEXT: retq
;
@@ -919,7 +917,6 @@ define half @PR40273(half) #0 {
; BWON-F16C-LABEL: PR40273:
; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: vpextrw $0, %xmm0, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; BWON-F16C-NEXT: xorl %eax, %eax
@@ -973,7 +970,6 @@ define void @brcond(half %0) #0 {
; BWON-F16C-LABEL: brcond:
; BWON-F16C: # %bb.0: # %entry
; BWON-F16C-NEXT: vpextrw $0, %xmm0, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; BWON-F16C-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -1029,7 +1025,6 @@ define half @test_sqrt(half %0) #0 {
; BWON-F16C-LABEL: test_sqrt:
; BWON-F16C: # %bb.0: # %entry
; BWON-F16C-NEXT: vpextrw $0, %xmm0, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; BWON-F16C-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
@@ -1083,7 +1078,6 @@ define void @main.158() #0 {
; BWON-F16C: # %bb.0: # %entry
; BWON-F16C-NEXT: vxorps %xmm0, %xmm0, %xmm0
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm1
-; BWON-F16C-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; BWON-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
; BWON-F16C-NEXT: vmovss {{.*#+}} xmm2 = [8.0E+0,0.0E+0,0.0E+0,0.0E+0]
; BWON-F16C-NEXT: vucomiss %xmm1, %xmm2
@@ -1172,8 +1166,7 @@ define void @main.45() #0 {
;
; BWON-F16C-LABEL: main.45:
; BWON-F16C: # %bb.0: # %entry
-; BWON-F16C-NEXT: movzwl (%rax), %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm0
+; BWON-F16C-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; BWON-F16C-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,0,0,4,5,6,7]
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; BWON-F16C-NEXT: xorl %eax, %eax
@@ -1345,10 +1338,8 @@ define half @pr61271(half %0, half %1) #0 {
; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: vpextrw $0, %xmm0, %eax
; BWON-F16C-NEXT: vpextrw $0, %xmm1, %ecx
-; BWON-F16C-NEXT: movzwl %cx, %ecx
; BWON-F16C-NEXT: vmovd %ecx, %xmm0
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
-; BWON-F16C-NEXT: movzwl %ax, %eax
; BWON-F16C-NEXT: vmovd %eax, %xmm1
; BWON-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
; BWON-F16C-NEXT: vminss %xmm0, %xmm1, %xmm0
@@ -1615,14 +1606,8 @@ define <8 x half> @maxnum_v8f16(<8 x half> %0, <8 x half> %1) #0 {
; BWON-F16C-LABEL: maxnum_v8f16:
; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; BWON-F16C-NEXT: vpextrw $0, %xmm2, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm2
; BWON-F16C-NEXT: vcvtph2ps %xmm2, %xmm2
; BWON-F16C-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; BWON-F16C-NEXT: vpextrw $0, %xmm3, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm3
; BWON-F16C-NEXT: vcvtph2ps %xmm3, %xmm3
; BWON-F16C-NEXT: vucomiss %xmm2, %xmm3
; BWON-F16C-NEXT: ja .LBB26_2
@@ -1631,14 +1616,8 @@ define <8 x half> @maxnum_v8f16(<8 x half> %0, <8 x half> %1) #0 {
; BWON-F16C-NEXT: .LBB26_2:
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm3, %xmm2
; BWON-F16C-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
-; BWON-F16C-NEXT: vpextrw $0, %xmm3, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm3
; BWON-F16C-NEXT: vcvtph2ps %xmm3, %xmm3
; BWON-F16C-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; BWON-F16C-NEXT: vpextrw $0, %xmm4, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm4
; BWON-F16C-NEXT: vcvtph2ps %xmm4, %xmm4
; BWON-F16C-NEXT: vucomiss %xmm3, %xmm4
; BWON-F16C-NEXT: ja .LBB26_4
@@ -1649,48 +1628,30 @@ define <8 x half> @maxnum_v8f16(<8 x half> %0, <8 x half> %1) #0 {
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm4, %xmm2
; BWON-F16C-NEXT: vmovd %xmm2, %ecx
; BWON-F16C-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; BWON-F16C-NEXT: vpextrw $0, %xmm2, %edx
-; BWON-F16C-NEXT: movzwl %dx, %edx
-; BWON-F16C-NEXT: vmovd %edx, %xmm2
+; BWON-F16C-NEXT: vcvtph2ps %xmm2, %xmm3
+; BWON-F16C-NEXT: vpsrldq {{.*#+}} xmm2 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; BWON-F16C-NEXT: vcvtph2ps %xmm2, %xmm2
-; BWON-F16C-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; BWON-F16C-NEXT: vpextrw $0, %xmm3, %edx
-; BWON-F16C-NEXT: movzwl %dx, %edx
-; BWON-F16C-NEXT: vmovd %edx, %xmm3
-; BWON-F16C-NEXT: vcvtph2ps %xmm3, %xmm3
-; BWON-F16C-NEXT: vucomiss %xmm2, %xmm3
+; BWON-F16C-NEXT: vucomiss %xmm3, %xmm2
; BWON-F16C-NEXT: ja .LBB26_6
; BWON-F16C-NEXT: # %bb.5:
-; BWON-F16C-NEXT: vmovaps %xmm2, %xmm3
+; BWON-F16C-NEXT: vmovaps %xmm3, %xmm2
; BWON-F16C-NEXT: .LBB26_6:
-; BWON-F16C-NEXT: vcvtps2ph $4, %xmm3, %xmm2
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; BWON-F16C-NEXT: vmovd %xmm2, %edx
; BWON-F16C-NEXT: vshufpd {{.*#+}} xmm2 = xmm1[1,0]
-; BWON-F16C-NEXT: vpextrw $0, %xmm2, %esi
-; BWON-F16C-NEXT: movzwl %si, %esi
-; BWON-F16C-NEXT: vmovd %esi, %xmm2
+; BWON-F16C-NEXT: vcvtph2ps %xmm2, %xmm3
+; BWON-F16C-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; BWON-F16C-NEXT: vcvtph2ps %xmm2, %xmm2
-; BWON-F16C-NEXT: vshufpd {{.*#+}} xmm3 = xmm0[1,0]
-; BWON-F16C-NEXT: vpextrw $0, %xmm3, %esi
-; BWON-F16C-NEXT: movzwl %si, %esi
-; BWON-F16C-NEXT: vmovd %esi, %xmm3
-; BWON-F16C-NEXT: vcvtph2ps %xmm3, %xmm3
-; BWON-F16C-NEXT: vucomiss %xmm2, %xmm3
+; BWON-F16C-NEXT: vucomiss %xmm3, %xmm2
; BWON-F16C-NEXT: ja .LBB26_8
; BWON-F16C-NEXT: # %bb.7:
-; BWON-F16C-NEXT: vmovaps %xmm2, %xmm3
+; BWON-F16C-NEXT: vmovaps %xmm3, %xmm2
; BWON-F16C-NEXT: .LBB26_8:
-; BWON-F16C-NEXT: vcvtps2ph $4, %xmm3, %xmm2
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; BWON-F16C-NEXT: vmovd %xmm2, %esi
-; BWON-F16C-NEXT: vpsrlq $48, %xmm1, %xmm2
-; BWON-F16C-NEXT: vpextrw $0, %xmm2, %edi
-; BWON-F16C-NEXT: movzwl %di, %edi
-; BWON-F16C-NEXT: vmovd %edi, %xmm2
+; BWON-F16C-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[3,3,3,3,4,5,6,7]
; BWON-F16C-NEXT: vcvtph2ps %xmm2, %xmm2
-; BWON-F16C-NEXT: vpsrlq $48, %xmm0, %xmm3
-; BWON-F16C-NEXT: vpextrw $0, %xmm3, %edi
-; BWON-F16C-NEXT: movzwl %di, %edi
-; BWON-F16C-NEXT: vmovd %edi, %xmm3
+; BWON-F16C-NEXT: vpshuflw {{.*#+}} xmm3 = xmm0[3,3,3,3,4,5,6,7]
; BWON-F16C-NEXT: vcvtph2ps %xmm3, %xmm6
; BWON-F16C-NEXT: vucomiss %xmm2, %xmm6
; BWON-F16C-NEXT: ja .LBB26_10
@@ -1704,53 +1665,35 @@ define <8 x half> @maxnum_v8f16(<8 x half> %0, <8 x half> %1) #0 {
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm6, %xmm6
; BWON-F16C-NEXT: vmovd %xmm6, %eax
; BWON-F16C-NEXT: vmovshdup {{.*#+}} xmm6 = xmm1[1,1,3,3]
-; BWON-F16C-NEXT: vpextrw $0, %xmm6, %ecx
-; BWON-F16C-NEXT: movzwl %cx, %ecx
-; BWON-F16C-NEXT: vmovd %ecx, %xmm6
+; BWON-F16C-NEXT: vcvtph2ps %xmm6, %xmm7
+; BWON-F16C-NEXT: vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
; BWON-F16C-NEXT: vcvtph2ps %xmm6, %xmm6
-; BWON-F16C-NEXT: vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; BWON-F16C-NEXT: vpextrw $0, %xmm7, %ecx
-; BWON-F16C-NEXT: movzwl %cx, %ecx
-; BWON-F16C-NEXT: vmovd %ecx, %xmm7
-; BWON-F16C-NEXT: vcvtph2ps %xmm7, %xmm7
-; BWON-F16C-NEXT: vucomiss %xmm6, %xmm7
+; BWON-F16C-NEXT: vucomiss %xmm7, %xmm6
; BWON-F16C-NEXT: ja .LBB26_12
; BWON-F16C-NEXT: # %bb.11:
-; BWON-F16C-NEXT: vmovaps %xmm6, %xmm7
+; BWON-F16C-NEXT: vmovaps %xmm7, %xmm6
; BWON-F16C-NEXT: .LBB26_12:
; BWON-F16C-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; BWON-F16C-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; BWON-F16C-NEXT: vpinsrw $0, %eax, %xmm0, %xmm4
-; BWON-F16C-NEXT: vcvtps2ph $4, %xmm7, %xmm5
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm6, %xmm5
; BWON-F16C-NEXT: vmovd %xmm5, %eax
; BWON-F16C-NEXT: vpinsrw $0, %eax, %xmm0, %xmm5
-; BWON-F16C-NEXT: vpextrw $0, %xmm1, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm6
-; BWON-F16C-NEXT: vcvtph2ps %xmm6, %xmm6
-; BWON-F16C-NEXT: vpextrw $0, %xmm0, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm7
-; BWON-F16C-NEXT: vcvtph2ps %xmm7, %xmm7
-; BWON-F16C-NEXT: vucomiss %xmm6, %xmm7
+; BWON-F16C-NEXT: vcvtph2ps %xmm1, %xmm7
+; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm6
+; BWON-F16C-NEXT: vucomiss %xmm7, %xmm6
; BWON-F16C-NEXT: ja .LBB26_14
; BWON-F16C-NEXT: # %bb.13:
-; BWON-F16C-NEXT: vmovaps %xmm6, %xmm7
+; BWON-F16C-NEXT: vmovaps %xmm7, %xmm6
; BWON-F16C-NEXT: .LBB26_14:
; BWON-F16C-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; BWON-F16C-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; BWON-F16C-NEXT: vcvtps2ph $4, %xmm7, %xmm4
+; BWON-F16C-NEXT: vcvtps2ph $4, %xmm6, %xmm4
; BWON-F16C-NEXT: vmovd %xmm4, %eax
; BWON-F16C-NEXT: vpinsrw $0, %eax, %xmm0, %xmm4
-; BWON-F16C-NEXT: vpsrld $16, %xmm1, %xmm1
-; BWON-F16C-NEXT: vpextrw $0, %xmm1, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm1
+; BWON-F16C-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7]
; BWON-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
-; BWON-F16C-NEXT: vpsrld $16, %xmm0, %xmm0
-; BWON-F16C-NEXT: vpextrw $0, %xmm0, %eax
-; BWON-F16C-NEXT: movzwl %ax, %eax
-; BWON-F16C-NEXT: vmovd %eax, %xmm0
+; BWON-F16C-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; BWON-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; BWON-F16C-NEXT: vucomiss %xmm1, %xmm0
; BWON-F16C-NEXT: ja .LBB26_16
diff --git a/llvm/test/CodeGen/X86/inline-asm-memop.ll b/llvm/test/CodeGen/X86/inline-asm-memop.ll
new file mode 100644
index 0000000..83442498
--- /dev/null
+++ b/llvm/test/CodeGen/X86/inline-asm-memop.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -O0 < %s | FileCheck %s
+
+; A bug in X86DAGToDAGISel::matchAddressRecursively created a zext SDValue that
+; was quickly replaced by another SDValue but had already been pushed into the
+; vector later passed to the getNode builder in SelectionDAGISel::Select_INLINEASM.
+; See issue 82431 for more information.
+
+define void @PR82431(i8 %call, ptr %b) {
+; CHECK-LABEL: PR82431:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movb %dil, %al
+; CHECK-NEXT: addb $1, %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: # kill: def $rax killed $eax
+; CHECK-NEXT: shlq $3, %rax
+; CHECK-NEXT: addq %rax, %rsi
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: retq
+entry:
+ %narrow = add nuw i8 %call, 1
+ %idxprom = zext i8 %narrow to i64
+ %arrayidx = getelementptr [1 x i64], ptr %b, i64 0, i64 %idxprom
+ tail call void asm "", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %arrayidx, ptr elementtype(i64) %arrayidx)
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/pr31088.ll b/llvm/test/CodeGen/X86/pr31088.ll
index fa1014e..ce37622 100644
--- a/llvm/test/CodeGen/X86/pr31088.ll
+++ b/llvm/test/CodeGen/X86/pr31088.ll
@@ -41,15 +41,9 @@ define <1 x half> @ir_fadd_v1f16(<1 x half> %arg0, <1 x half> %arg1) nounwind {
;
; F16C-LABEL: ir_fadd_v1f16:
; F16C: # %bb.0:
-; F16C-NEXT: vpextrw $0, %xmm0, %eax
-; F16C-NEXT: vpextrw $0, %xmm1, %ecx
-; F16C-NEXT: movzwl %cx, %ecx
-; F16C-NEXT: vmovd %ecx, %xmm0
-; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
-; F16C-NEXT: movzwl %ax, %eax
-; F16C-NEXT: vmovd %eax, %xmm1
; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
-; F16C-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT: vaddss %xmm1, %xmm0, %xmm0
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vmovd %xmm0, %eax
; F16C-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
@@ -58,13 +52,15 @@ define <1 x half> @ir_fadd_v1f16(<1 x half> %arg0, <1 x half> %arg1) nounwind {
; F16C-O0-LABEL: ir_fadd_v1f16:
; F16C-O0: # %bb.0:
; F16C-O0-NEXT: vpextrw $0, %xmm1, %eax
-; F16C-O0-NEXT: # kill: def $ax killed $ax killed $eax
-; F16C-O0-NEXT: movzwl %ax, %eax
+; F16C-O0-NEXT: movw %ax, %cx
+; F16C-O0-NEXT: # implicit-def: $eax
+; F16C-O0-NEXT: movw %cx, %ax
; F16C-O0-NEXT: vmovd %eax, %xmm1
; F16C-O0-NEXT: vcvtph2ps %xmm1, %xmm1
; F16C-O0-NEXT: vpextrw $0, %xmm0, %eax
-; F16C-O0-NEXT: # kill: def $ax killed $ax killed $eax
-; F16C-O0-NEXT: movzwl %ax, %eax
+; F16C-O0-NEXT: movw %ax, %cx
+; F16C-O0-NEXT: # implicit-def: $eax
+; F16C-O0-NEXT: movw %cx, %ax
; F16C-O0-NEXT: vmovd %eax, %xmm0
; F16C-O0-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-O0-NEXT: vaddss %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/pr34605.ll b/llvm/test/CodeGen/X86/pr34605.ll
index 863b0ff..25dd6a7 100644
--- a/llvm/test/CodeGen/X86/pr34605.ll
+++ b/llvm/test/CodeGen/X86/pr34605.ll
@@ -17,7 +17,7 @@ define void @pr34605(ptr nocapture %s, i32 %p) {
; CHECK-NEXT: kmovd %ecx, %k1
; CHECK-NEXT: kmovd %k1, %k1
; CHECK-NEXT: kandq %k1, %k0, %k1
-; CHECK-NEXT: vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}, %zmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqu8 {{.*#+}} zmm0 {%k1} {z} = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; CHECK-NEXT: vmovdqu64 %zmm0, (%eax)
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovdqu64 %zmm0, 64(%eax)
diff --git a/llvm/test/CodeGen/X86/pr38803.ll b/llvm/test/CodeGen/X86/pr38803.ll
index 61dc228..ebac812 100644
--- a/llvm/test/CodeGen/X86/pr38803.ll
+++ b/llvm/test/CodeGen/X86/pr38803.ll
@@ -13,7 +13,7 @@ define dso_local float @_Z3fn2v() {
; CHECK-NEXT: callq _Z1av@PLT
; CHECK-NEXT: # kill: def $al killed $al def $eax
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vmovss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 {%k1} {z} = [7.5E-1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: cmpl $0, c(%rip)
; CHECK-NEXT: je .LBB0_2
; CHECK-NEXT: # %bb.1: # %if.then
diff --git a/llvm/test/CodeGen/X86/pr43509.ll b/llvm/test/CodeGen/X86/pr43509.ll
index 87ddad0..a29fe4c 100644
--- a/llvm/test/CodeGen/X86/pr43509.ll
+++ b/llvm/test/CodeGen/X86/pr43509.ll
@@ -7,7 +7,7 @@ define <8 x i8> @foo(<8 x float> %arg) {
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vcmpltps %ymm1, %ymm0, %k1
; CHECK-NEXT: vcmpgtps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %k1 {%k1}
-; CHECK-NEXT: vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k1} {z} = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
bb:
diff --git a/llvm/test/CodeGen/X86/pr57340.ll b/llvm/test/CodeGen/X86/pr57340.ll
index 57f52c8..00a52c6 100644
--- a/llvm/test/CodeGen/X86/pr57340.ll
+++ b/llvm/test/CodeGen/X86/pr57340.ll
@@ -5,54 +5,42 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-LABEL: main.41:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpbroadcastw (%rax), %xmm0
-; CHECK-NEXT: vmovdqu (%rax), %ymm2
-; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm3
-; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm1 = [31,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
-; CHECK-NEXT: vpermi2w %ymm3, %ymm2, %ymm1
; CHECK-NEXT: vpextrw $0, %xmm0, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm0
-; CHECK-NEXT: vcvtph2ps %xmm0, %xmm0
+; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm1
+; CHECK-NEXT: vmovdqu (%rax), %ymm3
+; CHECK-NEXT: vpmovsxbw {{.*#+}} ymm2 = [31,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; CHECK-NEXT: vpermi2w %ymm1, %ymm3, %ymm2
+; CHECK-NEXT: vprold $16, %xmm2, %xmm1
+; CHECK-NEXT: vcvtph2ps %xmm1, %xmm3
; CHECK-NEXT: vmovdqu (%rax), %xmm5
-; CHECK-NEXT: vpextrw $0, %xmm5, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm2
-; CHECK-NEXT: vcvtph2ps %xmm2, %xmm2
-; CHECK-NEXT: vucomiss %xmm0, %xmm2
-; CHECK-NEXT: setnp %al
-; CHECK-NEXT: sete %cl
-; CHECK-NEXT: testb %al, %cl
-; CHECK-NEXT: vpsrld $16, %xmm1, %xmm3
-; CHECK-NEXT: vpextrw $0, %xmm3, %eax
-; CHECK-NEXT: movzwl %ax, %eax
+; CHECK-NEXT: vprold $16, %xmm5, %xmm1
+; CHECK-NEXT: vcvtph2ps %xmm1, %xmm1
+; CHECK-NEXT: vucomiss %xmm3, %xmm1
+; CHECK-NEXT: setnp %cl
+; CHECK-NEXT: sete %dl
+; CHECK-NEXT: testb %cl, %dl
+; CHECK-NEXT: setne %cl
+; CHECK-NEXT: kmovd %ecx, %k0
+; CHECK-NEXT: kshiftlw $15, %k0, %k0
; CHECK-NEXT: vmovd %eax, %xmm3
-; CHECK-NEXT: vpsrld $16, %xmm5, %xmm4
-; CHECK-NEXT: vpextrw $0, %xmm4, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm4
-; CHECK-NEXT: setne %al
-; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: vcvtph2ps %xmm3, %xmm6
-; CHECK-NEXT: vcvtph2ps %xmm4, %xmm3
-; CHECK-NEXT: kmovw %eax, %k0
-; CHECK-NEXT: vucomiss %xmm6, %xmm3
+; CHECK-NEXT: vcvtph2ps %xmm3, %xmm3
+; CHECK-NEXT: vcvtph2ps %xmm5, %xmm6
+; CHECK-NEXT: kshiftrw $14, %k0, %k0
+; CHECK-NEXT: vucomiss %xmm3, %xmm6
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
; CHECK-NEXT: setne %al
-; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: kshiftlw $15, %k1, %k1
-; CHECK-NEXT: kshiftrw $14, %k1, %k1
-; CHECK-NEXT: korw %k1, %k0, %k0
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: kmovw %eax, %k1
+; CHECK-NEXT: korw %k0, %k1, %k0
; CHECK-NEXT: movw $-5, %ax
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vprolq $32, %xmm1, %xmm4
-; CHECK-NEXT: vpextrw $0, %xmm4, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm4
-; CHECK-NEXT: vcvtph2ps %xmm4, %xmm4
-; CHECK-NEXT: vucomiss %xmm4, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; CHECK-NEXT: vcvtph2ps %xmm3, %xmm3
+; CHECK-NEXT: vcvtph2ps %xmm0, %xmm0
+; CHECK-NEXT: vucomiss %xmm3, %xmm0
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -63,18 +51,12 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: movw $-9, %ax
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vpsrlq $48, %xmm1, %xmm4
-; CHECK-NEXT: vpextrw $0, %xmm4, %eax
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm4
-; CHECK-NEXT: vcvtph2ps %xmm4, %xmm6
-; CHECK-NEXT: vpsrlq $48, %xmm5, %xmm4
-; CHECK-NEXT: vpextrw $0, %xmm4, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm4
-; CHECK-NEXT: vcvtph2ps %xmm4, %xmm4
-; CHECK-NEXT: vucomiss %xmm6, %xmm4
+; CHECK-NEXT: vprolq $16, %xmm2, %xmm3
+; CHECK-NEXT: vcvtph2ps %xmm3, %xmm4
+; CHECK-NEXT: vprolq $16, %xmm5, %xmm3
+; CHECK-NEXT: vcvtph2ps %xmm3, %xmm3
+; CHECK-NEXT: vucomiss %xmm4, %xmm3
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -85,13 +67,10 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: movw $-17, %ax
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
-; CHECK-NEXT: vpextrw $0, %xmm6, %eax
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm6
-; CHECK-NEXT: vcvtph2ps %xmm6, %xmm6
-; CHECK-NEXT: vucomiss %xmm6, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; CHECK-NEXT: vcvtph2ps %xmm4, %xmm4
+; CHECK-NEXT: vucomiss %xmm4, %xmm0
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -102,18 +81,12 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: movw $-33, %ax
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vpsrldq {{.*#+}} xmm6 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: vpextrw $0, %xmm6, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm6
-; CHECK-NEXT: vcvtph2ps %xmm6, %xmm7
-; CHECK-NEXT: vpsrldq {{.*#+}} xmm6 = xmm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: vpextrw $0, %xmm6, %eax
+; CHECK-NEXT: vpsrldq {{.*#+}} xmm4 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT: vcvtph2ps %xmm4, %xmm7
+; CHECK-NEXT: vpsrldq {{.*#+}} xmm4 = xmm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT: vcvtph2ps %xmm4, %xmm4
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm6
-; CHECK-NEXT: vcvtph2ps %xmm6, %xmm6
-; CHECK-NEXT: vucomiss %xmm7, %xmm6
+; CHECK-NEXT: vucomiss %xmm7, %xmm4
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -125,10 +98,7 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: movw $-65, %ax
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
-; CHECK-NEXT: vpextrw $0, %xmm7, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm7
+; CHECK-NEXT: vshufps {{.*#+}} xmm7 = xmm2[3,3,3,3]
; CHECK-NEXT: vcvtph2ps %xmm7, %xmm7
; CHECK-NEXT: vucomiss %xmm7, %xmm0
; CHECK-NEXT: setnp %al
@@ -142,15 +112,9 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: movw $-129, %ax
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vpsrldq {{.*#+}} xmm7 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: vpextrw $0, %xmm7, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm7
+; CHECK-NEXT: vpsrldq {{.*#+}} xmm7 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: vcvtph2ps %xmm7, %xmm7
; CHECK-NEXT: vpsrldq {{.*#+}} xmm5 = xmm5[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: vpextrw $0, %xmm5, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm5
; CHECK-NEXT: vcvtph2ps %xmm5, %xmm5
; CHECK-NEXT: vucomiss %xmm7, %xmm5
; CHECK-NEXT: setnp %al
@@ -163,13 +127,10 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: movw $-257, %ax # imm = 0xFEFF
; CHECK-NEXT: kmovd %eax, %k1
+; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm2
+; CHECK-NEXT: vcvtph2ps %xmm2, %xmm7
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm1
-; CHECK-NEXT: vpextrw $0, %xmm1, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm7
-; CHECK-NEXT: vcvtph2ps %xmm7, %xmm7
-; CHECK-NEXT: vucomiss %xmm7, %xmm2
+; CHECK-NEXT: vucomiss %xmm7, %xmm6
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -181,12 +142,9 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: movw $-513, %ax # imm = 0xFDFF
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vpsrld $16, %xmm1, %xmm2
-; CHECK-NEXT: vpextrw $0, %xmm2, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm2
-; CHECK-NEXT: vcvtph2ps %xmm2, %xmm2
-; CHECK-NEXT: vucomiss %xmm2, %xmm3
+; CHECK-NEXT: vprold $16, %xmm2, %xmm6
+; CHECK-NEXT: vcvtph2ps %xmm6, %xmm6
+; CHECK-NEXT: vucomiss %xmm6, %xmm1
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -197,13 +155,10 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: movw $-1025, %ax # imm = 0xFBFF
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vprolq $32, %xmm1, %xmm2
-; CHECK-NEXT: vpextrw $0, %xmm2, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm2
-; CHECK-NEXT: vcvtph2ps %xmm2, %xmm2
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vucomiss %xmm2, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; CHECK-NEXT: vcvtph2ps %xmm1, %xmm1
+; CHECK-NEXT: vucomiss %xmm1, %xmm0
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -215,12 +170,9 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: movw $-2049, %ax # imm = 0xF7FF
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vpsrlq $48, %xmm1, %xmm2
-; CHECK-NEXT: vpextrw $0, %xmm2, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm2
-; CHECK-NEXT: vcvtph2ps %xmm2, %xmm2
-; CHECK-NEXT: vucomiss %xmm2, %xmm4
+; CHECK-NEXT: vprolq $16, %xmm2, %xmm1
+; CHECK-NEXT: vcvtph2ps %xmm1, %xmm1
+; CHECK-NEXT: vucomiss %xmm1, %xmm3
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -231,13 +183,10 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: movw $-4097, %ax # imm = 0xEFFF
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; CHECK-NEXT: vpextrw $0, %xmm2, %eax
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm2
-; CHECK-NEXT: vcvtph2ps %xmm2, %xmm2
-; CHECK-NEXT: vucomiss %xmm2, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,3,0,1]
+; CHECK-NEXT: vcvtph2ps %xmm1, %xmm1
+; CHECK-NEXT: vucomiss %xmm1, %xmm0
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -249,12 +198,9 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: movw $-8193, %ax # imm = 0xDFFF
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: vpextrw $0, %xmm2, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm2
-; CHECK-NEXT: vcvtph2ps %xmm2, %xmm2
-; CHECK-NEXT: vucomiss %xmm2, %xmm6
+; CHECK-NEXT: vpsrldq {{.*#+}} xmm1 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT: vcvtph2ps %xmm1, %xmm1
+; CHECK-NEXT: vucomiss %xmm1, %xmm4
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -265,13 +211,10 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: movw $-16385, %ax # imm = 0xBFFF
; CHECK-NEXT: kmovd %eax, %k1
+; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm2[3,3,3,3]
+; CHECK-NEXT: vcvtph2ps %xmm1, %xmm1
; CHECK-NEXT: kandw %k1, %k0, %k0
-; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
-; CHECK-NEXT: vpextrw $0, %xmm2, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm2
-; CHECK-NEXT: vcvtph2ps %xmm2, %xmm2
-; CHECK-NEXT: vucomiss %xmm2, %xmm0
+; CHECK-NEXT: vucomiss %xmm1, %xmm0
; CHECK-NEXT: setnp %al
; CHECK-NEXT: sete %cl
; CHECK-NEXT: testb %al, %cl
@@ -280,10 +223,7 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: kshiftlw $14, %k1, %k1
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: kshiftlw $1, %k0, %k0
-; CHECK-NEXT: vpsrldq {{.*#+}} xmm0 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: vpextrw $0, %xmm0, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: vmovd %eax, %xmm0
+; CHECK-NEXT: vpsrldq {{.*#+}} xmm0 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm0
; CHECK-NEXT: kshiftrw $1, %k0, %k0
; CHECK-NEXT: vucomiss %xmm0, %xmm5
@@ -294,7 +234,7 @@ define void @main.41() local_unnamed_addr #1 {
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: kshiftlw $15, %k1, %k1
; CHECK-NEXT: korw %k1, %k0, %k1
-; CHECK-NEXT: vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k1} {z} = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; CHECK-NEXT: vmovdqa %xmm0, (%rax)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/pr78897.ll b/llvm/test/CodeGen/X86/pr78897.ll
index 0c1c3ca..56e4ec2 100644
--- a/llvm/test/CodeGen/X86/pr78897.ll
+++ b/llvm/test/CodeGen/X86/pr78897.ll
@@ -225,7 +225,7 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
; X86-AVX512-NEXT: pushl %esi
; X86-AVX512-NEXT: vpbroadcastb {{[0-9]+}}(%esp), %xmm0
; X86-AVX512-NEXT: vptestnmb {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %k1
-; X86-AVX512-NEXT: vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 {%k1} {z}
+; X86-AVX512-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k1} {z} = [17,17,17,17,17,17,17,17,u,u,u,u,u,u,u,u]
; X86-AVX512-NEXT: vpextrd $1, %xmm0, %eax
; X86-AVX512-NEXT: vmovd %xmm0, %edx
; X86-AVX512-NEXT: movl $286331152, %ecx # imm = 0x11111110
@@ -258,7 +258,7 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
; X64-AVX512: # %bb.0: # %entry
; X64-AVX512-NEXT: vpbroadcastb %edi, %xmm0
; X64-AVX512-NEXT: vptestnmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k1
-; X64-AVX512-NEXT: vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1} {z}
+; X64-AVX512-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k1} {z} = [17,17,17,17,17,17,17,17,u,u,u,u,u,u,u,u]
; X64-AVX512-NEXT: vmovq %xmm0, %rax
; X64-AVX512-NEXT: movabsq $1229782938247303440, %rcx # imm = 0x1111111111111110
; X64-AVX512-NEXT: movabsq $76861433640456465, %rdx # imm = 0x111111111111111
diff --git a/llvm/test/CodeGen/X86/prefer-fpext-splat.ll b/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
index 1d8b8b3..c3d7b2e 100644
--- a/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
+++ b/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
@@ -176,8 +176,6 @@ define <2 x double> @prefer_f16_v2f64(ptr %p) nounwind {
; AVX512F-LABEL: prefer_f16_v2f64:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpbroadcastw (%rdi), %xmm0
-; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512F-NEXT: vcvtps2pd %xmm0, %xmm0
; AVX512F-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/select-of-fp-constants.ll b/llvm/test/CodeGen/X86/select-of-fp-constants.ll
index 76b8ea8..2cdaa11 100644
--- a/llvm/test/CodeGen/X86/select-of-fp-constants.ll
+++ b/llvm/test/CodeGen/X86/select-of-fp-constants.ll
@@ -86,7 +86,7 @@ define float @fcmp_select_fp_constants(float %x) nounwind readnone {
; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: vcmpneqss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k1
; X64-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = [2.3E+1,0.0E+0,0.0E+0,0.0E+0]
-; X64-AVX512F-NEXT: vmovss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1}
+; X64-AVX512F-NEXT: vmovss {{.*#+}} xmm0 {%k1} = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; X64-AVX512F-NEXT: retq
%c = fcmp une float %x, -4.0
%r = select i1 %c, float 42.0, float 23.0
diff --git a/llvm/test/CodeGen/X86/select-of-half-constants.ll b/llvm/test/CodeGen/X86/select-of-half-constants.ll
index e22d4c8..e3d92eb 100644
--- a/llvm/test/CodeGen/X86/select-of-half-constants.ll
+++ b/llvm/test/CodeGen/X86/select-of-half-constants.ll
@@ -6,9 +6,9 @@
define half @fcmp_select_fp_constants_olt(half %x) nounwind readnone {
; X64-AVX512FP16-LABEL: fcmp_select_fp_constants_olt:
; X64-AVX512FP16: # %bb.0:
-; X64-AVX512FP16-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-AVX512FP16-NEXT: vmovsh {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-AVX512FP16-NEXT: vcmpltsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k1
-; X64-AVX512FP16-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-AVX512FP16-NEXT: vmovsh {{.*#+}} xmm0 = [2.3E+1,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-AVX512FP16-NEXT: vmovsh %xmm1, %xmm0, %xmm0 {%k1}
; X64-AVX512FP16-NEXT: retq
%c = fcmp olt half %x, -4.0
@@ -19,9 +19,9 @@ define half @fcmp_select_fp_constants_olt(half %x) nounwind readnone {
define half @fcmp_select_fp_constants_ogt(half %x) nounwind readnone {
; X64-AVX512FP16-LABEL: fcmp_select_fp_constants_ogt:
; X64-AVX512FP16: # %bb.0:
-; X64-AVX512FP16-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-AVX512FP16-NEXT: vmovsh {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-AVX512FP16-NEXT: vcmpgtsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k1
-; X64-AVX512FP16-NEXT: vmovsh {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-AVX512FP16-NEXT: vmovsh {{.*#+}} xmm0 = [2.3E+1,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-AVX512FP16-NEXT: vmovsh %xmm1, %xmm0, %xmm0 {%k1}
; X64-AVX512FP16-NEXT: retq
%c = fcmp ogt half %x, -4.0
diff --git a/llvm/test/CodeGen/X86/vector-half-conversions.ll b/llvm/test/CodeGen/X86/vector-half-conversions.ll
index f59960f..ba21af2 100644
--- a/llvm/test/CodeGen/X86/vector-half-conversions.ll
+++ b/llvm/test/CodeGen/X86/vector-half-conversions.ll
@@ -21,15 +21,13 @@ define float @cvt_i16_to_f32(i16 %a0) nounwind {
;
; F16C-LABEL: cvt_i16_to_f32:
; F16C: # %bb.0:
-; F16C-NEXT: movzwl %di, %eax
-; F16C-NEXT: vmovd %eax, %xmm0
+; F16C-NEXT: vmovd %edi, %xmm0
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_i16_to_f32:
; AVX512: # %bb.0:
-; AVX512-NEXT: movzwl %di, %eax
-; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vmovd %edi, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = bitcast i16 %a0 to half
@@ -1370,16 +1368,14 @@ define double @cvt_i16_to_f64(i16 %a0) nounwind {
;
; F16C-LABEL: cvt_i16_to_f64:
; F16C: # %bb.0:
-; F16C-NEXT: movzwl %di, %eax
-; F16C-NEXT: vmovd %eax, %xmm0
+; F16C-NEXT: vmovd %edi, %xmm0
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_i16_to_f64:
; AVX512: # %bb.0:
-; AVX512-NEXT: movzwl %di, %eax
-; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vmovd %edi, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -1410,14 +1406,12 @@ define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind {
;
; F16C-LABEL: cvt_2i16_to_2f64:
; F16C: # %bb.0:
-; F16C-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_2i16_to_2f64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -1503,14 +1497,12 @@ define <2 x double> @cvt_8i16_to_2f64(<8 x i16> %a0) nounwind {
;
; F16C-LABEL: cvt_8i16_to_2f64:
; F16C: # %bb.0:
-; F16C-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_8i16_to_2f64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -1877,16 +1869,14 @@ define <2 x double> @load_cvt_2i16_to_2f64(ptr %a0) nounwind {
;
; F16C-LABEL: load_cvt_2i16_to_2f64:
; F16C: # %bb.0:
-; F16C-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; F16C-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; F16C-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: load_cvt_2i16_to_2f64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %xmm0
; AVX512-NEXT: retq
@@ -4976,32 +4966,22 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2 x half> %a) nounwind {
;
; F16C-LABEL: fptosi_2f16_to_4i32:
; F16C: # %bb.0:
-; F16C-NEXT: vpextrw $0, %xmm0, %eax
-; F16C-NEXT: movzwl %ax, %eax
-; F16C-NEXT: vmovd %eax, %xmm1
+; F16C-NEXT: vpsrld $16, %xmm0, %xmm1
; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
-; F16C-NEXT: vpsrld $16, %xmm0, %xmm0
-; F16C-NEXT: vpextrw $0, %xmm0, %eax
-; F16C-NEXT: movzwl %ax, %eax
-; F16C-NEXT: vmovd %eax, %xmm0
+; F16C-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
-; F16C-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; F16C-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; F16C-NEXT: vcvttps2dq %xmm0, %xmm0
; F16C-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; F16C-NEXT: retq
;
; AVX512-LABEL: fptosi_2f16_to_4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpextrw $0, %xmm0, %eax
-; AVX512-NEXT: movzwl %ax, %eax
-; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX512-NEXT: vpextrw $0, %xmm0, %eax
-; AVX512-NEXT: movzwl %ax, %eax
-; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX512-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX512-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX512-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX512-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll b/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll
index 71c4427..24113441 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll
@@ -413,14 +413,8 @@ define half @test_v2f16(<2 x half> %a0) nounwind {
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512F-NEXT: vpextrw $0, %xmm0, %eax
-; AVX512F-NEXT: movzwl %ax, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX512F-NEXT: vpextrw $0, %xmm1, %eax
-; AVX512F-NEXT: movzwl %ax, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm3
-; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm2
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm3
; AVX512F-NEXT: xorl %eax, %eax
; AVX512F-NEXT: vucomiss %xmm3, %xmm2
; AVX512F-NEXT: movl $255, %ecx
@@ -434,14 +428,8 @@ define half @test_v2f16(<2 x half> %a0) nounwind {
; AVX512VL-LABEL: test_v2f16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512VL-NEXT: vpextrw $0, %xmm0, %eax
-; AVX512VL-NEXT: movzwl %ax, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm2
-; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrw $0, %xmm1, %eax
-; AVX512VL-NEXT: movzwl %ax, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm3
-; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm2
+; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm3
; AVX512VL-NEXT: xorl %eax, %eax
; AVX512VL-NEXT: vucomiss %xmm3, %xmm2
; AVX512VL-NEXT: movl $255, %ecx
diff --git a/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll b/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
index 0b2f9d6..edefb16 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
@@ -412,14 +412,8 @@ define half @test_v2f16(<2 x half> %a0) nounwind {
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512F-NEXT: vpextrw $0, %xmm0, %eax
-; AVX512F-NEXT: movzwl %ax, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX512F-NEXT: vpextrw $0, %xmm1, %eax
-; AVX512F-NEXT: movzwl %ax, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm3
-; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm2
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm3
; AVX512F-NEXT: xorl %eax, %eax
; AVX512F-NEXT: vucomiss %xmm3, %xmm2
; AVX512F-NEXT: movl $255, %ecx
@@ -433,14 +427,8 @@ define half @test_v2f16(<2 x half> %a0) nounwind {
; AVX512VL-LABEL: test_v2f16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512VL-NEXT: vpextrw $0, %xmm0, %eax
-; AVX512VL-NEXT: movzwl %ax, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm2
-; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrw $0, %xmm1, %eax
-; AVX512VL-NEXT: movzwl %ax, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm3
-; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm2
+; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm3
; AVX512VL-NEXT: xorl %eax, %eax
; AVX512VL-NEXT: vucomiss %xmm3, %xmm2
; AVX512VL-NEXT: movl $255, %ecx
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
index 468fec6..6360c68 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -2012,24 +2012,18 @@ define <4 x i32> @extract3_insert0_v4i32_7123(<4 x i32> %a0, <4 x i32> %a1) {
; SSE2-LABEL: extract3_insert0_v4i32_7123:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE2-NEXT: retq
;
; SSE3-LABEL: extract3_insert0_v4i32_7123:
; SSE3: # %bb.0:
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
-; SSE3-NEXT: movd %xmm1, %eax
-; SSE3-NEXT: movd %eax, %xmm1
; SSE3-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: extract3_insert0_v4i32_7123:
; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
-; SSSE3-NEXT: movd %xmm1, %eax
-; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSSE3-NEXT: retq
;
diff --git a/llvm/test/MC/AArch64/cfi-bad-nesting-darwin.s b/llvm/test/MC/AArch64/cfi-bad-nesting-darwin.s
index 235b7d4..3a5af86 100644
--- a/llvm/test/MC/AArch64/cfi-bad-nesting-darwin.s
+++ b/llvm/test/MC/AArch64/cfi-bad-nesting-darwin.s
@@ -8,6 +8,10 @@
.p2align 2
_locomotive:
.cfi_startproc
+ ; An N_ALT_ENTRY symbol can be defined in the middle of a subsection, so
+ ; these are opted out of the .cfi_{start,end}proc nesting check.
+ .alt_entry _engineer
+_engineer:
ret
; It is invalid to have a non-private label between .cfi_startproc and
@@ -17,7 +21,7 @@ _locomotive:
.p2align 2
_caboose:
; DARWIN: [[#@LINE-1]]:1: error: non-private labels cannot appear between .cfi_startproc / .cfi_endproc pairs
-; DARWIN: [[#@LINE-10]]:2: error: previous .cfi_startproc was here
+; DARWIN: [[#@LINE-14]]:2: error: previous .cfi_startproc was here
ret
.cfi_endproc
diff --git a/llvm/test/MC/AMDGPU/gfx11_unsupported.s b/llvm/test/MC/AMDGPU/gfx11_unsupported.s
index bfca71a..f447263 100644
--- a/llvm/test/MC/AMDGPU/gfx11_unsupported.s
+++ b/llvm/test/MC/AMDGPU/gfx11_unsupported.s
@@ -2052,3 +2052,15 @@ global_atomic_cond_sub_u32 v0, v2, s[0:1] offset:64
global_atomic_ordered_add_b64 v0, v[2:3], s[0:1] offset:64
// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+ds_subrev_u32 v1, v2
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+ds_subrev_rtn_u32 v5, v1, v2
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+ds_subrev_u64 v1, v[2:3]
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+ds_subrev_rtn_u64 v[5:6], v1, v[2:3]
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_ds_alias.s b/llvm/test/MC/AMDGPU/gfx12_asm_ds_alias.s
index aa063c8..057e993 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_ds_alias.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_ds_alias.s
@@ -27,5 +27,11 @@ ds_min_rtn_f64 v[5:6], v1, v[2:3]
ds_subrev_u32 v1, v2
// GFX12: ds_rsub_u32 v1, v2 ; encoding: [0x00,0x00,0x08,0xd8,0x01,0x02,0x00,0x00]
+ds_subrev_rtn_u32 v5, v1, v2
+// GFX12: ds_rsub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x88,0xd8,0x01,0x02,0x00,0x05]
+
ds_subrev_u64 v1, v[2:3]
// GFX12: ds_rsub_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x08,0xd9,0x01,0x02,0x00,0x00]
+
+ds_subrev_rtn_u64 v[5:6], v1, v[2:3]
+// GFX12: ds_rsub_rtn_u64 v[5:6], v1, v[2:3] ; encoding: [0x00,0x00,0x88,0xd9,0x01,0x02,0x00,0x05]
diff --git a/llvm/test/MC/ARM/thumbv8m.s b/llvm/test/MC/ARM/thumbv8m.s
index f03dd03..0e9ab4a 100644
--- a/llvm/test/MC/ARM/thumbv8m.s
+++ b/llvm/test/MC/ARM/thumbv8m.s
@@ -184,13 +184,13 @@ ttat r0, r1
// 'Lazy Load/Store Multiple'
// UNDEF-BASELINE: error: instruction requires: armv8m.main
-// CHECK-MAINLINE: vlldm r5, {d0 - d15} @ encoding: [0x35,0xec,0x00,0x0a]
-// CHECK-MAINLINE_DSP: vlldm r5, {d0 - d15} @ encoding: [0x35,0xec,0x00,0x0a]
+// CHECK-MAINLINE: vlldm r5 @ encoding: [0x35,0xec,0x00,0x0a]
+// CHECK-MAINLINE_DSP: vlldm r5 @ encoding: [0x35,0xec,0x00,0x0a]
vlldm r5
// UNDEF-BASELINE: error: instruction requires: armv8m.main
-// CHECK-MAINLINE: vlstm r10, {d0 - d15} @ encoding: [0x2a,0xec,0x00,0x0a]
-// CHECK-MAINLINE_DSP: vlstm r10, {d0 - d15} @ encoding: [0x2a,0xec,0x00,0x0a]
+// CHECK-MAINLINE: vlstm r10 @ encoding: [0x2a,0xec,0x00,0x0a]
+// CHECK-MAINLINE_DSP: vlstm r10 @ encoding: [0x2a,0xec,0x00,0x0a]
vlstm r10
// New SYSm's
diff --git a/llvm/test/MC/ARM/vlstm-vlldm-8.1m.s b/llvm/test/MC/ARM/vlstm-vlldm-8.1m.s
deleted file mode 100644
index 4e35883..0000000
--- a/llvm/test/MC/ARM/vlstm-vlldm-8.1m.s
+++ /dev/null
@@ -1,11 +0,0 @@
-// RUN: llvm-mc -triple=armv8.1m.main-arm-none-eabi -mcpu=generic -show-encoding %s \
-// RUN: | FileCheck --check-prefixes=CHECK %s
-
-// RUN: llvm-mc -triple=thumbv8.1m.main-none-eabi -mcpu=generic -show-encoding %s \
-// RUN: | FileCheck --check-prefixes=CHECK %s
-
-vlstm r8, {d0 - d31}
-// CHECK: vlstm r8, {d0 - d31} @ encoding: [0x28,0xec,0x80,0x0a]
-
-vlldm r8, {d0 - d31}
-// CHECK: vlldm r8, {d0 - d31} @ encoding: [0x38,0xec,0x80,0x0a]
diff --git a/llvm/test/MC/ARM/vlstm-vlldm-8m.s b/llvm/test/MC/ARM/vlstm-vlldm-8m.s
deleted file mode 100644
index bbc9531..0000000
--- a/llvm/test/MC/ARM/vlstm-vlldm-8m.s
+++ /dev/null
@@ -1,17 +0,0 @@
-// RUN: llvm-mc -triple=armv8m.main-arm-none-eabi -mcpu=generic -show-encoding %s \
-// RUN: | FileCheck --check-prefixes=CHECK %s
-
-// RUN: llvm-mc -triple=thumbv8m.main-none-eabi -mcpu=generic -show-encoding %s \
-// RUN: | FileCheck --check-prefixes=CHECK %s
-
-vlstm r8, {d0 - d15}
-// CHECK: vlstm r8, {d0 - d15} @ encoding: [0x28,0xec,0x00,0x0a]
-
-vlldm r8, {d0 - d15}
-// CHECK: vlldm r8, {d0 - d15} @ encoding: [0x38,0xec,0x00,0x0a]
-
-vlstm r8
-// CHECK: vlstm r8, {d0 - d15} @ encoding: [0x28,0xec,0x00,0x0a]
-
-vlldm r8
-// CHECK: vlldm r8, {d0 - d15} @ encoding: [0x38,0xec,0x00,0x0a]
diff --git a/llvm/test/MC/ARM/vlstm-vlldm-diag.s b/llvm/test/MC/ARM/vlstm-vlldm-diag.s
deleted file mode 100644
index b57f535..0000000
--- a/llvm/test/MC/ARM/vlstm-vlldm-diag.s
+++ /dev/null
@@ -1,61 +0,0 @@
-// RUN: not llvm-mc -triple=armv8.1m.main-arm-none-eabi -mcpu=generic -show-encoding %s 2>&1 >/dev/null \
-// RUN: | FileCheck --check-prefixes=ERR %s
-
-// RUN: not llvm-mc -triple=armv8.1m.main-arm-none-eabi -mcpu=generic -show-encoding %s 2>&1 >/dev/null \
-// RUN: | FileCheck --check-prefixes=ERRT2 %s
-
-vlstm r8, {d0 - d11}
-// ERR: error: operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)
-// ERR-NEXT: vlstm r8, {d0 - d11}
-
-vlldm r8, {d0 - d11}
-// ERR: error: operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)
-// ERR-NEXT: vlldm r8, {d0 - d11}
-
-vlstm r8, {d3 - d15}
-// ERR: error: operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)
-// ERR-NEXT: vlstm r8, {d3 - d15}
-
-vlldm r8, {d3 - d15}
-// ERR: error: operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)
-// ERR-NEXT: vlldm r8, {d3 - d15}
-
-vlstm r8, {d0 - d29}
-// ERR: error: operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)
-// ERR-NEXT: vlstm r8, {d0 - d29}
-
-vlldm r8, {d0 - d29}
-// ERR: error: operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)
-// ERR-NEXT: vlldm r8, {d0 - d29}
-
-vlstm r8, {d3 - d31}
-// ERR: error: operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)
-// ERR-NEXT: vlstm r8, {d3 - d31}
-
-vlldm r8, {d3 - d31}
-// ERR: error: operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)
-// ERR-NEXT: vlldm r8, {d3 - d31}
-
-vlstm r8, {d0 - d35}
-// ERR: error: register expected
-// ERR-NEXT: vlstm r8, {d0 - d35}
-
-vlldm r8, {d0 - d35}
-// ERR: error: register expected
-// ERR-NEXT: vlldm r8, {d0 - d35}
-
-vlstm pc
-// ERR: error: operand must be a register in range [r0, r14]
-// ERR-NEXT: vlstm pc
-
-vlldm pc
-// ERR: error: operand must be a register in range [r0, r14]
-// ERR-NEXT: vlldm pc
-
-vlstm pc
-// ERRT2: error: operand must be a register in range [r0, r14]
-// ERRT2-NEXT: vlstm pc
-
-vlldm pc
-// ERRT2: error: operand must be a register in range [r0, r14]
-// ERRT2-NEXT: vlldm pc \ No newline at end of file
diff --git a/llvm/test/MC/Disassembler/ARM/armv8.1m-vlldm_vlstm-8.1.main.txt b/llvm/test/MC/Disassembler/ARM/armv8.1m-vlldm_vlstm-8.1.main.txt
deleted file mode 100644
index 6b988245..0000000
--- a/llvm/test/MC/Disassembler/ARM/armv8.1m-vlldm_vlstm-8.1.main.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-// RUN: llvm-mc -triple=armv8.1m.main-arm-none-eabi -mcpu=generic -show-encoding -disassemble %s \
-// RUN: | FileCheck %s --check-prefixes=CHECK-DISS
-
-// RUN: llvm-mc -triple=thumbv8.1m.main-none-eabi -mcpu=generic -show-encoding -disassemble %s \
-// RUN: | FileCheck %s --check-prefixes=CHECK-DISS
-
-[0x28,0xec,0x80,0x0a]
-// CHECK-DISS: vlstm r8, {d0 - d31} @ encoding: [0x28,0xec,0x80,0x0a]
-
-[0x38,0xec,0x80,0x0a]
-// CHECK-DISS: vlldm r8, {d0 - d31} @ encoding: [0x38,0xec,0x80,0x0a] \ No newline at end of file
diff --git a/llvm/test/MC/Disassembler/ARM/armv8.1m-vlldm_vlstm-8.main.txt b/llvm/test/MC/Disassembler/ARM/armv8.1m-vlldm_vlstm-8.main.txt
deleted file mode 100644
index 1e28d52..0000000
--- a/llvm/test/MC/Disassembler/ARM/armv8.1m-vlldm_vlstm-8.main.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-// RUN: llvm-mc -triple=armv8m.main-arm-none-eabi -mcpu=generic -show-encoding -disassemble %s \
-// RUN: | FileCheck %s --check-prefixes=CHECK-DISS
-
-// RUN: llvm-mc -triple=thumbv8m.main-none-eabi -mcpu=generic -show-encoding -disassemble %s \
-// RUN: | FileCheck %s --check-prefixes=CHECK-DISS
-
-[0x28,0xec,0x00,0x0a]
-// CHECK-DISS: vlstm r8, {d0 - d15} @ encoding: [0x28,0xec,0x00,0x0a]
-
-[0x38,0xec,0x00,0x0a]
-// CHECK-DISS: vlldm r8, {d0 - d15} @ encoding: [0x38,0xec,0x00,0x0a]
-
-[0x28,0xec,0x00,0x0a]
-// CHECK-DISS: vlstm r8, {d0 - d15} @ encoding: [0x28,0xec,0x00,0x0a]
-
-[0x38,0xec,0x00,0x0a]
-// CHECK-DISS: vlldm r8, {d0 - d15} @ encoding: [0x38,0xec,0x00,0x0a] \ No newline at end of file
diff --git a/llvm/test/MC/Disassembler/X86/apx/IgnoreW.txt b/llvm/test/MC/Disassembler/X86/apx/IgnoreW.txt
new file mode 100644
index 0000000..df41bdf
--- /dev/null
+++ b/llvm/test/MC/Disassembler/X86/apx/IgnoreW.txt
@@ -0,0 +1,118 @@
+# RUN: llvm-mc --disassemble %s -triple=x86_64 | FileCheck %s --check-prefixes=ATT
+# RUN: llvm-mc --disassemble %s -triple=x86_64 -x86-asm-syntax=intel --output-asm-variant=1 | FileCheck %s --check-prefixes=INTEL
+
+## invpcid
+
+# ATT: invpcid 123(%rax,%rbx,4), %r9
+# INTEL: invpcid r9, xmmword ptr [rax + 4*rbx + 123]
+0x62,0x74,0xfe,0x08,0xf2,0x4c,0x98,0x7b
+
+# ATT: invpcid 291(%r28,%r29,4), %r19
+# INTEL: invpcid r19, xmmword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xfa,0x08,0xf2,0x9c,0xac,0x23,0x01,0x00,0x00
+
+## invept
+
+# ATT: invept 291(%r28,%r29,4), %r19
+# INTEL: invept r19, xmmword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xfa,0x08,0xf0,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: invept 123(%rax,%rbx,4), %r9
+# INTEL: invept r9, xmmword ptr [rax + 4*rbx + 123]
+0x62,0x74,0xfe,0x08,0xf0,0x4c,0x98,0x7b
+
+## invvpid
+
+# ATT: invvpid 291(%r28,%r29,4), %r19
+# INTEL: invvpid r19, xmmword ptr [r28 + 4*r29 + 291]
+0x62,0x8c,0xfa,0x08,0xf1,0x9c,0xac,0x23,0x01,0x00,0x00
+
+# ATT: invvpid 123(%rax,%rbx,4), %r9
+# INTEL: invvpid r9, xmmword ptr [rax + 4*rbx + 123]
+0x62,0x74,0xfe,0x08,0xf1,0x4c,0x98,0x7b
+
+## adc
+
+# ATT: {evex} adcb $123, %bl
+# INTEL: {evex} adc bl, 123
+0x62,0xf4,0xfc,0x08,0x80,0xd3,0x7b
+
+# ATT: adcb $123, %bl, %cl
+# INTEL: adc cl, bl, 123
+0x62,0xf4,0xf4,0x18,0x80,0xd3,0x7b
+
+# ATT: adcb $123, %r16b
+# INTEL: adc r16b, 123
+0xd5,0x18,0x80,0xd0,0x7b
+
+## add
+
+# ATT: {evex} addb $123, %bl
+# INTEL: {evex} add bl, 123
+0x62,0xf4,0xfc,0x08,0x80,0xc3,0x7b
+
+# ATT: {nf} addb $123, %bl
+# INTEL: {nf} add bl, 123
+0x62,0xf4,0xfc,0x0c,0x80,0xc3,0x7b
+
+# ATT: addb $123, %bl, %cl
+# INTEL: add cl, bl, 123
+0x62,0xf4,0xf4,0x18,0x80,0xc3,0x7b
+
+# ATT: {nf} addb $123, %bl, %cl
+# INTEL: {nf} add cl, bl, 123
+0x62,0xf4,0xf4,0x1c,0x80,0xc3,0x7b
+
+# ATT: addb $123, %r16b
+# INTEL: add r16b, 123
+0xd5,0x18,0x80,0xc0,0x7b
+
+## inc
+
+# ATT: {evex} incb %bl
+# INTEL: {evex} inc bl
+0x62,0xf4,0xfc,0x08,0xfe,0xc3
+
+# ATT: {nf} incb %bl
+# INTEL: {nf} inc bl
+0x62,0xf4,0xfc,0x0c,0xfe,0xc3
+
+# ATT: incb %bl, %bl
+# INTEL: inc bl, bl
+0x62,0xf4,0xe4,0x18,0xfe,0xc3
+
+# ATT: {nf} incb %bl, %bl
+# INTEL: {nf} inc bl, bl
+0x62,0xf4,0xe4,0x1c,0xfe,0xc3
+
+# ATT: incb %r16b
+# INTEL: inc r16b
+0xd5,0x18,0xfe,0xc0
+
+## mul
+
+# ATT: {evex} mulb %bl
+# INTEL: {evex} mul bl
+0x62,0xf4,0xfc,0x08,0xf6,0xe3
+
+# ATT: {nf} mulb %bl
+# INTEL: {nf} mul bl
+0x62,0xf4,0xfc,0x0c,0xf6,0xe3
+
+# ATT: mulb %r16b
+# INTEL: mul r16b
+0xd5,0x18,0xf6,0xe0
+
+## imul
+
+# ATT: {evex} imulb %bl
+# INTEL: {evex} imul bl
+0x62,0xf4,0xfc,0x08,0xf6,0xeb
+
+# ATT: {nf} imulb %bl
+# INTEL: {nf} imul bl
+0x62,0xf4,0xfc,0x0c,0xf6,0xeb
+
+# ATT: imulb %r16b
+# INTEL: imul r16b
+0xd5,0x18,0xf6,0xe8
diff --git a/llvm/test/MC/RISCV/rv32zacas-invalid.s b/llvm/test/MC/RISCV/rv32zacas-invalid.s
index b86246c..11d20da 100644
--- a/llvm/test/MC/RISCV/rv32zacas-invalid.s
+++ b/llvm/test/MC/RISCV/rv32zacas-invalid.s
@@ -1,4 +1,4 @@
-# RUN: not llvm-mc -triple riscv32 -mattr=+experimental-zacas < %s 2>&1 | FileCheck %s
+# RUN: not llvm-mc -triple riscv32 -mattr=+zacas < %s 2>&1 | FileCheck %s
# Non-zero offsets not supported for the third operand (rs1).
amocas.w a1, a3, 1(a5) # CHECK: :[[@LINE]]:18: error: optional integer offset must be 0
diff --git a/llvm/test/MC/RISCV/rv32zacas-valid.s b/llvm/test/MC/RISCV/rv32zacas-valid.s
index d80b963..05a9cdd 100644
--- a/llvm/test/MC/RISCV/rv32zacas-valid.s
+++ b/llvm/test/MC/RISCV/rv32zacas-valid.s
@@ -1,12 +1,12 @@
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zacas -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv32 -mattr=+zacas -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zacas -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+zacas -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zacas < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zacas -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+zacas < %s \
+# RUN: | llvm-objdump --mattr=+zacas -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zacas < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zacas -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+zacas < %s \
+# RUN: | llvm-objdump --mattr=+zacas -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
# RUN: not llvm-mc -triple=riscv32 -mattr=+a -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
diff --git a/llvm/test/MC/RISCV/rv64zacas-valid.s b/llvm/test/MC/RISCV/rv64zacas-valid.s
index 843401b..694f43b 100644
--- a/llvm/test/MC/RISCV/rv64zacas-valid.s
+++ b/llvm/test/MC/RISCV/rv64zacas-valid.s
@@ -1,7 +1,7 @@
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zacas -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+zacas -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zacas < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zacas -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+zacas < %s \
+# RUN: | llvm-objdump --mattr=+zacas -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
# RUN: not llvm-mc -triple=riscv64 -mattr=+a -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
diff --git a/llvm/test/MC/RISCV/rvzabha-zacas-valid.s b/llvm/test/MC/RISCV/rvzabha-zacas-valid.s
index 8ad2f99..f1f705e 100644
--- a/llvm/test/MC/RISCV/rvzabha-zacas-valid.s
+++ b/llvm/test/MC/RISCV/rvzabha-zacas-valid.s
@@ -1,12 +1,12 @@
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zabha,+experimental-zacas -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zabha,+zacas -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zabha,+experimental-zacas -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zabha,+zacas -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zabha,+experimental-zacas < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zabha,+experimental-zacas -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zabha,+zacas < %s \
+# RUN: | llvm-objdump --mattr=+experimental-zabha,+zacas -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zabha,+experimental-zacas < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zabha,+experimental-zacas -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zabha,+zacas < %s \
+# RUN: | llvm-objdump --mattr=+experimental-zabha,+zacas -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
# RUN: not llvm-mc -triple=riscv32 -mattr=+experimental-zabha -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
diff --git a/llvm/test/ThinLTO/X86/visibility-elf.ll b/llvm/test/ThinLTO/X86/visibility-elf.ll
index aa11c3e..fc7439b 100644
--- a/llvm/test/ThinLTO/X86/visibility-elf.ll
+++ b/llvm/test/ThinLTO/X86/visibility-elf.ll
@@ -36,12 +36,12 @@ declare void @ext(ptr)
;; Currently the visibility is not propagated onto an unimported function,
;; because we don't have summaries for declarations.
; CHECK: declare extern_weak void @not_imported()
-; CHECK: define available_externally hidden void @hidden_def_ref() !thinlto_src_module !0
-; CHECK: define available_externally hidden void @hidden_def_weak_ref() !thinlto_src_module !0
+; CHECK: define available_externally hidden void @hidden_def_ref() !thinlto_src_module !0 !thinlto_src_file !1
+; CHECK: define available_externally hidden void @hidden_def_weak_ref() !thinlto_src_module !0 !thinlto_src_file !1
;; This can be hidden, but we cannot communicate the declaration's visibility
;; to other modules because declarations don't have summaries, and the IRLinker
;; overrides it when importing the protected def.
-; CHECK: define available_externally protected void @protected_def_hidden_ref() !thinlto_src_module !0
+; CHECK: define available_externally protected void @protected_def_hidden_ref() !thinlto_src_module !0 !thinlto_src_file !1
; CHECK2: define hidden i32 @hidden_def_weak_def()
; CHECK2: define protected void @protected_def_weak_def()
diff --git a/llvm/test/ThinLTO/X86/visibility-macho.ll b/llvm/test/ThinLTO/X86/visibility-macho.ll
index d41ab4f..1a48b47 100644
--- a/llvm/test/ThinLTO/X86/visibility-macho.ll
+++ b/llvm/test/ThinLTO/X86/visibility-macho.ll
@@ -30,8 +30,8 @@ declare void @ext(ptr)
;; Currently the visibility is not propagated onto an unimported function,
;; because we don't have summaries for declarations.
; CHECK: declare extern_weak dso_local void @not_imported()
-; CHECK: define available_externally hidden void @hidden_def_ref() !thinlto_src_module !0
-; CHECK: define available_externally hidden void @hidden_def_weak_ref() !thinlto_src_module !0
+; CHECK: define available_externally hidden void @hidden_def_ref() !thinlto_src_module !0 !thinlto_src_file !1
+; CHECK: define available_externally hidden void @hidden_def_weak_ref() !thinlto_src_module !0 !thinlto_src_file !1
; CHECK2: define hidden i32 @hidden_def_weak_def()
; CHECK2: define hidden void @hidden_def_ref()
diff --git a/llvm/test/Transforms/Coroutines/coro-debug-dbg.values.ll b/llvm/test/Transforms/Coroutines/coro-debug-dbg.values.ll
index 47b2dda..dd9310f 100644
--- a/llvm/test/Transforms/Coroutines/coro-debug-dbg.values.ll
+++ b/llvm/test/Transforms/Coroutines/coro-debug-dbg.values.ll
@@ -9,6 +9,11 @@
; CHECK-SAME: !DIExpression(DW_OP_plus_uconst, [[OffsetX:[0-9]*]]))
; ^ No deref at the end, as this variable ("x") is an array;
; its value is its address. The entire array is in the frame.
+; CHECK: call void @llvm.dbg.assign(metadata ptr %[[frame]]
+; CHECK-SAME: !DIExpression(DW_OP_plus_uconst, [[OffsetX]])
+;; FIXME: Should we be updating the addresses on assigns here as well?
+; CHECK-SAME: , metadata ptr %[[frame]], metadata !DIExpression())
+
; CHECK: call void @llvm.dbg.value(metadata ptr %[[frame]]
; CHECK-SAME: !DIExpression(DW_OP_plus_uconst, [[OffsetSpill:[0-9]*]], DW_OP_deref))
; CHECK: call void @llvm.dbg.value(metadata ptr %[[frame]]
@@ -78,6 +83,7 @@ init.ready: ; preds = %init.suspend, %coro
%i.init.ready.inc = add nsw i32 0, 1
call void @llvm.dbg.value(metadata i32 %i.init.ready.inc, metadata !6, metadata !DIExpression()), !dbg !11
call void @llvm.dbg.value(metadata ptr %x, metadata !12, metadata !DIExpression()), !dbg !17
+ call void @llvm.dbg.assign(metadata ptr %x, metadata !12, metadata !DIExpression(), metadata !30, metadata ptr %x, metadata !DIExpression()), !dbg !17
call void @llvm.memset.p0.i64(ptr align 16 %x, i8 0, i64 40, i1 false), !dbg !17
call void @print(i32 %i.init.ready.inc)
%ready.again = call zeroext i1 @await_ready()
@@ -250,3 +256,4 @@ attributes #4 = { argmemonly nofree nosync nounwind willreturn writeonly }
!21 = !DILocation(line: 43, column: 3, scope: !7)
!22 = !DILocation(line: 43, column: 8, scope: !7)
!23 = !DILocalVariable(name: "produced", scope: !7, file: !1, line:24, type: !10)
+!30 = distinct !DIAssignID() \ No newline at end of file
diff --git a/llvm/test/Transforms/FunctionImport/funcimport.ll b/llvm/test/Transforms/FunctionImport/funcimport.ll
index 0129825..a0968a6 100644
--- a/llvm/test/Transforms/FunctionImport/funcimport.ll
+++ b/llvm/test/Transforms/FunctionImport/funcimport.ll
@@ -57,7 +57,7 @@ declare void @linkoncealias(...) #1
; CHECK-DAG: define available_externally void @linkoncealias()
; INSTLIMDEF-DAG: Import referencestatics
-; INSTLIMDEF-DAG: define available_externally i32 @referencestatics(i32 %i) !thinlto_src_module !0 {
+; INSTLIMDEF-DAG: define available_externally i32 @referencestatics(i32 %i) !thinlto_src_module !0 !thinlto_src_file !1 {
; INSTLIM5-DAG: declare i32 @referencestatics(...)
declare i32 @referencestatics(...) #1
@@ -66,27 +66,27 @@ declare i32 @referencestatics(...) #1
; Ensure that the call is to the properly-renamed function.
; INSTLIMDEF-DAG: Import staticfunc
; INSTLIMDEF-DAG: %call = call i32 @staticfunc.llvm.
-; INSTLIMDEF-DAG: define available_externally hidden i32 @staticfunc.llvm.{{.*}} !thinlto_src_module !0 {
+; INSTLIMDEF-DAG: define available_externally hidden i32 @staticfunc.llvm.{{.*}} !thinlto_src_module !0 !thinlto_src_file !1 {
; INSTLIMDEF-DAG: Import referenceglobals
-; CHECK-DAG: define available_externally i32 @referenceglobals(i32 %i) !thinlto_src_module !0 {
+; CHECK-DAG: define available_externally i32 @referenceglobals(i32 %i) !thinlto_src_module !0 !thinlto_src_file !1 {
declare i32 @referenceglobals(...) #1
; The import of referenceglobals will expose call to globalfunc1 that
; should in turn be imported.
; INSTLIMDEF-DAG: Import globalfunc1
-; CHECK-DAG: define available_externally void @globalfunc1() !thinlto_src_module !0
+; CHECK-DAG: define available_externally void @globalfunc1() !thinlto_src_module !0 !thinlto_src_file !1
; INSTLIMDEF-DAG: Import referencecommon
-; CHECK-DAG: define available_externally i32 @referencecommon(i32 %i) !thinlto_src_module !0 {
+; CHECK-DAG: define available_externally i32 @referencecommon(i32 %i) !thinlto_src_module !0 !thinlto_src_file !1 {
declare i32 @referencecommon(...) #1
; INSTLIMDEF-DAG: Import setfuncptr
-; CHECK-DAG: define available_externally void @setfuncptr() !thinlto_src_module !0 {
+; CHECK-DAG: define available_externally void @setfuncptr() !thinlto_src_module !0 !thinlto_src_file !1 {
declare void @setfuncptr(...) #1
; INSTLIMDEF-DAG: Import callfuncptr
-; CHECK-DAG: define available_externally void @callfuncptr() !thinlto_src_module !0 {
+; CHECK-DAG: define available_externally void @callfuncptr() !thinlto_src_module !0 !thinlto_src_file !1 {
declare void @callfuncptr(...) #1
; Ensure that all uses of local variable @P which has used in setfuncptr
@@ -97,7 +97,7 @@ declare void @callfuncptr(...) #1
; Ensure that @referencelargelinkonce definition is pulled in, but later we
; also check that the linkonceodr function is not.
-; CHECK-DAG: define available_externally void @referencelargelinkonce() !thinlto_src_module !0 {
+; CHECK-DAG: define available_externally void @referencelargelinkonce() !thinlto_src_module !0 !thinlto_src_file !1 {
; INSTLIM5-DAG: declare void @linkonceodr()
declare void @referencelargelinkonce(...)
@@ -110,13 +110,13 @@ declare void @weakfunc(...) #1
declare void @linkoncefunc2(...) #1
; INSTLIMDEF-DAG: Import funcwithpersonality
-; INSTLIMDEF-DAG: define available_externally hidden void @funcwithpersonality.llvm.{{.*}}() personality ptr @__gxx_personality_v0 !thinlto_src_module !0 {
+; INSTLIMDEF-DAG: define available_externally hidden void @funcwithpersonality.llvm.{{.*}}() personality ptr @__gxx_personality_v0 !thinlto_src_module !0 !thinlto_src_file !1 {
; INSTLIM5-DAG: declare hidden void @funcwithpersonality.llvm.{{.*}}()
; We can import variadic functions without a va_start, since the inliner
; can handle them.
; INSTLIMDEF-DAG: Import variadic_no_va_start
-; CHECK-DAG: define available_externally void @variadic_no_va_start(...) !thinlto_src_module !0 {
+; CHECK-DAG: define available_externally void @variadic_no_va_start(...) !thinlto_src_module !0 !thinlto_src_file !1 {
declare void @variadic_no_va_start(...)
; We can import variadic functions with a va_start, since the inliner
@@ -128,7 +128,8 @@ declare void @variadic_va_start(...)
; INSTLIMDEF-DAG: 15 function-import - Number of functions imported
; INSTLIMDEF-DAG: 4 function-import - Number of global variables imported
-; CHECK-DAG: !0 = !{!"{{.*}}/Inputs/funcimport.ll"}
+; CHECK-DAG: !0 = !{!"{{.*}}.bc"}
+; CHECK-DAG: !1 = !{!"{{.*}}/Inputs/funcimport.ll"}
; The actual GUID values will depend on path to test.
; GUID-DAG: GUID {{.*}} is weakalias
diff --git a/llvm/test/Transforms/Inline/inline_stats.ll b/llvm/test/Transforms/Inline/inline_stats.ll
index c779054..41c12b3 100644
--- a/llvm/test/Transforms/Inline/inline_stats.ll
+++ b/llvm/test/Transforms/Inline/inline_stats.ll
@@ -44,7 +44,7 @@ define void @internal3() {
declare void @external_decl()
-define void @external1() alwaysinline !thinlto_src_module !0 {
+define void @external1() alwaysinline !thinlto_src_module !0 !thinlto_src_file !1 {
call fastcc void @internal2()
call fastcc void @external2();
call void @external_decl();
@@ -87,7 +87,7 @@ define void @external_big() noinline !thinlto_src_module !1 {
}
; It should not be imported, but it should not break anything.
-define void @external_notcalled() !thinlto_src_module !0 {
+define void @external_notcalled() !thinlto_src_module !0 !thinlto_src_file !1 {
call void @external_notcalled()
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/maxnum.ll b/llvm/test/Transforms/InstCombine/maxnum.ll
index 87288b1..e140a5b 100644
--- a/llvm/test/Transforms/InstCombine/maxnum.ll
+++ b/llvm/test/Transforms/InstCombine/maxnum.ll
@@ -66,7 +66,7 @@ define float @constant_fold_maxnum_f32_p0_n0() {
define float @constant_fold_maxnum_f32_n0_p0() {
; CHECK-LABEL: @constant_fold_maxnum_f32_n0_p0(
-; CHECK-NEXT: ret float -0.000000e+00
+; CHECK-NEXT: ret float 0.000000e+00
;
%x = call float @llvm.maxnum.f32(float -0.0, float 0.0)
ret float %x
diff --git a/llvm/test/Transforms/InstCombine/minnum.ll b/llvm/test/Transforms/InstCombine/minnum.ll
index 8050f07..cc6171b 100644
--- a/llvm/test/Transforms/InstCombine/minnum.ll
+++ b/llvm/test/Transforms/InstCombine/minnum.ll
@@ -60,7 +60,7 @@ define float @constant_fold_minnum_f32_p0_p0() {
define float @constant_fold_minnum_f32_p0_n0() {
; CHECK-LABEL: @constant_fold_minnum_f32_p0_n0(
-; CHECK-NEXT: ret float 0.000000e+00
+; CHECK-NEXT: ret float -0.000000e+00
;
%x = call float @llvm.minnum.f32(float 0.0, float -0.0)
ret float %x
@@ -199,7 +199,7 @@ define float @minnum_f32_1_minnum_p0_val_fmf3(float %x) {
define float @minnum_f32_p0_minnum_val_n0(float %x) {
; CHECK-LABEL: @minnum_f32_p0_minnum_val_n0(
-; CHECK-NEXT: [[Z:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float 0.000000e+00)
+; CHECK-NEXT: [[Z:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float -0.000000e+00)
; CHECK-NEXT: ret float [[Z]]
;
%y = call float @llvm.minnum.f32(float %x, float -0.0)
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/min-max.ll b/llvm/test/Transforms/InstSimplify/ConstProp/min-max.ll
index a5f5d4e..9120649 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/min-max.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/min-max.ll
@@ -49,6 +49,38 @@ define float @minnum_float() {
ret float %1
}
+define float @minnum_float_p0_n0() {
+; CHECK-LABEL: @minnum_float_p0_n0(
+; CHECK-NEXT: ret float -0.000000e+00
+;
+ %min = call float @llvm.minnum.f32(float 0.0, float -0.0)
+ ret float %min
+}
+
+define float @minnum_float_n0_p0() {
+; CHECK-LABEL: @minnum_float_n0_p0(
+; CHECK-NEXT: ret float -0.000000e+00
+;
+ %min = call float @llvm.minnum.f32(float -0.0, float 0.0)
+ ret float %min
+}
+
+define float @minnum_float_p0_qnan() {
+; CHECK-LABEL: @minnum_float_p0_qnan(
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %min = call float @llvm.minnum.f32(float 0.0, float 0x7FF8000000000000)
+ ret float %min
+}
+
+define float @minnum_float_qnan_p0() {
+; CHECK-LABEL: @minnum_float_qnan_p0(
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %min = call float @llvm.minnum.f32(float 0x7FF8000000000000, float 0.0)
+ ret float %min
+}
+
define bfloat @minnum_bfloat() {
; CHECK-LABEL: @minnum_bfloat(
; CHECK-NEXT: ret bfloat 0xR40A0
@@ -95,7 +127,7 @@ define <4 x half> @minnum_half_vec() {
define <4 x float> @minnum_float_zeros_vec() {
; CHECK-LABEL: @minnum_float_zeros_vec(
-; CHECK-NEXT: ret <4 x float> <float 0.000000e+00, float -0.000000e+00, float 0.000000e+00, float -0.000000e+00>
+; CHECK-NEXT: ret <4 x float> <float 0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>
;
%1 = call <4 x float> @llvm.minnum.v4f32(<4 x float> <float 0.0, float -0.0, float 0.0, float -0.0>, <4 x float> <float 0.0, float 0.0, float -0.0, float -0.0>)
ret <4 x float> %1
@@ -109,6 +141,38 @@ define float @maxnum_float() {
ret float %1
}
+define float @maxnum_float_p0_n0() {
+; CHECK-LABEL: @maxnum_float_p0_n0(
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %max = call float @llvm.maxnum.f32(float 0.0, float -0.0)
+ ret float %max
+}
+
+define float @maxnum_float_n0_p0() {
+; CHECK-LABEL: @maxnum_float_n0_p0(
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %max = call float @llvm.maxnum.f32(float -0.0, float 0.0)
+ ret float %max
+}
+
+define float @maxnum_float_p0_qnan() {
+; CHECK-LABEL: @maxnum_float_p0_qnan(
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %max = call float @llvm.maxnum.f32(float 0.0, float 0x7FF8000000000000)
+ ret float %max
+}
+
+define float @maxnum_float_qnan_p0() {
+; CHECK-LABEL: @maxnum_float_qnan_p0(
+; CHECK-NEXT: ret float 0.000000e+00
+;
+ %max = call float @llvm.maxnum.f32(float 0x7FF8000000000000, float 0.0)
+ ret float %max
+}
+
define bfloat @maxnum_bfloat() {
; CHECK-LABEL: @maxnum_bfloat(
; CHECK-NEXT: ret bfloat 0xR4228
@@ -155,7 +219,7 @@ define <4 x half> @maxnum_half_vec() {
define <4 x float> @maxnum_float_zeros_vec() {
; CHECK-LABEL: @maxnum_float_zeros_vec(
-; CHECK-NEXT: ret <4 x float> <float 0.000000e+00, float -0.000000e+00, float 0.000000e+00, float -0.000000e+00>
+; CHECK-NEXT: ret <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float -0.000000e+00>
;
%1 = call <4 x float> @llvm.maxnum.v4f32(<4 x float> <float 0.0, float -0.0, float 0.0, float -0.0>, <4 x float> <float 0.0, float 0.0, float -0.0, float -0.0>)
ret <4 x float> %1
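
The new expectations in the minnum/maxnum folds above follow IEEE-754 minNum/maxNum semantics: -0.0 orders below +0.0, and when exactly one operand is a quiet NaN the other operand is returned; the vector cases fold elementwise the same way. Below is a small standalone sketch of those rules using llvm::APFloat's minnum/maxnum helpers (the same helpers exercised by the APFloatTest hunk further down); it is an illustration, not code from the patch.

// Standalone illustration of the zero/NaN ordering rules checked above,
// using llvm::APFloat's minnum/maxnum helpers.
#include "llvm/ADT/APFloat.h"
#include <cassert>

int main() {
  using llvm::APFloat;
  APFloat PZ(+0.0f), NZ(-0.0f);
  APFloat QNaN = APFloat::getQNaN(APFloat::IEEEsingle());

  APFloat MinZ = llvm::minnum(PZ, NZ);
  assert(MinZ.isZero() && MinZ.isNegative());  // minnum(+0.0, -0.0) -> -0.0

  APFloat MaxZ = llvm::maxnum(NZ, PZ);
  assert(MaxZ.isZero() && !MaxZ.isNegative()); // maxnum(-0.0, +0.0) -> +0.0

  // With exactly one quiet NaN operand, the non-NaN operand is returned.
  assert(!llvm::minnum(PZ, QNaN).isNaN());
  assert(!llvm::maxnum(QNaN, PZ).isNaN());
  return 0;
}
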
diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/interleave_IC.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/interleave_IC.ll
index 7121c85..c12b3b1 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/interleave_IC.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/interleave_IC.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -passes=loop-vectorize -S -mcpu=pwr9 -interleave-small-loop-scalar-reduction=true 2>&1 | FileCheck %s
-; RUN: opt < %s -passes='loop-vectorize' -S -mcpu=pwr9 -interleave-small-loop-scalar-reduction=true 2>&1 | FileCheck %s
+; RUN: opt < %s -passes=loop-vectorize -S -mcpu=pwr9 2>&1 | FileCheck %s
+; RUN: opt < %s -passes='loop-vectorize' -S -mcpu=pwr9 2>&1 | FileCheck %s
; CHECK-LABEL: vector.body
; CHECK: load double, ptr
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
index da6dc34..72d9691 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
@@ -36,10 +36,10 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: %1 = load i32, ptr %arrayidx, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: %1 = load i32, ptr %arrayidx, align 4
; CHECK-NEXT: LV: Found an estimated cost of 2 for VF vscale x 4 For instruction: %add9 = add i32 %1, 1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: store i32 %add9, ptr %arrayidx3, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: store i32 %add9, ptr %arrayidx3, align 4
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
@@ -86,10 +86,10 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: %1 = load i32, ptr %arrayidx, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: %1 = load i32, ptr %arrayidx, align 4
; CHECK-NEXT: LV: Found an estimated cost of 2 for VF vscale x 4 For instruction: %add9 = add i32 %1, 1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: store i32 %add9, ptr %arrayidx3, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: store i32 %add9, ptr %arrayidx3, align 4
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
@@ -112,7 +112,7 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV(REG): RegisterClass: RISCV::GPRRC, 1 registers
; CHECK-NEXT: LV: The target has 31 registers of RISCV::GPRRC register class
; CHECK-NEXT: LV: The target has 32 registers of RISCV::VRRC register class
-; CHECK-NEXT: LV: Loop cost is 28
+; CHECK-NEXT: LV: Loop cost is 32
; CHECK-NEXT: LV: IC is 1
; CHECK-NEXT: LV: VF is vscale x 4
; CHECK-NEXT: LV: Not Interleaving.
@@ -122,6 +122,7 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: Executing best plan with VF=vscale x 4, UF=1
; CHECK: LV: Interleaving disabled by the pass manager
; CHECK-NEXT: LV: Vectorizing: innermost loop.
+; CHECK-EMPTY:
;
entry:
%cmp7 = icmp sgt i32 %n, 0
@@ -176,10 +177,10 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: %1 = load float, ptr %arrayidx, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: %1 = load float, ptr %arrayidx, align 4
; CHECK-NEXT: LV: Found an estimated cost of 2 for VF vscale x 4 For instruction: %conv1 = fadd float %1, 1.000000e+00
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: store float %conv1, ptr %arrayidx3, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: store float %conv1, ptr %arrayidx3, align 4
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
@@ -226,10 +227,10 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: %1 = load float, ptr %arrayidx, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: %1 = load float, ptr %arrayidx, align 4
; CHECK-NEXT: LV: Found an estimated cost of 2 for VF vscale x 4 For instruction: %conv1 = fadd float %1, 1.000000e+00
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
-; CHECK-NEXT: LV: Found an estimated cost of 11 for VF vscale x 4 For instruction: store float %conv1, ptr %arrayidx3, align 4
+; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: store float %conv1, ptr %arrayidx3, align 4
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
@@ -252,7 +253,7 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: LV(REG): RegisterClass: RISCV::GPRRC, 1 registers
; CHECK-NEXT: LV: The target has 31 registers of RISCV::GPRRC register class
; CHECK-NEXT: LV: The target has 32 registers of RISCV::VRRC register class
-; CHECK-NEXT: LV: Loop cost is 28
+; CHECK-NEXT: LV: Loop cost is 32
; CHECK-NEXT: LV: IC is 1
; CHECK-NEXT: LV: VF is vscale x 4
; CHECK-NEXT: LV: Not Interleaving.
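
The cost updates above are internally consistent: each loop body contains one reverse-order load and one reverse-order store, each rising from cost 11 to 13 at VF vscale x 4, so the total changes by 2 x (13 - 11) = 4, taking the reported loop cost from 28 to 32 in both the i64 and f32 variants.
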
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll
new file mode 100644
index 0000000..d3d13ae
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll
@@ -0,0 +1,15 @@
+; RUN: opt -S < %s | FileCheck %s
+
+define i32 @foo() {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @bar(i32 0, i32 1)
+; CHECK-NEXT: ret i32 [[RESULT]]
+;
+ %result = call i32 @bar(i32 0, i32 1)
+ ret i32 %result
+}
+
+declare i32 @bar(i32, i32)
+; CHECK-LABEL: @bar(
+; CHECK-SAME: i32
+; CHECK-SAME: i32
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll.expected
new file mode 100644
index 0000000..e76efae
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/global_remove_same.ll.expected
@@ -0,0 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S < %s | FileCheck %s
+
+define i32 @foo() {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @bar(i32 0, i32 1)
+; CHECK-NEXT: ret i32 [[RESULT]]
+;
+ %result = call i32 @bar(i32 0, i32 1)
+ ret i32 %result
+}
+
+declare i32 @bar(i32, i32)
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/global_remove_same.test b/llvm/test/tools/UpdateTestChecks/update_test_checks/global_remove_same.test
new file mode 100644
index 0000000..5d447bab
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/global_remove_same.test
@@ -0,0 +1,4 @@
+## Basic test checking that global checks split over multiple lines are removed together
+# RUN: cp -f %S/Inputs/global_remove_same.ll %t.ll && %update_test_checks %t.ll
+# RUN: diff -u %t.ll %S/Inputs/global_remove_same.ll.expected
+
diff --git a/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp b/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp
index 2b438a8..2bfc970 100644
--- a/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp
+++ b/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp
@@ -286,6 +286,8 @@ static opt<bool> Verify("verify", desc("Verify the DWARF debug info."),
cat(DwarfDumpCategory));
static opt<ErrorDetailLevel> ErrorDetails(
"error-display", init(Unspecified),
+ desc("Set the level of detail and summary to display when verifying "
+ "(implies --verify)"),
values(clEnumValN(NoDetailsOrSummary, "quiet",
"Only display whether errors occurred."),
clEnumValN(NoDetailsOnlySummary, "summary",
@@ -295,6 +297,11 @@ static opt<ErrorDetailLevel> ErrorDetails(
clEnumValN(BothDetailsAndSummary, "full",
"Display each error as well as a summary. [default]")),
cat(DwarfDumpCategory));
+static opt<std::string> JsonErrSummaryFile(
+ "verify-json", init(""),
+ desc("Output JSON-formatted error summary to the specified file. "
+ "(Implies --verify)"),
+ value_desc("filename.json"), cat(DwarfDumpCategory));
static opt<bool> Quiet("quiet", desc("Use with -verify to not emit to STDOUT."),
cat(DwarfDumpCategory));
static opt<bool> DumpUUID("uuid", desc("Show the UUID for each architecture."),
@@ -349,6 +356,7 @@ static DIDumpOptions getDumpOpts(DWARFContext &C) {
ErrorDetails != NoDetailsOrSummary;
DumpOpts.ShowAggregateErrors = ErrorDetails != OnlyDetailsNoSummary &&
ErrorDetails != NoDetailsOnlySummary;
+ DumpOpts.JsonErrSummaryFile = JsonErrSummaryFile;
return DumpOpts.noImplicitRecursion();
}
return DumpOpts;
@@ -834,8 +842,10 @@ int main(int argc, char **argv) {
"-verbose is currently not supported";
return 1;
}
- if (!Verify && ErrorDetails != Unspecified)
- WithColor::warning() << "-error-detail has no affect without -verify";
+ // --error-display and --verify-json both imply --verify
+ if (ErrorDetails != Unspecified || !JsonErrSummaryFile.empty()) {
+ Verify = true;
+ }
std::error_code EC;
ToolOutputFile OutputFile(OutputFilename, EC, sys::fs::OF_TextWithCRLF);
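
With the change above, --error-display and the new --verify-json switch verification on rather than warning that the flag has no effect. Below is a minimal standalone sketch of the same "one option implies another" pattern built only on llvm::cl; the option names are illustrative and this is not the tool's code.

// Sketch of an option implying another option with llvm::cl (illustrative).
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool> Verify("verify", cl::desc("Run the verifier"));
static cl::opt<std::string>
    JsonSummary("verify-json", cl::init(""),
                cl::desc("Write a JSON error summary (implies --verify)"));

int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv);
  // Requesting a JSON summary only makes sense when verifying, so turn the
  // verifier on instead of warning and ignoring the flag.
  if (!JsonSummary.empty())
    Verify = true;
  outs() << "verify=" << (Verify ? "true" : "false") << "\n";
  return 0;
}
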
diff --git a/llvm/unittests/ADT/APFloatTest.cpp b/llvm/unittests/ADT/APFloatTest.cpp
index baf055e..6e4dda8 100644
--- a/llvm/unittests/ADT/APFloatTest.cpp
+++ b/llvm/unittests/ADT/APFloatTest.cpp
@@ -578,6 +578,11 @@ TEST(APFloatTest, MinNum) {
EXPECT_EQ(1.0, minnum(f2, f1).convertToDouble());
EXPECT_EQ(1.0, minnum(f1, nan).convertToDouble());
EXPECT_EQ(1.0, minnum(nan, f1).convertToDouble());
+
+ APFloat zp(0.0);
+ APFloat zn(-0.0);
+ EXPECT_EQ(-0.0, minnum(zp, zn).convertToDouble());
+ EXPECT_EQ(-0.0, minnum(zn, zp).convertToDouble());
}
TEST(APFloatTest, MaxNum) {
@@ -589,6 +594,11 @@ TEST(APFloatTest, MaxNum) {
EXPECT_EQ(2.0, maxnum(f2, f1).convertToDouble());
EXPECT_EQ(1.0, maxnum(f1, nan).convertToDouble());
EXPECT_EQ(1.0, maxnum(nan, f1).convertToDouble());
+
+ APFloat zp(0.0);
+ APFloat zn(-0.0);
+ EXPECT_EQ(0.0, maxnum(zp, zn).convertToDouble());
+ EXPECT_EQ(0.0, maxnum(zn, zp).convertToDouble());
}
TEST(APFloatTest, Minimum) {
diff --git a/llvm/unittests/CodeGen/AArch64SelectionDAGTest.cpp b/llvm/unittests/CodeGen/AArch64SelectionDAGTest.cpp
index bb8e76a..e077268 100644
--- a/llvm/unittests/CodeGen/AArch64SelectionDAGTest.cpp
+++ b/llvm/unittests/CodeGen/AArch64SelectionDAGTest.cpp
@@ -6,11 +6,13 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/IR/MDBuilder.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SourceMgr.h"
@@ -728,4 +730,70 @@ TEST_F(AArch64SelectionDAGTest, ReplaceAllUsesWith) {
EXPECT_EQ(DAG->getPCSections(New.getNode()), MD);
}
+TEST_F(AArch64SelectionDAGTest, computeKnownBits_extload_known01) {
+ SDLoc Loc;
+ auto Int8VT = EVT::getIntegerVT(Context, 8);
+ auto Int32VT = EVT::getIntegerVT(Context, 32);
+ auto Int64VT = EVT::getIntegerVT(Context, 64);
+ auto Ptr = DAG->getConstant(0, Loc, Int64VT);
+ auto PtrInfo =
+ MachinePointerInfo::getFixedStack(DAG->getMachineFunction(), 0);
+ AAMDNodes AA;
+ MDBuilder MDHelper(*DAG->getContext());
+ MDNode *Range = MDHelper.createRange(APInt(8, 0), APInt(8, 2));
+ MachineMemOperand *MMO = DAG->getMachineFunction().getMachineMemOperand(
+ PtrInfo, MachineMemOperand::MOLoad, 8, Align(8), AA, Range);
+
+ auto ALoad = DAG->getExtLoad(ISD::EXTLOAD, Loc, Int32VT, DAG->getEntryNode(),
+ Ptr, Int8VT, MMO);
+ KnownBits Known = DAG->computeKnownBits(ALoad);
+ EXPECT_EQ(Known.Zero, APInt(32, 0xfe));
+ EXPECT_EQ(Known.One, APInt(32, 0));
+
+ auto ZLoad = DAG->getExtLoad(ISD::ZEXTLOAD, Loc, Int32VT, DAG->getEntryNode(),
+ Ptr, Int8VT, MMO);
+ Known = DAG->computeKnownBits(ZLoad);
+ EXPECT_EQ(Known.Zero, APInt(32, 0xfffffffe));
+ EXPECT_EQ(Known.One, APInt(32, 0));
+
+ auto SLoad = DAG->getExtLoad(ISD::SEXTLOAD, Loc, Int32VT, DAG->getEntryNode(),
+ Ptr, Int8VT, MMO);
+ Known = DAG->computeKnownBits(SLoad);
+ EXPECT_EQ(Known.Zero, APInt(32, 0xfffffffe));
+ EXPECT_EQ(Known.One, APInt(32, 0));
+}
+
+TEST_F(AArch64SelectionDAGTest, computeKnownBits_extload_knownnegative) {
+ SDLoc Loc;
+ auto Int8VT = EVT::getIntegerVT(Context, 8);
+ auto Int32VT = EVT::getIntegerVT(Context, 32);
+ auto Int64VT = EVT::getIntegerVT(Context, 64);
+ auto Ptr = DAG->getConstant(0, Loc, Int64VT);
+ auto PtrInfo =
+ MachinePointerInfo::getFixedStack(DAG->getMachineFunction(), 0);
+ AAMDNodes AA;
+ MDBuilder MDHelper(*DAG->getContext());
+ MDNode *Range = MDHelper.createRange(APInt(8, 0xf0), APInt(8, 0xff));
+ MachineMemOperand *MMO = DAG->getMachineFunction().getMachineMemOperand(
+ PtrInfo, MachineMemOperand::MOLoad, 8, Align(8), AA, Range);
+
+ auto ALoad = DAG->getExtLoad(ISD::EXTLOAD, Loc, Int32VT, DAG->getEntryNode(),
+ Ptr, Int8VT, MMO);
+ KnownBits Known = DAG->computeKnownBits(ALoad);
+ EXPECT_EQ(Known.Zero, APInt(32, 0));
+ EXPECT_EQ(Known.One, APInt(32, 0xf0));
+
+ auto ZLoad = DAG->getExtLoad(ISD::ZEXTLOAD, Loc, Int32VT, DAG->getEntryNode(),
+ Ptr, Int8VT, MMO);
+ Known = DAG->computeKnownBits(ZLoad);
+ EXPECT_EQ(Known.Zero, APInt(32, 0xffffff00));
+ EXPECT_EQ(Known.One, APInt(32, 0x000000f0));
+
+ auto SLoad = DAG->getExtLoad(ISD::SEXTLOAD, Loc, Int32VT, DAG->getEntryNode(),
+ Ptr, Int8VT, MMO);
+ Known = DAG->computeKnownBits(SLoad);
+ EXPECT_EQ(Known.Zero, APInt(32, 0));
+ EXPECT_EQ(Known.One, APInt(32, 0xfffffff0));
+}
+
} // end namespace llvm
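
The new tests assert that computeKnownBits folds a load's !range metadata into the result, and that the extension kind decides the widened bits: unknown for EXTLOAD, known zero for ZEXTLOAD, and copies of the deduced sign for SEXTLOAD. Below is a plain-C++ sketch of the mask arithmetic for the non-negative range case only; the helper name is made up for illustration and is not an LLVM API.

// Known-zero mask implied by a non-negative range [0, HiExclusive) at a given
// bit width: everything above the bits needed for HiExclusive-1 must be zero.
#include <cassert>
#include <cstdint>

static uint64_t knownZeroFromRange(uint64_t HiExclusive, unsigned Bits) {
  uint64_t MaxVal = HiExclusive - 1; // largest value the load can produce
  unsigned NeededBits = 0;
  while (MaxVal >> NeededBits)       // bits required to represent MaxVal
    ++NeededBits;
  uint64_t Mask = Bits == 64 ? ~0ull : ((1ull << Bits) - 1);
  uint64_t LowUnknown = NeededBits == 64 ? ~0ull : ((1ull << NeededBits) - 1);
  return Mask & ~LowUnknown;
}

int main() {
  // i8 load with !range [0, 2): only bit 0 can be set, so 0xfe is known zero
  // (the EXTLOAD expectation above).
  assert(knownZeroFromRange(2, 8) == 0xfe);
  // After zero-extension to i32 the widened bits are also known zero
  // (the ZEXTLOAD expectation above).
  assert(knownZeroFromRange(2, 32) == 0xfffffffe);
  return 0;
}
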
diff --git a/llvm/unittests/IR/VerifierTest.cpp b/llvm/unittests/IR/VerifierTest.cpp
index 31e3b9d..b2cd71e 100644
--- a/llvm/unittests/IR/VerifierTest.cpp
+++ b/llvm/unittests/IR/VerifierTest.cpp
@@ -339,5 +339,33 @@ TEST(VerifierTest, SwitchInst) {
EXPECT_TRUE(verifyFunction(*F));
}
+TEST(VerifierTest, CrossFunctionRef) {
+ LLVMContext C;
+ Module M("M", C);
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(C), /*isVarArg=*/false);
+ Function *F1 = Function::Create(FTy, Function::ExternalLinkage, "foo1", M);
+ Function *F2 = Function::Create(FTy, Function::ExternalLinkage, "foo2", M);
+ BasicBlock *Entry1 = BasicBlock::Create(C, "entry", F1);
+ BasicBlock *Entry2 = BasicBlock::Create(C, "entry", F2);
+ Type *I32 = Type::getInt32Ty(C);
+
+ Value *Alloca = new AllocaInst(I32, 0, "alloca", Entry1);
+ ReturnInst::Create(C, Entry1);
+
+ Instruction *Store = new StoreInst(ConstantInt::get(I32, 0), Alloca, Entry2);
+ ReturnInst::Create(C, Entry2);
+
+ std::string Error;
+ raw_string_ostream ErrorOS(Error);
+ EXPECT_TRUE(verifyModule(M, &ErrorOS));
+ EXPECT_TRUE(
+ StringRef(ErrorOS.str())
+ .starts_with("Referring to an instruction in another function!"));
+
+ // Explicitly erase the store to avoid a use-after-free when the module is
+ // destroyed.
+ Store->eraseFromParent();
+}
+
} // end anonymous namespace
} // end namespace llvm
diff --git a/llvm/unittests/Support/RISCVISAInfoTest.cpp b/llvm/unittests/Support/RISCVISAInfoTest.cpp
index df4c7f7..82cce23 100644
--- a/llvm/unittests/Support/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/Support/RISCVISAInfoTest.cpp
@@ -752,6 +752,7 @@ R"(All available -march extensions for RISC-V
zmmul 1.0
za128rs 1.0
za64rs 1.0
+ zacas 1.0
zawrs 1.0
zfa 1.0
zfh 1.0
@@ -873,7 +874,6 @@ Experimental extensions
zimop 0.1
zaamo 0.2
zabha 1.0
- zacas 1.0
zalasr 0.1
zalrsc 0.2
zfbfmin 1.0
diff --git a/llvm/unittests/Target/ARM/MachineInstrTest.cpp b/llvm/unittests/Target/ARM/MachineInstrTest.cpp
index 3a76054..aeb25bf 100644
--- a/llvm/unittests/Target/ARM/MachineInstrTest.cpp
+++ b/llvm/unittests/Target/ARM/MachineInstrTest.cpp
@@ -1126,9 +1126,7 @@ TEST(MachineInstr, HasSideEffects) {
VLDR_VPR_post,
VLDR_VPR_pre,
VLLDM,
- VLLDM_T2,
VLSTM,
- VLSTM_T2,
VMRS,
VMRS_FPCXTNS,
VMRS_FPCXTS,
diff --git a/llvm/utils/TableGen/DXILEmitter.cpp b/llvm/utils/TableGen/DXILEmitter.cpp
index d47df59..fc958f5 100644
--- a/llvm/utils/TableGen/DXILEmitter.cpp
+++ b/llvm/utils/TableGen/DXILEmitter.cpp
@@ -11,11 +11,14 @@
//
//===----------------------------------------------------------------------===//
+#include "CodeGenTarget.h"
#include "SequenceToOffsetTable.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Support/DXILABI.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
@@ -30,28 +33,15 @@ struct DXILShaderModel {
int Minor = 0;
};
-struct DXILParameter {
- int Pos; // position in parameter list
- ParameterKind Kind;
- StringRef Name; // short, unique name
- StringRef Doc; // the documentation description of this parameter
- bool IsConst; // whether this argument requires a constant value in the IR
- StringRef EnumName; // the name of the enum type if applicable
- int MaxValue; // the maximum value for this parameter if applicable
- DXILParameter(const Record *R);
-};
-
struct DXILOperationDesc {
- StringRef OpName; // name of DXIL operation
+ std::string OpName; // name of DXIL operation
int OpCode; // ID of DXIL operation
StringRef OpClass; // name of the opcode class
- StringRef Category; // classification for this instruction
StringRef Doc; // the documentation description of this instruction
-
- SmallVector<DXILParameter> Params; // the operands that this instruction takes
- SmallVector<ParameterKind> OverloadTypes; // overload types if applicable
- StringRef Attr; // operation attribute; reference to string representation
- // of llvm::Attribute::AttrKind
+ SmallVector<MVT::SimpleValueType> OpTypes; // Vector of operand types -
+ // return type is at index 0
+ SmallVector<std::string>
+ OpAttributes; // operation attribute represented as strings
StringRef Intrinsic; // The llvm intrinsic map to OpName. Default is "" which
// means no map exists
bool IsDeriv = false; // whether this is some kind of derivative
@@ -74,81 +64,99 @@ struct DXILOperationDesc {
};
} // end anonymous namespace
-/*!
- Convert DXIL type name string to dxil::ParameterKind
-
- @param typeNameStr Type name string
- @return ParameterKind As defined in llvm/Support/DXILABI.h
-*/
-static ParameterKind lookupParameterKind(StringRef typeNameStr) {
- auto paramKind = StringSwitch<ParameterKind>(typeNameStr)
- .Case("llvm_void_ty", ParameterKind::VOID)
- .Case("llvm_half_ty", ParameterKind::HALF)
- .Case("llvm_float_ty", ParameterKind::FLOAT)
- .Case("llvm_double_ty", ParameterKind::DOUBLE)
- .Case("llvm_i1_ty", ParameterKind::I1)
- .Case("llvm_i8_ty", ParameterKind::I8)
- .Case("llvm_i16_ty", ParameterKind::I16)
- .Case("llvm_i32_ty", ParameterKind::I32)
- .Case("llvm_i64_ty", ParameterKind::I64)
- .Case("llvm_anyfloat_ty", ParameterKind::OVERLOAD)
- .Case("llvm_anyint_ty", ParameterKind::OVERLOAD)
- .Case("dxil_handle_ty", ParameterKind::DXIL_HANDLE)
- .Case("dxil_cbuffer_ty", ParameterKind::CBUFFER_RET)
- .Case("dxil_resource_ty", ParameterKind::RESOURCE_RET)
- .Default(ParameterKind::INVALID);
- assert(paramKind != ParameterKind::INVALID &&
- "Unsupported DXIL Type specified");
- return paramKind;
+/// Convert an MVT::SimpleValueType to dxil::ParameterKind
+///
+/// \param VT Simple value type
+/// \return ParameterKind as defined in llvm/Support/DXILABI.h
+
+static ParameterKind getParameterKind(MVT::SimpleValueType VT) {
+ switch (VT) {
+ case MVT::isVoid:
+ return ParameterKind::VOID;
+ case MVT::f16:
+ return ParameterKind::HALF;
+ case MVT::f32:
+ return ParameterKind::FLOAT;
+ case MVT::f64:
+ return ParameterKind::DOUBLE;
+ case MVT::i1:
+ return ParameterKind::I1;
+ case MVT::i8:
+ return ParameterKind::I8;
+ case MVT::i16:
+ return ParameterKind::I16;
+ case MVT::i32:
+ return ParameterKind::I32;
+ case MVT::fAny:
+ case MVT::iAny:
+ return ParameterKind::OVERLOAD;
+ default:
+ llvm_unreachable("Support for specified DXIL Type not yet implemented");
+ }
}
+/// Construct an object using the DXIL Operation records specified
+/// in DXIL.td. This serves as the single source of truth for the
+/// information extracted from the specified Record R by the C++ code
+/// generated by this TableGen backend.
+/// \param R Object representing the TableGen record of a DXIL Operation
DXILOperationDesc::DXILOperationDesc(const Record *R) {
- OpName = R->getValueAsString("OpName");
+ OpName = R->getNameInitAsString();
OpCode = R->getValueAsInt("OpCode");
- OpClass = R->getValueAsDef("OpClass")->getValueAsString("Name");
- Category = R->getValueAsDef("OpCategory")->getValueAsString("Name");
- if (R->getValue("llvm_intrinsic")) {
- auto *IntrinsicDef = R->getValueAsDef("llvm_intrinsic");
+ Doc = R->getValueAsString("Doc");
+
+ if (R->getValue("LLVMIntrinsic")) {
+ auto *IntrinsicDef = R->getValueAsDef("LLVMIntrinsic");
auto DefName = IntrinsicDef->getName();
assert(DefName.starts_with("int_") && "invalid intrinsic name");
// Remove the int_ from intrinsic name.
Intrinsic = DefName.substr(4);
+ // TODO: It is expected that the return type and parameter types of a
+ // DXIL Operation are the same as those of the intrinsic. Deviations are
+ // expected to be encoded in the TableGen record specification and
+ // handled accordingly here. Support to be added later, as needed.
+ // Get the parameter type list of the intrinsic. The Types attribute
+ // contains the list as [returnType, param1Type, param2Type, ...]
+
+ OverloadParamIndex = -1;
+ auto TypeRecs = IntrinsicDef->getValueAsListOfDefs("Types");
+ unsigned TypeRecsSize = TypeRecs.size();
+ // Populate return type and parameter type names
+ for (unsigned i = 0; i < TypeRecsSize; i++) {
+ auto TR = TypeRecs[i];
+ OpTypes.emplace_back(getValueType(TR->getValueAsDef("VT")));
+ // Get the overload parameter index.
+ // TODO: Seems hacky. Is it possible for more than one parameter to
+ // be of overload kind?
+ // TODO: Check for any additional constraints specified for DXIL operation
+ // restricting return type.
+ if (i > 0) {
+ auto &CurParam = OpTypes.back();
+ if (getParameterKind(CurParam) >= ParameterKind::OVERLOAD) {
+ OverloadParamIndex = i;
+ }
+ }
+ }
+ // Get the operation class
+ OpClass = R->getValueAsDef("OpClass")->getName();
+
+ // NOTE: For now, assume that the attributes of a DXIL Operation are the
+ // same as those of the intrinsic. Deviations are expected to be encoded in
+ // the TableGen record specification and handled accordingly here. Support
+ // to be added later.
+ auto IntrPropList = IntrinsicDef->getValueAsListInit("IntrProperties");
+ auto IntrPropListSize = IntrPropList->size();
+ for (unsigned i = 0; i < IntrPropListSize; i++) {
+ OpAttributes.emplace_back(IntrPropList->getElement(i)->getAsString());
+ }
}
-
- Doc = R->getValueAsString("Doc");
-
- ListInit *ParamList = R->getValueAsListInit("Params");
- OverloadParamIndex = -1;
- for (unsigned I = 0; I < ParamList->size(); ++I) {
- Record *Param = ParamList->getElementAsRecord(I);
- Params.emplace_back(DXILParameter(Param));
- auto &CurParam = Params.back();
- if (CurParam.Kind >= ParameterKind::OVERLOAD)
- OverloadParamIndex = I;
- }
- ListInit *OverloadTypeList = R->getValueAsListInit("OverloadTypes");
-
- for (unsigned I = 0; I < OverloadTypeList->size(); ++I) {
- Record *R = OverloadTypeList->getElementAsRecord(I);
- OverloadTypes.emplace_back(lookupParameterKind(R->getNameInitAsString()));
- }
- Attr = StringRef(R->getValue("Attribute")->getNameInitAsString());
}
-DXILParameter::DXILParameter(const Record *R) {
- Name = R->getValueAsString("Name");
- Pos = R->getValueAsInt("Pos");
- Kind =
- lookupParameterKind(R->getValue("ParamType")->getValue()->getAsString());
- if (R->getValue("Doc"))
- Doc = R->getValueAsString("Doc");
- IsConst = R->getValueAsBit("IsConstant");
- EnumName = R->getValueAsString("EnumName");
- MaxValue = R->getValueAsInt("MaxValue");
-}
-
-static std::string parameterKindToString(ParameterKind Kind) {
+/// Return a string representation of ParameterKind enum
+/// \param Kind Parameter Kind enum value
+/// \return std::string string representation of input Kind
+static std::string getParameterKindStr(ParameterKind Kind) {
switch (Kind) {
case ParameterKind::INVALID:
return "INVALID";
@@ -182,92 +190,77 @@ static std::string parameterKindToString(ParameterKind Kind) {
llvm_unreachable("Unknown llvm::dxil::ParameterKind enum");
}
-static void emitDXILOpEnum(DXILOperationDesc &Op, raw_ostream &OS) {
- // Name = ID, // Doc
- OS << Op.OpName << " = " << Op.OpCode << ", // " << Op.Doc << "\n";
-}
+/// Return a string representation of the OverloadKind enum that maps to
+/// the input simple value type
+/// \param VT Simple value type
+/// \return String representation of the corresponding OverloadKind
-static std::string buildCategoryStr(StringSet<> &Cetegorys) {
- std::string Str;
- raw_string_ostream OS(Str);
- for (auto &It : Cetegorys) {
- OS << " " << It.getKey();
+static std::string getOverloadKindStr(MVT::SimpleValueType VT) {
+ switch (VT) {
+ case MVT::isVoid:
+ return "OverloadKind::VOID";
+ case MVT::f16:
+ return "OverloadKind::HALF";
+ case MVT::f32:
+ return "OverloadKind::FLOAT";
+ case MVT::f64:
+ return "OverloadKind::DOUBLE";
+ case MVT::i1:
+ return "OverloadKind::I1";
+ case MVT::i8:
+ return "OverloadKind::I8";
+ case MVT::i16:
+ return "OverloadKind::I16";
+ case MVT::i32:
+ return "OverloadKind::I32";
+ case MVT::i64:
+ return "OverloadKind::I64";
+ case MVT::iAny:
+ return "OverloadKind::I16 | OverloadKind::I32 | OverloadKind::I64";
+ case MVT::fAny:
+ return "OverloadKind::HALF | OverloadKind::FLOAT | OverloadKind::DOUBLE";
+ default:
+ llvm_unreachable(
+ "Support for specified parameter OverloadKind not yet implemented");
}
- return OS.str();
}
-// Emit enum declaration for DXIL.
+/// Emit the enums of DXIL operations
+/// \param Ops Vector of DXIL operation descriptors
+/// \param OS Output stream
static void emitDXILEnums(std::vector<DXILOperationDesc> &Ops,
raw_ostream &OS) {
- // Sort by Category + OpName.
+ // Sort by OpCode
llvm::sort(Ops, [](DXILOperationDesc &A, DXILOperationDesc &B) {
- // Group by Category first.
- if (A.Category == B.Category)
- // Inside same Category, order by OpName.
- return A.OpName < B.OpName;
- else
- return A.Category < B.Category;
+ return A.OpCode < B.OpCode;
});
OS << "// Enumeration for operations specified by DXIL\n";
OS << "enum class OpCode : unsigned {\n";
- StringMap<StringSet<>> ClassMap;
- StringRef PrevCategory = "";
for (auto &Op : Ops) {
- StringRef Category = Op.Category;
- if (Category != PrevCategory) {
- OS << "\n// " << Category << "\n";
- PrevCategory = Category;
- }
- emitDXILOpEnum(Op, OS);
- auto It = ClassMap.find(Op.OpClass);
- if (It != ClassMap.end()) {
- It->second.insert(Op.Category);
- } else {
- ClassMap[Op.OpClass].insert(Op.Category);
- }
+ // Name = ID, // Doc
+ OS << Op.OpName << " = " << Op.OpCode << ", // " << Op.Doc << "\n";
}
OS << "\n};\n\n";
- std::vector<std::pair<std::string, std::string>> ClassVec;
- for (auto &It : ClassMap) {
- ClassVec.emplace_back(
- std::pair(It.getKey().str(), buildCategoryStr(It.second)));
- }
- // Sort by Category + ClassName.
- llvm::sort(ClassVec, [](std::pair<std::string, std::string> &A,
- std::pair<std::string, std::string> &B) {
- StringRef ClassA = A.first;
- StringRef CategoryA = A.second;
- StringRef ClassB = B.first;
- StringRef CategoryB = B.second;
- // Group by Category first.
- if (CategoryA == CategoryB)
- // Inside same Category, order by ClassName.
- return ClassA < ClassB;
- else
- return CategoryA < CategoryB;
- });
-
OS << "// Groups for DXIL operations with equivalent function templates\n";
OS << "enum class OpCodeClass : unsigned {\n";
- PrevCategory = "";
- for (auto &It : ClassVec) {
-
- StringRef Category = It.second;
- if (Category != PrevCategory) {
- OS << "\n// " << Category << "\n";
- PrevCategory = Category;
- }
- StringRef Name = It.first;
- OS << Name << ",\n";
+ // Build an OpClass set to print
+ SmallSet<StringRef, 2> OpClassSet;
+ for (auto &Op : Ops) {
+ OpClassSet.insert(Op.OpClass);
+ }
+ for (auto &C : OpClassSet) {
+ OS << C << ",\n";
}
OS << "\n};\n\n";
}
-// Emit map from llvm intrinsic to DXIL operation.
+/// Emit the map from DXIL operation to LLVM or DirectX intrinsic
+/// \param Ops Vector of DXIL operation descriptors
+/// \param OS Output stream
static void emitDXILIntrinsicMap(std::vector<DXILOperationDesc> &Ops,
raw_ostream &OS) {
OS << "\n";
@@ -285,75 +278,27 @@ static void emitDXILIntrinsicMap(std::vector<DXILOperationDesc> &Ops,
OS << "\n";
}
-/*!
- Convert operation attribute string to Attribute enum
-
- @param Attr string reference
- @return std::string Attribute enum string
- */
-static std::string emitDXILOperationAttr(StringRef Attr) {
- return StringSwitch<std::string>(Attr)
- .Case("ReadNone", "Attribute::ReadNone")
- .Case("ReadOnly", "Attribute::ReadOnly")
- .Default("Attribute::None");
-}
-
-static std::string overloadKindStr(ParameterKind Overload) {
- switch (Overload) {
- case ParameterKind::HALF:
- return "OverloadKind::HALF";
- case ParameterKind::FLOAT:
- return "OverloadKind::FLOAT";
- case ParameterKind::DOUBLE:
- return "OverloadKind::DOUBLE";
- case ParameterKind::I1:
- return "OverloadKind::I1";
- case ParameterKind::I8:
- return "OverloadKind::I8";
- case ParameterKind::I16:
- return "OverloadKind::I16";
- case ParameterKind::I32:
- return "OverloadKind::I32";
- case ParameterKind::I64:
- return "OverloadKind::I64";
- case ParameterKind::VOID:
- return "OverloadKind::VOID";
- default:
- return "OverloadKind::UNKNOWN";
- }
-}
-
-static std::string
-getDXILOperationOverloads(SmallVector<ParameterKind> Overloads) {
- // Format is: OverloadKind::FLOAT | OverloadKind::HALF
- auto It = Overloads.begin();
- std::string Result;
- raw_string_ostream OS(Result);
- OS << overloadKindStr(*It);
- for (++It; It != Overloads.end(); ++It) {
- OS << " | " << overloadKindStr(*It);
+/// Convert an intrinsic's attribute strings to an Attribute enum string
+///
+/// \param Attrs Vector of attribute strings attached to the intrinsic
+/// \return std::string Attribute enum string
+
+static std::string emitDXILOperationAttr(SmallVector<std::string> Attrs) {
+ for (auto Attr : Attrs) {
+ // TODO: For now just recognize IntrNoMem and IntrReadMem as valid and
+ // ignore others.
+ if (Attr == "IntrNoMem") {
+ return "Attribute::ReadNone";
+ } else if (Attr == "IntrReadMem") {
+ return "Attribute::ReadOnly";
+ }
}
- return OS.str();
-}
-
-static std::string lowerFirstLetter(StringRef Name) {
- if (Name.empty())
- return "";
-
- std::string LowerName = Name.str();
- LowerName[0] = llvm::toLower(Name[0]);
- return LowerName;
-}
-
-static std::string getDXILOpClassName(StringRef OpClass) {
- // Lower first letter expect for special case.
- return StringSwitch<std::string>(OpClass)
- .Case("CBufferLoad", "cbufferLoad")
- .Case("CBufferLoadLegacy", "cbufferLoadLegacy")
- .Case("GSInstanceID", "gsInstanceID")
- .Default(lowerFirstLetter(OpClass));
+ return "Attribute::None";
}
+/// Emit the DXIL operation table
+/// \param Ops Vector of DXIL operation descriptors
+/// \param OS Output stream
static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
raw_ostream &OS) {
// Sort by OpCode.
@@ -369,15 +314,16 @@ static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
StringMap<SmallVector<ParameterKind>> ParameterMap;
StringSet<> ClassSet;
for (auto &Op : Ops) {
- OpStrings.add(Op.OpName.str());
+ OpStrings.add(Op.OpName);
if (ClassSet.contains(Op.OpClass))
continue;
ClassSet.insert(Op.OpClass);
- OpClassStrings.add(getDXILOpClassName(Op.OpClass));
+ OpClassStrings.add(Op.OpClass.data());
SmallVector<ParameterKind> ParamKindVec;
- for (auto &Param : Op.Params) {
- ParamKindVec.emplace_back(Param.Kind);
+ // ParamKindVec is a vector of parameters. Skip return type at index 0
+ for (unsigned i = 1; i < Op.OpTypes.size(); i++) {
+ ParamKindVec.emplace_back(getParameterKind(Op.OpTypes[i]));
}
ParameterMap[Op.OpClass] = ParamKindVec;
Parameters.add(ParamKindVec);
@@ -389,7 +335,7 @@ static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
Parameters.layout();
// Emit the DXIL operation table.
- //{dxil::OpCode::Sin, OpCodeNameIndex, OpCodeClass::Unary,
+ //{dxil::OpCode::Sin, OpCodeNameIndex, OpCodeClass::unary,
// OpCodeClassNameIndex,
// OverloadKind::FLOAT | OverloadKind::HALF, Attribute::AttrKind::ReadNone, 0,
// 3, ParameterTableOffset},
@@ -398,12 +344,12 @@ static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
OS << " static const OpCodeProperty OpCodeProps[] = {\n";
for (auto &Op : Ops) {
- OS << " { dxil::OpCode::" << Op.OpName << ", "
- << OpStrings.get(Op.OpName.str()) << ", OpCodeClass::" << Op.OpClass
- << ", " << OpClassStrings.get(getDXILOpClassName(Op.OpClass)) << ", "
- << getDXILOperationOverloads(Op.OverloadTypes) << ", "
- << emitDXILOperationAttr(Op.Attr) << ", " << Op.OverloadParamIndex
- << ", " << Op.Params.size() << ", "
+ OS << " { dxil::OpCode::" << Op.OpName << ", " << OpStrings.get(Op.OpName)
+ << ", OpCodeClass::" << Op.OpClass << ", "
+ << OpClassStrings.get(Op.OpClass.data()) << ", "
+ << getOverloadKindStr(Op.OpTypes[0]) << ", "
+ << emitDXILOperationAttr(Op.OpAttributes) << ", "
+ << Op.OverloadParamIndex << ", " << Op.OpTypes.size() - 1 << ", "
<< Parameters.get(ParameterMap[Op.OpClass]) << " },\n";
}
OS << " };\n";
@@ -418,7 +364,7 @@ static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
"OpCodeProperty &B) {\n";
OS << " return A.OpCode < B.OpCode;\n";
OS << " });\n";
- OS << " assert(Prop && \"fail to find OpCodeProperty\");\n";
+ OS << " assert(Prop && \"failed to find OpCodeProperty\");\n";
OS << " return Prop;\n";
OS << "}\n\n";
@@ -450,7 +396,7 @@ static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
Parameters.emit(
OS,
[](raw_ostream &ParamOS, ParameterKind Kind) {
- ParamOS << "ParameterKind::" << parameterKindToString(Kind);
+ ParamOS << "ParameterKind::" << getParameterKindStr(Kind);
},
"ParameterKind::INVALID");
OS << " };\n\n";
@@ -459,30 +405,28 @@ static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
OS << "}\n ";
}
+/// Entry function call that invokes the functionality of this TableGen backend
+/// \param Records TableGen records of DXIL Operations defined in DXIL.td
+/// \param OS output stream
static void EmitDXILOperation(RecordKeeper &Records, raw_ostream &OS) {
- std::vector<Record *> Ops = Records.getAllDerivedDefinitions("DXILOperation");
OS << "// Generated code, do not edit.\n";
OS << "\n";
-
+ // Get all DXIL Ops to intrinsic mapping records
+ std::vector<Record *> OpIntrMaps =
+ Records.getAllDerivedDefinitions("DXILOpMapping");
std::vector<DXILOperationDesc> DXILOps;
- DXILOps.reserve(Ops.size());
- for (auto *Record : Ops) {
+ for (auto *Record : OpIntrMaps) {
DXILOps.emplace_back(DXILOperationDesc(Record));
}
-
OS << "#ifdef DXIL_OP_ENUM\n";
emitDXILEnums(DXILOps, OS);
OS << "#endif\n\n";
-
OS << "#ifdef DXIL_OP_INTRINSIC_MAP\n";
emitDXILIntrinsicMap(DXILOps, OS);
OS << "#endif\n\n";
-
OS << "#ifdef DXIL_OP_OPERATION_TABLE\n";
emitDXILOperationTable(DXILOps, OS);
OS << "#endif\n\n";
-
- OS << "\n";
}
static TableGen::Emitter::Opt X("gen-dxil-operation", EmitDXILOperation,
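
One behavior worth noting in the rewritten backend: emitDXILOperationAttr walks the intrinsic's IntrProperties in list order and returns the first recognized memory property, with anything unrecognized falling through to Attribute::None. Below is a tiny standalone sketch of that first-match rule; it is illustrative only and merely mirrors the function above.

// Illustrative sketch of the first-match attribute selection used above.
#include <cassert>
#include <string>
#include <vector>

static std::string pickDXILAttr(const std::vector<std::string> &Attrs) {
  for (const std::string &Attr : Attrs) {
    if (Attr == "IntrNoMem")
      return "Attribute::ReadNone";
    if (Attr == "IntrReadMem")
      return "Attribute::ReadOnly";
  }
  return "Attribute::None";
}

int main() {
  // The first recognized property in list order decides the attribute.
  assert(pickDXILAttr({"IntrNoMem", "IntrReadMem"}) == "Attribute::ReadNone");
  // Unrecognized properties are ignored.
  assert(pickDXILAttr({"IntrConvergent"}) == "Attribute::None");
  return 0;
}
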
diff --git a/llvm/utils/TableGen/X86DisassemblerTables.cpp b/llvm/utils/TableGen/X86DisassemblerTables.cpp
index a48b9cf..f4d282f 100644
--- a/llvm/utils/TableGen/X86DisassemblerTables.cpp
+++ b/llvm/utils/TableGen/X86DisassemblerTables.cpp
@@ -567,7 +567,9 @@ static inline bool inheritsFrom(InstructionContext child,
case IC_EVEX_L2_W_OPSIZE_KZ_B:
return false;
case IC_EVEX_NF:
+ return WIG && inheritsFrom(child, IC_EVEX_W_NF);
case IC_EVEX_B_NF:
+ return WIG && inheritsFrom(child, IC_EVEX_W_B_NF);
case IC_EVEX_OPSIZE_NF:
case IC_EVEX_OPSIZE_B_NF:
case IC_EVEX_W_NF:
diff --git a/llvm/utils/UpdateTestChecks/common.py b/llvm/utils/UpdateTestChecks/common.py
index 4a02a92..5377752 100644
--- a/llvm/utils/UpdateTestChecks/common.py
+++ b/llvm/utils/UpdateTestChecks/common.py
@@ -388,7 +388,12 @@ def itertests(
def should_add_line_to_output(
- input_line, prefix_set, skip_global_checks=False, comment_marker=";"
+ input_line,
+ prefix_set,
+ *,
+ skip_global_checks=False,
+ skip_same_checks=False,
+ comment_marker=";",
):
# Skip any blank comment lines in the IR.
if not skip_global_checks and input_line.strip() == comment_marker:
@@ -402,9 +407,14 @@ def should_add_line_to_output(
# And skip any CHECK lines. We're building our own.
m = CHECK_RE.match(input_line)
if m and m.group(1) in prefix_set:
+ if skip_same_checks and CHECK_SAME_RE.match(input_line):
+ # The previous CHECK line was removed, so don't leave this dangling
+ return False
if skip_global_checks:
+ # Only skip checks that are for global value definitions
global_ir_value_re = re.compile(r"(\[\[|@)", flags=(re.M))
- return not global_ir_value_re.search(input_line)
+ is_global = global_ir_value_re.search(input_line)
+ return not is_global
return False
return True
@@ -483,6 +493,7 @@ PREFIX_RE = re.compile("^[a-zA-Z0-9_-]+$")
CHECK_RE = re.compile(
r"^\s*(?://|[;#])\s*([^:]+?)(?:-NEXT|-NOT|-DAG|-LABEL|-SAME|-EMPTY)?:"
)
+CHECK_SAME_RE = re.compile(r"^\s*(?://|[;#])\s*([^:]+?)(?:-SAME)?:")
UTC_ARGS_KEY = "UTC_ARGS:"
UTC_ARGS_CMD = re.compile(r".*" + UTC_ARGS_KEY + r"\s*(?P<cmd>.*)\s*$")
diff --git a/llvm/utils/gn/secondary/lldb/test/BUILD.gn b/llvm/utils/gn/secondary/lldb/test/BUILD.gn
index 06ef738..414ea49 100644
--- a/llvm/utils/gn/secondary/lldb/test/BUILD.gn
+++ b/llvm/utils/gn/secondary/lldb/test/BUILD.gn
@@ -60,7 +60,8 @@ write_lit_cfg("lit_api_site_cfg") {
"LLDB_TEST_COMMON_ARGS=",
"LLDB_TEST_USER_ARGS=",
"LLDB_ENABLE_PYTHON=0",
- "LLDB_HAS_LIBCXX=0", # FIXME: support this (?)
+ "LLDB_HAS_LIBCXX=False", # FIXME: support this (?)
+ "LLDB_TEST_USE_VENDOR_PACKAGES=False",
"LLDB_LIBS_DIR=", # FIXME: for shared builds only (?)
"LLDB_TEST_ARCH=$current_cpu",
"LLDB_TEST_COMPILER=" + rebase_path("$root_build_dir/bin/clang"),
diff --git a/llvm/utils/update_test_checks.py b/llvm/utils/update_test_checks.py
index 06c247c..b5077d7 100755
--- a/llvm/utils/update_test_checks.py
+++ b/llvm/utils/update_test_checks.py
@@ -235,6 +235,7 @@ def main():
)
else:
# "Normal" mode.
+ dropped_previous_line = False
for input_line_info in ti.iterlines(output_lines):
input_line = input_line_info.line
args = input_line_info.args
@@ -282,7 +283,10 @@ def main():
has_checked_pre_function_globals = True
if common.should_add_line_to_output(
- input_line, prefix_set, not is_in_function
+ input_line,
+ prefix_set,
+ skip_global_checks=not is_in_function,
+ skip_same_checks=dropped_previous_line,
):
# This input line of the function body will go as-is into the output.
# Except make leading whitespace uniform: 2 spaces.
@@ -290,9 +294,13 @@ def main():
r" ", input_line
)
output_lines.append(input_line)
+ dropped_previous_line = False
if input_line.strip() == "}":
is_in_function = False
continue
+ else:
+ # If we are removing a check line, and the next line is CHECK-SAME, it MUST also be removed
+ dropped_previous_line = True
if is_in_function:
continue
diff --git a/llvm/utils/vim/syntax/mir.vim b/llvm/utils/vim/syntax/mir.vim
index 51ac498..024a795 100644
--- a/llvm/utils/vim/syntax/mir.vim
+++ b/llvm/utils/vim/syntax/mir.vim
@@ -43,6 +43,8 @@ if version >= 508 || !exists("did_c_syn_inits")
endif
HiLink mirSpecialComment SpecialComment
+
+ delcommand HiLink
endif
let b:current_syntax = "mir"
diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h b/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h
index 92f3d5a..1f64b57 100644
--- a/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h
@@ -60,6 +60,26 @@ uint64_t getLargestDivisorOfTripCount(AffineForOp forOp);
DenseSet<Value, DenseMapInfo<Value>>
getInvariantAccesses(Value iv, ArrayRef<Value> indices);
+/// Given:
+/// 1. an induction variable `iv` of type AffineForOp;
+/// 2. a `memoryOp` of type const LoadOp& or const StoreOp&;
+/// determines whether `memoryOp` has a contiguous access along `iv`. Contiguous
+/// is defined as either invariant or varying only along a unique MemRef dim.
+/// Upon success, the unique MemRef dim is written in `memRefDim` (or -1 to
+/// convey the memRef access is invariant along `iv`).
+///
+/// Prerequisites:
+/// 1. `memRefDim` != nullptr;
+/// 2. `iv` of the proper type;
+/// 3. the MemRef accessed by `memoryOp` has no layout map or at most an
+/// identity layout map.
+///
+/// Currently only memrefs with no layout map or an identity layout map are
+/// supported; for any other layout map this conservatively returns false.
+template <typename LoadOrStoreOp>
+bool isContiguousAccess(Value iv, LoadOrStoreOp memoryOp, int *memRefDim);
+
using VectorizableLoopFun = std::function<bool(AffineForOp)>;
/// Checks whether the loop is structurally vectorizable; i.e.:
diff --git a/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td b/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td
index c50fdf3..7b9fbb4 100644
--- a/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td
+++ b/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td
@@ -658,6 +658,70 @@ def EmitC_LiteralOp : EmitC_Op<"literal", [Pure]> {
let assemblyFormat = "$value attr-dict `:` type($result)";
}
+def EmitC_LogicalAndOp : EmitC_BinaryOp<"logical_and", []> {
+ let summary = "Logical and operation";
+ let description = [{
+ With the `logical_and` operation the logical operator && (and) can
+ be applied.
+
+ Example:
+
+ ```mlir
+ %0 = emitc.logical_and %arg0, %arg1 : i32, i32
+ ```
+ ```c++
+ // Code emitted for the operation above.
+ bool v3 = v1 && v2;
+ ```
+ }];
+
+ let results = (outs I1);
+ let assemblyFormat = "operands attr-dict `:` type(operands)";
+}
+
+def EmitC_LogicalNotOp : EmitC_Op<"logical_not", []> {
+ let summary = "Logical not operation";
+ let description = [{
+ With the `logical_not` operation the logical operator ! (negation) can
+ be applied.
+
+ Example:
+
+ ```mlir
+ %0 = emitc.logical_not %arg0 : i32
+ ```
+ ```c++
+ // Code emitted for the operation above.
+ bool v2 = !v1;
+ ```
+ }];
+
+ let arguments = (ins AnyType);
+ let results = (outs I1);
+ let assemblyFormat = "operands attr-dict `:` type(operands)";
+}
+
+def EmitC_LogicalOrOp : EmitC_BinaryOp<"logical_or", []> {
+ let summary = "Logical or operation";
+ let description = [{
+ With the `logical_or` operation the logical operator || (inclusive or)
+ can be applied.
+
+ Example:
+
+ ```mlir
+ %0 = emitc.logical_or %arg0, %arg1 : i32, i32
+ ```
+ ```c++
+ // Code emitted for the operation above.
+ bool v3 = v1 || v2;
+ ```
+ }];
+
+ let results = (outs I1);
+ let assemblyFormat = "operands attr-dict `:` type(operands)";
+}
+
def EmitC_MulOp : EmitC_BinaryOp<"mul", []> {
let summary = "Multiplication operation";
let description = [{
diff --git a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
index 955dd1e..bb373af 100644
--- a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
@@ -24,6 +24,7 @@ include "mlir/IR/EnumAttr.td"
include "mlir/IR/SymbolInterfaces.td"
include "mlir/Interfaces/ControlFlowInterfaces.td"
include "mlir/Interfaces/DataLayoutInterfaces.td"
+include "mlir/IR/OpAsmInterface.td"
include "mlir/Interfaces/FunctionInterfaces.td"
include "mlir/Interfaces/InferIntRangeInterface.td"
include "mlir/Interfaces/InferTypeOpInterface.td"
@@ -50,9 +51,21 @@ def GPU_DimensionAttr : EnumAttr<GPU_Dialect, GPU_Dimension, "dim">;
class GPU_IndexOp<string mnemonic, list<Trait> traits = []> :
GPU_Op<mnemonic, !listconcat(traits, [
- Pure, DeclareOpInterfaceMethods<InferIntRangeInterface>])>,
+ Pure,
+ DeclareOpInterfaceMethods<InferIntRangeInterface>,
+ DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>])>,
Arguments<(ins GPU_DimensionAttr:$dimension)>, Results<(outs Index)> {
let assemblyFormat = "$dimension attr-dict";
+ let extraClassDefinition = [{
+ void $cppClass::getAsmResultNames(
+ llvm::function_ref<void(mlir::Value, mlir::StringRef)> setNameFn) {
+ auto dimStr = stringifyDimension(getDimensionAttr().getValue());
+ auto opName = getOperationName();
+ opName.consume_front("gpu.");
+ SmallString<8> resultName({opName, "_", dimStr});
+ setNameFn(getResult(), resultName);
+ }
+ }];
}
def GPU_ClusterDimOp : GPU_IndexOp<"cluster_dim"> {
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index 3127cf1..3a5447d 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -257,9 +257,10 @@ def SparseTensor_ReinterpretMapOp : SparseTensor_Op<"reinterpret_map", [NoMemory
let hasVerifier = 1;
}
-def SparseTensor_ToPositionsOp : SparseTensor_Op<"positions", [Pure]>,
+def SparseTensor_ToPositionsOp : SparseTensor_Op<"positions",
+ [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]>,
Arguments<(ins AnySparseTensor:$tensor, LevelAttr:$level)>,
- Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
+ Results<(outs AnyNon0RankedMemRef:$result)> {
let summary = "Extracts the `level`-th positions array of the `tensor`";
let description = [{
Returns the positions array of the tensor's storage at the given
@@ -283,9 +284,10 @@ def SparseTensor_ToPositionsOp : SparseTensor_Op<"positions", [Pure]>,
let hasVerifier = 1;
}
-def SparseTensor_ToCoordinatesOp : SparseTensor_Op<"coordinates", [Pure]>,
+def SparseTensor_ToCoordinatesOp : SparseTensor_Op<"coordinates",
+ [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]>,
Arguments<(ins AnySparseTensor:$tensor, LevelAttr:$level)>,
- Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
+ Results<(outs AnyNon0RankedMemRef:$result)> {
let summary = "Extracts the `level`-th coordinates array of the `tensor`";
let description = [{
Returns the coordinates array of the tensor's storage at the given
@@ -309,9 +311,10 @@ def SparseTensor_ToCoordinatesOp : SparseTensor_Op<"coordinates", [Pure]>,
let hasVerifier = 1;
}
-def SparseTensor_ToCoordinatesBufferOp : SparseTensor_Op<"coordinates_buffer", [Pure]>,
+def SparseTensor_ToCoordinatesBufferOp : SparseTensor_Op<"coordinates_buffer",
+ [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]>,
Arguments<(ins AnySparseTensor:$tensor)>,
- Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
+ Results<(outs AnyNon0RankedMemRef:$result)> {
let summary = "Extracts the linear coordinates array from a tensor";
let description = [{
Returns the linear coordinates array for a sparse tensor with
@@ -340,9 +343,10 @@ def SparseTensor_ToCoordinatesBufferOp : SparseTensor_Op<"coordinates_buffer", [
let hasVerifier = 1;
}
-def SparseTensor_ToValuesOp : SparseTensor_Op<"values", [Pure]>,
+def SparseTensor_ToValuesOp : SparseTensor_Op<"values",
+ [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]>,
Arguments<(ins AnySparseTensor:$tensor)>,
- Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
+ Results<(outs AnyNon0RankedMemRef:$result)> {
let summary = "Extracts numerical values array from a tensor";
let description = [{
Returns the values array of the sparse storage format for the given
@@ -1453,4 +1457,26 @@ def SparseTensor_ForeachOp : SparseTensor_Op<"foreach",
let hasVerifier = 1;
}
+//===----------------------------------------------------------------------===//
+// Sparse Tensor Debugging Operations.
+//===----------------------------------------------------------------------===//
+
+def SparseTensor_PrintOp : SparseTensor_Op<"print">,
+ Arguments<(ins AnySparseTensor:$tensor)> {
+ string summary = "Prints a sparse tensor (for testing and debugging)";
+ string description = [{
+ Prints the individual components of a sparse tensor (the positions,
+ coordinates, and values arrays) to stdout for testing and debugging
+ purposes. This operation lowers to just a few primitives in a lightweight
+ runtime support library, simplifying support for this operation on new
+ platforms.
+
+ Example:
+
+ ```mlir
+ sparse_tensor.print %tensor : tensor<1024x1024xf64, #CSR>
+ ```
+ }];
+ let assemblyFormat = "$tensor attr-dict `:` type($tensor)";
+}
+
#endif // SPARSETENSOR_OPS
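
With `InferTypeOpInterface` declared on these four extraction ops, the ODS-generated builders infer the memref result type from the tensor's sparse encoding, so call sites can omit it. A minimal sketch of the resulting builder usage, assuming `builder`, `loc`, `tensor`, and `lvl` are already in scope (this mirrors the call-site changes later in this patch; it is not new API):

```c++
// Result types are inferred from the sparse encoding; no explicit memref
// type is passed to the builders anymore.
Value pos = builder.create<ToPositionsOp>(loc, tensor, lvl);
Value crd = builder.create<ToCoordinatesOp>(loc, tensor, lvl);
Value val = builder.create<ToValuesOp>(loc, tensor);
```
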
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 19cc914..337f8bb 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -1532,7 +1532,8 @@ public:
auto punct = printOp.getPunctuation();
if (auto stringLiteral = printOp.getStringLiteral()) {
LLVM::createPrintStrCall(rewriter, loc, parent, "vector_print_str",
- *stringLiteral, *getTypeConverter());
+ *stringLiteral, *getTypeConverter(),
+ /*addNewline=*/false);
} else if (punct != PrintPunctuation::NoPunctuation) {
emitCall(rewriter, printOp->getLoc(), [&] {
switch (punct) {
diff --git a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
index e645afe..fc0515b 100644
--- a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
@@ -195,43 +195,25 @@ DenseSet<Value> mlir::affine::getInvariantAccesses(Value iv,
return res;
}
-/// Given:
-/// 1. an induction variable `iv` of type AffineForOp;
-/// 2. a `memoryOp` of type const LoadOp& or const StoreOp&;
-/// determines whether `memoryOp` has a contiguous access along `iv`. Contiguous
-/// is defined as either invariant or varying only along a unique MemRef dim.
-/// Upon success, the unique MemRef dim is written in `memRefDim` (or -1 to
-/// convey the memRef access is invariant along `iv`).
-///
-/// Prerequisites:
-/// 1. `memRefDim` ~= nullptr;
-/// 2. `iv` of the proper type;
-/// 3. the MemRef accessed by `memoryOp` has no layout map or at most an
-/// identity layout map.
-///
-/// Currently only supports no layoutMap or identity layoutMap in the MemRef.
-/// Returns false if the MemRef has a non-identity layoutMap or more than 1
-/// layoutMap. This is conservative.
-///
-// TODO: check strides.
+// TODO: check access stride.
template <typename LoadOrStoreOp>
-static bool isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
- int *memRefDim) {
- static_assert(
- llvm::is_one_of<LoadOrStoreOp, AffineLoadOp, AffineStoreOp>::value,
- "Must be called on either LoadOp or StoreOp");
+bool mlir::affine::isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
+ int *memRefDim) {
+ static_assert(llvm::is_one_of<LoadOrStoreOp, AffineReadOpInterface,
+ AffineWriteOpInterface>::value,
+ "Must be called on either an affine read or write op");
assert(memRefDim && "memRefDim == nullptr");
auto memRefType = memoryOp.getMemRefType();
if (!memRefType.getLayout().isIdentity())
- return memoryOp.emitError("NYI: non-trivial layoutMap"), false;
+ return memoryOp.emitError("NYI: non-trivial layout map"), false;
int uniqueVaryingIndexAlongIv = -1;
auto accessMap = memoryOp.getAffineMap();
SmallVector<Value, 4> mapOperands(memoryOp.getMapOperands());
unsigned numDims = accessMap.getNumDims();
for (unsigned i = 0, e = memRefType.getRank(); i < e; ++i) {
- // Gather map operands used result expr 'i' in 'exprOperands'.
+ // Gather map operands used in result expr 'i' in 'exprOperands'.
SmallVector<Value, 4> exprOperands;
auto resultExpr = accessMap.getResult(i);
resultExpr.walk([&](AffineExpr expr) {
@@ -241,7 +223,7 @@ static bool isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
exprOperands.push_back(mapOperands[numDims + symExpr.getPosition()]);
});
// Check access invariance of each operand in 'exprOperands'.
- for (auto exprOperand : exprOperands) {
+ for (Value exprOperand : exprOperands) {
if (!isAccessIndexInvariant(iv, exprOperand)) {
if (uniqueVaryingIndexAlongIv != -1) {
// 2+ varying indices -> do not vectorize along iv.
@@ -259,6 +241,13 @@ static bool isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
return true;
}
+template bool mlir::affine::isContiguousAccess(Value iv,
+ AffineReadOpInterface loadOp,
+ int *memRefDim);
+template bool mlir::affine::isContiguousAccess(Value iv,
+ AffineWriteOpInterface loadOp,
+ int *memRefDim);
+
template <typename LoadOrStoreOp>
static bool isVectorElement(LoadOrStoreOp memoryOp) {
auto memRefType = memoryOp.getMemRefType();
@@ -344,10 +333,13 @@ bool mlir::affine::isVectorizableLoopBody(
auto load = dyn_cast<AffineLoadOp>(op);
auto store = dyn_cast<AffineStoreOp>(op);
int thisOpMemRefDim = -1;
- bool isContiguous = load ? isContiguousAccess(loop.getInductionVar(), load,
- &thisOpMemRefDim)
- : isContiguousAccess(loop.getInductionVar(), store,
- &thisOpMemRefDim);
+ bool isContiguous =
+ load ? isContiguousAccess(loop.getInductionVar(),
+ cast<AffineReadOpInterface>(*load),
+ &thisOpMemRefDim)
+ : isContiguousAccess(loop.getInductionVar(),
+ cast<AffineWriteOpInterface>(*store),
+ &thisOpMemRefDim);
if (thisOpMemRefDim != -1) {
// If memory accesses vary across different dimensions then the loop is
// not vectorizable.
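
With `isContiguousAccess` now exported and templated over the affine read/write interfaces, analyses outside this file can query it directly. A hypothetical caller sketch, assuming the declaration is exposed in `mlir/Dialect/Affine/Analysis/LoopAnalysis.h` (implied by the explicit instantiations above) and that `op` and the surrounding `forOp` are in scope:

```c++
// memRefDim is set to -1 when the access is invariant along the induction
// variable, otherwise to the unique memref dimension varying along it.
int memRefDim = -1;
if (auto read = dyn_cast<AffineReadOpInterface>(op)) {
  if (affine::isContiguousAccess(forOp.getInductionVar(), read, &memRefDim)) {
    // Access is contiguous (or invariant) along this loop.
  }
}
```
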
diff --git a/mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp b/mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp
index 8deb8f0..7f246da 100644
--- a/mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp
+++ b/mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp
@@ -261,68 +261,62 @@ struct BFloat16TruncFOpConverter : public OpRewritePattern<arith::TruncFOp> {
return rewriter.notifyMatchFailure(op, "not a trunc of f32 to bf16.");
}
- Type i1Ty = b.getI1Type();
Type i16Ty = b.getI16Type();
Type i32Ty = b.getI32Type();
Type f32Ty = b.getF32Type();
if (auto shapedTy = dyn_cast<ShapedType>(operandTy)) {
- i1Ty = shapedTy.clone(i1Ty);
i16Ty = shapedTy.clone(i16Ty);
i32Ty = shapedTy.clone(i32Ty);
f32Ty = shapedTy.clone(f32Ty);
}
- Value bitcast = b.create<arith::BitcastOp>(i32Ty, operand);
-
- Value c23 = createConst(op.getLoc(), i32Ty, 23, rewriter);
- Value c31 = createConst(op.getLoc(), i32Ty, 31, rewriter);
- Value c23Mask = createConst(op.getLoc(), i32Ty, (1 << 23) - 1, rewriter);
- Value expMask =
- createConst(op.getLoc(), i32Ty, ((1 << 8) - 1) << 23, rewriter);
- Value expMax =
- createConst(op.getLoc(), i32Ty, ((1 << 8) - 2) << 23, rewriter);
-
- // Grab the sign bit.
- Value sign = b.create<arith::ShRUIOp>(bitcast, c31);
-
- // Our mantissa rounding value depends on the sign bit and the last
- // truncated bit.
- Value cManRound = createConst(op.getLoc(), i32Ty, (1 << 15), rewriter);
- cManRound = b.create<arith::SubIOp>(cManRound, sign);
-
- // Grab out the mantissa and directly apply rounding.
- Value man = b.create<arith::AndIOp>(bitcast, c23Mask);
- Value manRound = b.create<arith::AddIOp>(man, cManRound);
-
- // Grab the overflow bit and shift right if we overflow.
- Value roundBit = b.create<arith::ShRUIOp>(manRound, c23);
- Value manNew = b.create<arith::ShRUIOp>(manRound, roundBit);
-
- // Grab the exponent and round using the mantissa's carry bit.
- Value exp = b.create<arith::AndIOp>(bitcast, expMask);
- Value expCarry = b.create<arith::AddIOp>(exp, manRound);
- expCarry = b.create<arith::AndIOp>(expCarry, expMask);
-
- // If the exponent is saturated, we keep the max value.
- Value expCmp =
- b.create<arith::CmpIOp>(arith::CmpIPredicate::uge, exp, expMax);
- exp = b.create<arith::SelectOp>(expCmp, exp, expCarry);
-
- // If the exponent is max and we rolled over, keep the old mantissa.
- Value roundBitBool = b.create<arith::TruncIOp>(i1Ty, roundBit);
- Value keepOldMan = b.create<arith::AndIOp>(expCmp, roundBitBool);
- man = b.create<arith::SelectOp>(keepOldMan, man, manNew);
-
- // Assemble the now rounded f32 value (as an i32).
- Value rounded = b.create<arith::ShLIOp>(sign, c31);
- rounded = b.create<arith::OrIOp>(rounded, exp);
- rounded = b.create<arith::OrIOp>(rounded, man);
-
+ // Algorithm borrowed from this excellent code:
+ // https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/c10/util/BFloat16.h#L60-L79
+    // The key idea there is to let the addition of the rounding_bias to
+    // the mantissa simply overflow into the exponent bits. It's a bit of an
+ // aggressive, obfuscating optimization, but it is well-tested code, and it
+ // results in more concise and efficient IR.
+ // The case of NaN is handled separately (see isNaN and the final select).
+ // The case of infinities is NOT handled separately, which deserves an
+ // explanation. As the encoding of infinities has zero mantissa, the
+    // rounding-bias addition never carries into the exponent, so the bias just
+    // gets truncated away, and as bfloat16 and float32 have the same number of
+ // exponent bits, that simple truncation is the desired outcome for
+ // infinities.
+ Value isNan =
+ b.create<arith::CmpFOp>(arith::CmpFPredicate::UNE, operand, operand);
+ // Constant used to make the rounding bias.
+ Value c7FFF = createConst(op.getLoc(), i32Ty, 0x7fff, rewriter);
+ // Constant used to generate a quiet NaN.
+ Value c7FC0_i16 = createConst(op.getLoc(), i16Ty, 0x7fc0, rewriter);
+ // Small constants used to address bits.
Value c16 = createConst(op.getLoc(), i32Ty, 16, rewriter);
- Value shr = b.create<arith::ShRUIOp>(rounded, c16);
- Value trunc = b.create<arith::TruncIOp>(i16Ty, shr);
- Value result = b.create<arith::BitcastOp>(resultTy, trunc);
-
+ Value c1 = createConst(op.getLoc(), i32Ty, 1, rewriter);
+ // Reinterpret the input f32 value as bits.
+ Value bitcast = b.create<arith::BitcastOp>(i32Ty, operand);
+ // Read bit 16 as a value in {0,1}.
+ Value bit16 =
+ b.create<arith::AndIOp>(b.create<arith::ShRUIOp>(bitcast, c16), c1);
+ // Determine the rounding bias to add as either 0x7fff or 0x8000 depending
+ // on bit 16, implementing the tie-breaking "to nearest even".
+ Value roundingBias = b.create<arith::AddIOp>(bit16, c7FFF);
+ // Add the rounding bias. Generally we want this to be added to the
+    // mantissa, but nothing prevents it from carrying into the exponent
+    // bits, which might look like a bug, but this is the magic trick here:
+ // when that happens, the mantissa gets reset to zero and the exponent
+ // gets incremented by the carry... which is actually exactly what we
+ // want.
+ Value biased = b.create<arith::AddIOp>(bitcast, roundingBias);
+ // Now that the rounding-bias has been added, truncating the low bits
+ // yields the correctly rounded result.
+ Value biasedAndShifted = b.create<arith::ShRUIOp>(biased, c16);
+ Value normalCaseResult_i16 =
+ b.create<arith::TruncIOp>(i16Ty, biasedAndShifted);
+ // Select either the above-computed result, or a quiet NaN constant
+ // if the input was NaN.
+ Value select =
+ b.create<arith::SelectOp>(isNan, c7FC0_i16, normalCaseResult_i16);
+ Value result = b.create<arith::BitcastOp>(resultTy, select);
rewriter.replaceOp(op, result);
return success();
}
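
The rounding-bias trick explained in the comments above is easier to follow in scalar form. Below is a plain C++ sketch mirroring the referenced PyTorch code; it is illustrative only and not part of the patch:

```c++
#include <cstdint>
#include <cstring>

// Scalar rendition of the f32 -> bf16 truncation with round-to-nearest-even.
uint16_t truncF32ToBF16(float f) {
  if (f != f)                 // NaN: produce a quiet NaN bit pattern.
    return 0x7fc0;
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  uint32_t bit16 = (bits >> 16) & 1;       // last bit that survives truncation
  uint32_t roundingBias = 0x7fff + bit16;  // 0x7fff or 0x8000: ties to even
  bits += roundingBias;                    // may carry into the exponent
  return static_cast<uint16_t>(bits >> 16);
}
```
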
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 69c3413..232635c 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -1445,6 +1445,38 @@ OpFoldResult ReinterpretMapOp::fold(FoldAdaptor adaptor) {
return {};
}
+template <typename ToBufferOp>
+static LogicalResult inferSparseBufferType(ValueRange ops, DictionaryAttr attr,
+ OpaqueProperties prop,
+ RegionRange region,
+ SmallVectorImpl<mlir::Type> &ret) {
+ typename ToBufferOp::Adaptor adaptor(ops, attr, prop, region);
+ SparseTensorType stt = getSparseTensorType(adaptor.getTensor());
+ Type elemTp = nullptr;
+ bool withStride = false;
+ if constexpr (std::is_same_v<ToBufferOp, ToPositionsOp>) {
+ elemTp = stt.getPosType();
+ } else if constexpr (std::is_same_v<ToBufferOp, ToCoordinatesOp> ||
+ std::is_same_v<ToBufferOp, ToCoordinatesBufferOp>) {
+ elemTp = stt.getCrdType();
+ if constexpr (std::is_same_v<ToBufferOp, ToCoordinatesOp>)
+ withStride = stt.getAoSCOOStart() <= adaptor.getLevel();
+ } else if constexpr (std::is_same_v<ToBufferOp, ToValuesOp>) {
+ elemTp = stt.getElementType();
+ }
+
+ assert(elemTp && "unhandled operation.");
+ SmallVector<int64_t> bufShape = stt.getBatchLvlShape();
+ bufShape.push_back(ShapedType::kDynamic);
+
+ auto layout = withStride ? StridedLayoutAttr::StridedLayoutAttr::get(
+ stt.getContext(), ShapedType::kDynamic,
+ {ShapedType::kDynamic})
+ : StridedLayoutAttr();
+ ret.emplace_back(MemRefType::get(bufShape, elemTp, layout));
+ return success();
+}
+
LogicalResult ToPositionsOp::verify() {
auto stt = getSparseTensorType(getTensor());
if (failed(lvlIsInBounds(getLevel(), getTensor())))
@@ -1454,6 +1486,14 @@ LogicalResult ToPositionsOp::verify() {
return success();
}
+LogicalResult
+ToPositionsOp::inferReturnTypes(MLIRContext *ctx, std::optional<Location> loc,
+ ValueRange ops, DictionaryAttr attr,
+ OpaqueProperties prop, RegionRange region,
+ SmallVectorImpl<mlir::Type> &ret) {
+ return inferSparseBufferType<ToPositionsOp>(ops, attr, prop, region, ret);
+}
+
LogicalResult ToCoordinatesOp::verify() {
auto stt = getSparseTensorType(getTensor());
if (failed(lvlIsInBounds(getLevel(), getTensor())))
@@ -1463,6 +1503,14 @@ LogicalResult ToCoordinatesOp::verify() {
return success();
}
+LogicalResult
+ToCoordinatesOp::inferReturnTypes(MLIRContext *ctx, std::optional<Location> loc,
+ ValueRange ops, DictionaryAttr attr,
+ OpaqueProperties prop, RegionRange region,
+ SmallVectorImpl<mlir::Type> &ret) {
+ return inferSparseBufferType<ToCoordinatesOp>(ops, attr, prop, region, ret);
+}
+
LogicalResult ToCoordinatesBufferOp::verify() {
auto stt = getSparseTensorType(getTensor());
if (stt.getAoSCOOStart() >= stt.getLvlRank())
@@ -1470,6 +1518,14 @@ LogicalResult ToCoordinatesBufferOp::verify() {
return success();
}
+LogicalResult ToCoordinatesBufferOp::inferReturnTypes(
+ MLIRContext *ctx, std::optional<Location> loc, ValueRange ops,
+ DictionaryAttr attr, OpaqueProperties prop, RegionRange region,
+ SmallVectorImpl<mlir::Type> &ret) {
+ return inferSparseBufferType<ToCoordinatesBufferOp>(ops, attr, prop, region,
+ ret);
+}
+
LogicalResult ToValuesOp::verify() {
auto stt = getSparseTensorType(getTensor());
auto mtp = getMemRefType(getResult());
@@ -1478,6 +1534,15 @@ LogicalResult ToValuesOp::verify() {
return success();
}
+LogicalResult ToValuesOp::inferReturnTypes(MLIRContext *ctx,
+ std::optional<Location> loc,
+ ValueRange ops, DictionaryAttr attr,
+ OpaqueProperties prop,
+ RegionRange region,
+ SmallVectorImpl<mlir::Type> &ret) {
+ return inferSparseBufferType<ToValuesOp>(ops, attr, prop, region, ret);
+}
+
LogicalResult ToSliceOffsetOp::verify() {
auto rank = getRankedTensorType(getSlice()).getRank();
if (rank <= getDim().getSExtValue() || getDim().getSExtValue() < 0)
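
To make the inferred types concrete, here is a rough sketch of what `inferSparseBufferType` builds for a plain 2-D CSR tensor with `index` positions and no batch levels; `ctx` stands for a placeholder `MLIRContext *` and the snippet is illustrative only:

```c++
// The inferred buffer type is a rank-1, dynamically sized memref of the
// position/coordinate/element type; batch levels, if any, prepend their
// static sizes.
SmallVector<int64_t> bufShape;             // no batch levels in this example
bufShape.push_back(ShapedType::kDynamic);  // trailing dynamic dimension
MemRefType posTy = MemRefType::get(bufShape, IndexType::get(ctx));
// Coordinates inside an AoS COO region get the same shape, but with a fully
// dynamic StridedLayoutAttr instead of the default identity layout.
```
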
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
index cdee8a4..cb75f6a 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
@@ -496,11 +496,11 @@ static Value genFirstPosOrCrds(OpBuilder &builder, Location loc, Value a,
if (format == CuSparseFormat::kCOO) {
// Library uses SoA COO, direct IR uses AoS COO.
if (enableRT)
- return genToCoordinates(builder, loc, a, 0);
- return genToCoordinatesBuffer(builder, loc, a);
+ return builder.create<ToCoordinatesOp>(loc, a, 0);
+ return builder.create<ToCoordinatesBufferOp>(loc, a);
}
// Formats CSR/CSC and BSR use positions at 1.
- return genToPositions(builder, loc, a, 1);
+ return builder.create<ToPositionsOp>(loc, a, 1);
}
/// Generates the second coordinates of a sparse matrix.
@@ -510,7 +510,7 @@ static Value genSecondCrds(OpBuilder &builder, Location loc, Value a,
if (isCOO && !enableRT)
return Value(); // nothing needed
// Formats CSR/CSC and BSR use coordinates at 1.
- return genToCoordinates(builder, loc, a, 1);
+ return builder.create<ToCoordinatesOp>(loc, a, 1);
}
/// Generates the sparse matrix handle.
@@ -584,7 +584,7 @@ static LogicalResult rewriteSpMV(PatternRewriter &rewriter,
Value szX = linalg::createOrFoldDimOp(rewriter, loc, a, 1);
Value memR = genFirstPosOrCrds(rewriter, loc, a, format, enableRT);
Value memC = genSecondCrds(rewriter, loc, a, format, enableRT); // or empty
- Value memV = genToValues(rewriter, loc, a);
+ Value memV = rewriter.create<ToValuesOp>(loc, a);
Value rowA = genAllocCopy(rewriter, loc, memR, tokens);
Value colA = memC ? genAllocCopy(rewriter, loc, memC, tokens) : Value();
Value valA = genAllocCopy(rewriter, loc, memV, tokens);
@@ -682,7 +682,7 @@ static LogicalResult rewriteSpMM(PatternRewriter &rewriter,
Value szn = linalg::createOrFoldDimOp(rewriter, loc, b, 1);
Value memR = genFirstPosOrCrds(rewriter, loc, a, format, enableRT);
Value memC = genSecondCrds(rewriter, loc, a, format, enableRT); // or empty
- Value memV = genToValues(rewriter, loc, a);
+ Value memV = rewriter.create<ToValuesOp>(loc, a);
Value rowA = genAllocCopy(rewriter, loc, memR, tokens);
Value colA = memC ? genAllocCopy(rewriter, loc, memC, tokens) : Value();
Value valA = genAllocCopy(rewriter, loc, memV, tokens);
@@ -785,10 +785,10 @@ static LogicalResult rewriteSpGEMM(PatternRewriter &rewriter,
Value szn = linalg::createOrFoldDimOp(rewriter, loc, b, 1);
Value amemR = genFirstPosOrCrds(rewriter, loc, a, format, enableRT);
Value amemC = genSecondCrds(rewriter, loc, a, format, enableRT); // not empty
- Value amemV = genToValues(rewriter, loc, a);
+ Value amemV = rewriter.create<ToValuesOp>(loc, a);
Value bmemR = genFirstPosOrCrds(rewriter, loc, b, format, enableRT);
Value bmemC = genSecondCrds(rewriter, loc, b, format, enableRT); // not empty
- Value bmemV = genToValues(rewriter, loc, b);
+ Value bmemV = rewriter.create<ToValuesOp>(loc, b);
Value rowA = genAllocCopy(rewriter, loc, amemR, tokens);
Value colA = genAllocCopy(rewriter, loc, amemC, tokens);
Value valA = genAllocCopy(rewriter, loc, amemV, tokens);
@@ -1081,7 +1081,7 @@ static LogicalResult rewriteSDDMM(PatternRewriter &rewriter,
Value matB = genAllocCopy(rewriter, loc, bufB, tokens);
Value memR = genFirstPosOrCrds(rewriter, loc, c, format, enableRT);
Value memC = genSecondCrds(rewriter, loc, c, format, enableRT); // or empty
- Value memV = genToValues(rewriter, loc, c);
+ Value memV = rewriter.create<ToValuesOp>(loc, c);
Value rowC = genAllocCopy(rewriter, loc, memR, tokens);
Value colC = memC ? genAllocCopy(rewriter, loc, memC, tokens) : Value();
Value valC = genAllocCopy(rewriter, loc, memV, tokens);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index d5eec4a..4e33931 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -1058,17 +1058,9 @@ public:
// Replace the requested coordinates access with corresponding field.
// The cast_op is inserted by type converter to intermix 1:N type
// conversion.
- Location loc = op.getLoc();
auto desc = getDescriptorFromTensorTuple(adaptor.getTensor());
- Value field = desc.getCrdMemRefOrView(rewriter, loc, op.getLevel());
-
- // Insert a cast to bridge the actual type to the user expected type. If the
- // actual type and the user expected type aren't compatible, the compiler or
- // the runtime will issue an error.
- Type resType = op.getResult().getType();
- if (resType != field.getType())
- field = rewriter.create<memref::CastOp>(loc, resType, field);
- rewriter.replaceOp(op, field);
+ rewriter.replaceOp(
+ op, desc.getCrdMemRefOrView(rewriter, op.getLoc(), op.getLevel()));
return success();
}
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index 1bcc131..6ff2146 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -21,9 +21,11 @@
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensorType.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
+#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Support/LLVM.h"
@@ -598,6 +600,101 @@ public:
}
};
+/// Sparse rewriting rule for the print operator. This operation is mainly used
+/// for debugging and testing. As such, it lowers to the vector.print operation
+/// which requires only very light-weight runtime support.
+struct PrintRewriter : public OpRewritePattern<PrintOp> {
+public:
+ using OpRewritePattern::OpRewritePattern;
+ LogicalResult matchAndRewrite(PrintOp op,
+ PatternRewriter &rewriter) const override {
+ Location loc = op.getLoc();
+ auto tensor = op.getTensor();
+ auto stt = getSparseTensorType(tensor);
+ // Header with NSE.
+ auto nse = rewriter.create<NumberOfEntriesOp>(loc, tensor);
+ rewriter.create<vector::PrintOp>(
+ loc, rewriter.getStringAttr("---- Sparse Tensor ----\nnse = "));
+ rewriter.create<vector::PrintOp>(loc, nse);
+ // Use the "codegen" foreach loop construct to iterate over
+ // all typical sparse tensor components for printing.
+ foreachFieldAndTypeInSparseTensor(stt, [&rewriter, &loc, &tensor,
+ &stt](Type, FieldIndex,
+ SparseTensorFieldKind kind,
+ Level l, LevelType) {
+ switch (kind) {
+ case SparseTensorFieldKind::StorageSpec: {
+ break;
+ }
+ case SparseTensorFieldKind::PosMemRef: {
+ auto lvl = constantIndex(rewriter, loc, l);
+ rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("pos["));
+ rewriter.create<vector::PrintOp>(
+ loc, lvl, vector::PrintPunctuation::NoPunctuation);
+ rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("] : "));
+ auto pos = rewriter.create<ToPositionsOp>(loc, tensor, l);
+ printContents(rewriter, loc, pos);
+ break;
+ }
+ case SparseTensorFieldKind::CrdMemRef: {
+ auto lvl = constantIndex(rewriter, loc, l);
+ rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("crd["));
+ rewriter.create<vector::PrintOp>(
+ loc, lvl, vector::PrintPunctuation::NoPunctuation);
+ rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("] : "));
+ Value crd = nullptr;
+        // TODO: eliminate ToCoordinatesBufferOp!
+ if (stt.getAoSCOOStart() == l)
+ crd = rewriter.create<ToCoordinatesBufferOp>(loc, tensor);
+ else
+ crd = rewriter.create<ToCoordinatesOp>(loc, tensor, l);
+ printContents(rewriter, loc, crd);
+ break;
+ }
+ case SparseTensorFieldKind::ValMemRef: {
+ rewriter.create<vector::PrintOp>(loc,
+ rewriter.getStringAttr("values : "));
+ auto val = rewriter.create<ToValuesOp>(loc, tensor);
+ printContents(rewriter, loc, val);
+ break;
+ }
+ }
+ return true;
+ });
+ rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("----\n"));
+ rewriter.eraseOp(op);
+ return success();
+ }
+
+private:
+ // Helper to print contents of a single memref. Note that for the "push_back"
+ // vectors, this prints the full capacity, not just the size. This is done
+ // on purpose, so that clients see how much storage has been allocated in
+ // total. Contents of the extra capacity in the buffer may be uninitialized
+ // (unless the flag enable-buffer-initialization is set to true).
+ //
+ // Generates code to print:
+ // ( a0, a1, ... )
+ static void printContents(PatternRewriter &rewriter, Location loc,
+ Value vec) {
+ // Open bracket.
+ rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Open);
+ // For loop over elements.
+ auto zero = constantIndex(rewriter, loc, 0);
+ auto size = rewriter.create<memref::DimOp>(loc, vec, zero);
+ auto step = constantIndex(rewriter, loc, 1);
+ auto forOp = rewriter.create<scf::ForOp>(loc, zero, size, step);
+ rewriter.setInsertionPointToStart(forOp.getBody());
+ auto idx = forOp.getInductionVar();
+ auto val = rewriter.create<memref::LoadOp>(loc, vec, idx);
+ rewriter.create<vector::PrintOp>(loc, val, vector::PrintPunctuation::Comma);
+ rewriter.setInsertionPointAfter(forOp);
+ // Close bracket and end of line.
+ rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Close);
+ rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::NewLine);
+ }
+};
+
/// Sparse rewriting rule for sparse-to-sparse reshape operator.
struct TensorReshapeRewriter : public OpRewritePattern<tensor::ReshapeOp> {
public:
@@ -1284,7 +1381,8 @@ struct OutRewriter : public OpRewritePattern<OutOp> {
void mlir::populatePreSparsificationRewriting(RewritePatternSet &patterns) {
patterns.add<FoldInvariantYield, FuseSparseMultiplyOverAdd, FuseTensorCast,
- GenSemiRingReduction, GenSemiRingSelect>(patterns.getContext());
+ GenSemiRingReduction, GenSemiRingSelect, PrintRewriter>(
+ patterns.getContext());
}
void mlir::populateLowerSparseOpsToForeachPatterns(RewritePatternSet &patterns,
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp
index b888dfa..fa57015 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp
@@ -554,41 +554,6 @@ sparse_tensor::genToMemref(OpBuilder &builder, Location loc, Value tensor) {
.getResult();
}
-Value sparse_tensor::genToPositions(OpBuilder &builder, Location loc,
- Value tensor, Level lvl) {
- const auto srcTp = getSparseTensorType(tensor);
- const Type posTp = srcTp.getPosType();
- const Type memTp = get1DMemRefType(posTp, /*withLayout=*/false);
- return builder.create<ToPositionsOp>(loc, memTp, tensor,
- builder.getIndexAttr(lvl));
-}
-
-Value sparse_tensor::genToCoordinates(OpBuilder &builder, Location loc,
- Value tensor, Level lvl) {
- const auto srcTp = getSparseTensorType(tensor);
- const Type crdTp = srcTp.getCrdType();
- const Type memTp =
- get1DMemRefType(crdTp, /*withLayout=*/lvl >= srcTp.getAoSCOOStart());
- return builder.create<ToCoordinatesOp>(loc, memTp, tensor,
- builder.getIndexAttr(lvl));
-}
-
-Value sparse_tensor::genToCoordinatesBuffer(OpBuilder &builder, Location loc,
- Value tensor) {
- const auto srcTp = getSparseTensorType(tensor);
- const Type crdTp = srcTp.getCrdType();
- const Type memTp = get1DMemRefType(crdTp, /*withLayout=*/false);
- return builder.create<ToCoordinatesBufferOp>(loc, memTp, tensor);
-}
-
-Value sparse_tensor::genToValues(OpBuilder &builder, Location loc,
- Value tensor) {
- RankedTensorType srcTp = getRankedTensorType(tensor);
- Type valTp = get1DMemRefType(srcTp.getElementType(),
- /*withLayout=*/false);
- return builder.create<ToValuesOp>(loc, valTp, tensor);
-}
-
Value sparse_tensor::genValMemSize(OpBuilder &builder, Location loc,
Value tensor) {
return getDescriptorFromTensorTuple(tensor).getValMemSize(builder, loc);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
index cc119bc..e8f6bd1 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
@@ -228,17 +228,6 @@ void deallocDenseTensor(OpBuilder &builder, Location loc, Value buffer);
void sizesFromSrc(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
Location loc, Value src);
-/// Generates a 1D MemRefType with a dynamic size. When withLayout is set, the
-/// returned memref has a layout has unknown strides and offsets. Otherwise,
-/// a memref with a standard unit stride zero offset layout is returned.
-inline MemRefType get1DMemRefType(Type etp, bool withLayout) {
- auto layout = withLayout ? StridedLayoutAttr::StridedLayoutAttr::get(
- etp.getContext(), ShapedType::kDynamic,
- {ShapedType::kDynamic})
- : StridedLayoutAttr();
- return MemRefType::get(ShapedType::kDynamic, etp, layout);
-}
-
/// Scans to top of generated loop.
Operation *getTop(Operation *op);
@@ -281,22 +270,6 @@ void storeAll(OpBuilder &builder, Location loc, Value mem, ValueRange vs,
TypedValue<BaseMemRefType> genToMemref(OpBuilder &builder, Location loc,
Value tensor);
-/// Infers the result type and generates `ToPositionsOp`.
-Value genToPositions(OpBuilder &builder, Location loc, Value tensor, Level lvl);
-
-/// Infers the result type and generates `ToCoordinatesOp`. If the
-/// level is within a COO region, the result type is a memref with unknown
-/// stride and offset. Otherwise, the result type is a memref without
-/// any specified layout.
-Value genToCoordinates(OpBuilder &builder, Location loc, Value tensor,
- Level lvl);
-
-/// Infers the result type and generates `ToCoordinatesBufferOp`.
-Value genToCoordinatesBuffer(OpBuilder &builder, Location loc, Value tensor);
-
-/// Infers the result type and generates `ToValuesOp`.
-Value genToValues(OpBuilder &builder, Location loc, Value tensor);
-
/// Generates code to retrieve the values size for the sparse tensor.
Value genValMemSize(OpBuilder &builder, Location loc, Value tensor);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
index 0ead135..812c288 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
@@ -259,7 +259,7 @@ void LoopEmitter::initializeLoopEmit(
// Annotated sparse tensors.
// We also need the value buffer for all-dense annotated "sparse"
// tensors.
- valBuffer[t] = genToValues(builder, loc, tensor);
+ valBuffer[t] = builder.create<ToValuesOp>(loc, tensor);
}
// NOTE: we can also prepare for 0 lvl here in advance, this will hoist
// some loop preparation from tensor iteration, but will also (undesirably)
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp
index 011d814..8edacaa 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorLevel.cpp
@@ -1281,21 +1281,21 @@ sparse_tensor::makeSparseTensorLevel(OpBuilder &b, Location l, Value t,
case LevelFormat::Batch:
llvm_unreachable("not implemented");
case LevelFormat::Compressed: {
- Value pos = genToPositions(b, l, t, lvl);
- Value crd = genToCoordinates(b, l, t, lvl);
+ Value pos = b.create<ToPositionsOp>(l, t, lvl);
+ Value crd = b.create<ToCoordinatesOp>(l, t, lvl);
return std::make_unique<CompressedLevel>(tid, lvl, lt, sz, pos, crd);
}
case LevelFormat::LooseCompressed: {
- Value pos = genToPositions(b, l, t, lvl);
- Value crd = genToCoordinates(b, l, t, lvl);
+ Value pos = b.create<ToPositionsOp>(l, t, lvl);
+ Value crd = b.create<ToCoordinatesOp>(l, t, lvl);
return std::make_unique<LooseCompressedLevel>(tid, lvl, lt, sz, pos, crd);
}
case LevelFormat::Singleton: {
- Value crd = genToCoordinates(b, l, t, lvl);
+ Value crd = b.create<ToCoordinatesOp>(l, t, lvl);
return std::make_unique<SingletonLevel>(tid, lvl, lt, sz, crd);
}
case LevelFormat::NOutOfM: {
- Value crd = genToCoordinates(b, l, t, lvl);
+ Value crd = b.create<ToCoordinatesOp>(l, t, lvl);
return std::make_unique<NOutOfMLevel>(tid, lvl, lt, sz, crd);
}
case LevelFormat::Undef:
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index e6efec1..fe2f250 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -4012,15 +4012,17 @@ static bool inferStaticShape(PackOp packOp, SmallVectorImpl<int64_t> &srcShape,
llvm::SmallSetVector<int64_t, 4> innerDims;
innerDims.insert(packOp.getInnerDimsPos().begin(),
packOp.getInnerDimsPos().end());
- auto outerDimsPerm = packOp.getOuterDimsPerm();
+ SmallVector<int64_t> inverseOuterDimsPerm;
+ if (!packOp.getOuterDimsPerm().empty())
+ inverseOuterDimsPerm = invertPermutationVector(packOp.getOuterDimsPerm());
int srcRank = packOp.getSourceRank();
for (auto i : llvm::seq<int64_t>(0, srcRank)) {
if (innerDims.contains(i))
continue;
int64_t srcPos = i;
int64_t destPos = i;
- if (!outerDimsPerm.empty())
- destPos = outerDimsPerm[srcPos];
+ if (!inverseOuterDimsPerm.empty())
+ destPos = inverseOuterDimsPerm[srcPos];
if (ShapedType::isDynamic(srcShape[srcPos]) ==
ShapedType::isDynamic(destShape[destPos])) {
continue;
@@ -4240,15 +4242,17 @@ static bool inferStaticShape(UnPackOp op, SmallVectorImpl<int64_t> &srcShape,
op.getDestType().getShape().end());
llvm::SmallSetVector<int64_t, 4> innerDims;
innerDims.insert(op.getInnerDimsPos().begin(), op.getInnerDimsPos().end());
- auto outerDimsPerm = op.getOuterDimsPerm();
+ SmallVector<int64_t> inverseOuterDimsPerm;
+ if (!op.getOuterDimsPerm().empty())
+ inverseOuterDimsPerm = invertPermutationVector(op.getOuterDimsPerm());
int destRank = op.getDestRank();
for (auto i : llvm::seq<int64_t>(0, destRank)) {
if (innerDims.contains(i))
continue;
int64_t srcPos = i;
int64_t destPos = i;
- if (!outerDimsPerm.empty())
- srcPos = outerDimsPerm[destPos];
+ if (!inverseOuterDimsPerm.empty())
+ srcPos = inverseOuterDimsPerm[destPos];
if (ShapedType::isDynamic(srcShape[srcPos]) ==
ShapedType::isDynamic(destShape[destPos])) {
continue;
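
The pack/unpack fix above hinges on using the inverse of `outer_dims_perm` when mapping between source and destination outer dimensions. A small standalone illustration, under the assumption (implied by the fix) that `outer_dims_perm` maps destination outer dim `d` to source dim `perm[d]`; MLIR's `invertPermutationVector` performs the same inversion:

```c++
#include <cstdint>
#include <vector>

// If perm[d] == s (destination dim d reads source dim s), then the
// destination slot of a given source dim s is inverse(perm)[s].
std::vector<int64_t> invert(const std::vector<int64_t> &perm) {
  std::vector<int64_t> inv(perm.size());
  for (int64_t d = 0, e = perm.size(); d < e; ++d)
    inv[perm[d]] = d;  // source dim perm[d] lands in destination slot d
  return inv;
}
// invert({1, 2, 0}) == {2, 0, 1}: source dim 0 ends up in destination slot 2.
```
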
diff --git a/mlir/lib/Target/Cpp/TranslateToCpp.cpp b/mlir/lib/Target/Cpp/TranslateToCpp.cpp
index 2ba3dec..16aa136 100644
--- a/mlir/lib/Target/Cpp/TranslateToCpp.cpp
+++ b/mlir/lib/Target/Cpp/TranslateToCpp.cpp
@@ -627,6 +627,33 @@ static LogicalResult printOperation(CppEmitter &emitter,
return success();
}
+static LogicalResult printOperation(CppEmitter &emitter,
+ emitc::LogicalAndOp logicalAndOp) {
+ Operation *operation = logicalAndOp.getOperation();
+ return printBinaryOperation(emitter, operation, "&&");
+}
+
+static LogicalResult printOperation(CppEmitter &emitter,
+ emitc::LogicalNotOp logicalNotOp) {
+ raw_ostream &os = emitter.ostream();
+
+ if (failed(emitter.emitAssignPrefix(*logicalNotOp.getOperation())))
+ return failure();
+
+ os << "!";
+
+ if (failed(emitter.emitOperand(logicalNotOp.getOperand())))
+ return failure();
+
+ return success();
+}
+
+static LogicalResult printOperation(CppEmitter &emitter,
+ emitc::LogicalOrOp logicalOrOp) {
+ Operation *operation = logicalOrOp.getOperation();
+ return printBinaryOperation(emitter, operation, "||");
+}
+
static LogicalResult printOperation(CppEmitter &emitter, emitc::ForOp forOp) {
raw_indented_ostream &os = emitter.ostream();
@@ -1284,7 +1311,8 @@ LogicalResult CppEmitter::emitOperation(Operation &op, bool trailingSemicolon) {
emitc::CallOpaqueOp, emitc::CastOp, emitc::CmpOp,
emitc::ConstantOp, emitc::DeclareFuncOp, emitc::DivOp,
emitc::ExpressionOp, emitc::ForOp, emitc::FuncOp, emitc::IfOp,
- emitc::IncludeOp, emitc::MulOp, emitc::RemOp, emitc::ReturnOp,
+ emitc::IncludeOp, emitc::LogicalAndOp, emitc::LogicalNotOp,
+ emitc::LogicalOrOp, emitc::MulOp, emitc::RemOp, emitc::ReturnOp,
emitc::SubOp, emitc::VariableOp, emitc::VerbatimOp>(
[&](auto op) { return printOperation(*this, op); })
// Func ops.
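
For reference, the C++ emitted for the three new logical ops has roughly the shape below. The snippet is illustrative only; the actual variable names come from the emitter's value numbering, and the `bool` results reflect the ops' required `i1` result type:

```c++
#include <cstdint>

void logical(int32_t v1, int32_t v2) {
  bool v3 = v1 && v2;  // emitc.logical_and
  bool v4 = !v1;       // emitc.logical_not
  bool v5 = v1 || v2;  // emitc.logical_or
  (void)v3; (void)v4; (void)v5;
}
```
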
diff --git a/mlir/test/Dialect/Affine/access-analysis.mlir b/mlir/test/Dialect/Affine/access-analysis.mlir
new file mode 100644
index 0000000..68310b9
--- /dev/null
+++ b/mlir/test/Dialect/Affine/access-analysis.mlir
@@ -0,0 +1,67 @@
+// RUN: mlir-opt %s -split-input-file -test-affine-access-analysis -verify-diagnostics | FileCheck %s
+
+// CHECK-LABEL: func @loop_1d
+func.func @loop_1d(%A : memref<?x?xf32>, %B : memref<?x?x?xf32>) {
+ %c0 = arith.constant 0 : index
+ %M = memref.dim %A, %c0 : memref<?x?xf32>
+ affine.for %i = 0 to %M {
+ affine.for %j = 0 to %M {
+ affine.load %A[%c0, %i] : memref<?x?xf32>
+ // expected-remark@above {{contiguous along loop 0}}
+ affine.load %A[%c0, 8 * %i + %j] : memref<?x?xf32>
+ // expected-remark@above {{contiguous along loop 1}}
+ // Note/FIXME: access stride isn't being checked.
+ // expected-remark@-3 {{contiguous along loop 0}}
+
+ // These are all non-contiguous along both loops. Nothing is emitted.
+ affine.load %A[%i, %c0] : memref<?x?xf32>
+ // Note/FIXME: access stride isn't being checked.
+ affine.load %A[%i, 8 * %j] : memref<?x?xf32>
+ // expected-remark@above {{contiguous along loop 1}}
+ affine.load %A[%j, 4 * %i] : memref<?x?xf32>
+ // expected-remark@above {{contiguous along loop 0}}
+ }
+ }
+ return
+}
+
+// -----
+
+#map = affine_map<(d0) -> (d0 * 16)>
+#map1 = affine_map<(d0) -> (d0 * 16 + 16)>
+#map2 = affine_map<(d0) -> (d0)>
+#map3 = affine_map<(d0) -> (d0 + 1)>
+
+func.func @tiled(%arg0: memref<*xf32>) {
+ %alloc = memref.alloc() {alignment = 64 : i64} : memref<1x224x224x64xf32>
+ %cast = memref.cast %arg0 : memref<*xf32> to memref<64xf32>
+ affine.for %arg1 = 0 to 4 {
+ affine.for %arg2 = 0 to 224 {
+ affine.for %arg3 = 0 to 14 {
+ %alloc_0 = memref.alloc() : memref<1x16x1x16xf32>
+ affine.for %arg4 = #map(%arg1) to #map1(%arg1) {
+ affine.for %arg5 = #map(%arg3) to #map1(%arg3) {
+ %0 = affine.load %cast[%arg4] : memref<64xf32>
+ // expected-remark@above {{contiguous along loop 3}}
+ affine.store %0, %alloc_0[0, %arg1 * -16 + %arg4, 0, %arg3 * -16 + %arg5] : memref<1x16x1x16xf32>
+ // expected-remark@above {{contiguous along loop 4}}
+ // expected-remark@above {{contiguous along loop 2}}
+ }
+ }
+ affine.for %arg4 = #map(%arg1) to #map1(%arg1) {
+ affine.for %arg5 = #map2(%arg2) to #map3(%arg2) {
+ affine.for %arg6 = #map(%arg3) to #map1(%arg3) {
+ %0 = affine.load %alloc_0[0, %arg1 * -16 + %arg4, -%arg2 + %arg5, %arg3 * -16 + %arg6] : memref<1x16x1x16xf32>
+ // expected-remark@above {{contiguous along loop 5}}
+ // expected-remark@above {{contiguous along loop 2}}
+ affine.store %0, %alloc[0, %arg5, %arg6, %arg4] : memref<1x224x224x64xf32>
+ // expected-remark@above {{contiguous along loop 3}}
+ }
+ }
+ }
+ memref.dealloc %alloc_0 : memref<1x16x1x16xf32>
+ }
+ }
+ }
+ return
+}
diff --git a/mlir/test/Dialect/Arith/expand-ops.mlir b/mlir/test/Dialect/Arith/expand-ops.mlir
index 046e8ff..91f652e 100644
--- a/mlir/test/Dialect/Arith/expand-ops.mlir
+++ b/mlir/test/Dialect/Arith/expand-ops.mlir
@@ -255,36 +255,21 @@ func.func @truncf_f32(%arg0 : f32) -> bf16 {
}
// CHECK-LABEL: @truncf_f32
-
-// CHECK-DAG: %[[C16:.+]] = arith.constant 16
-// CHECK-DAG: %[[C32768:.+]] = arith.constant 32768
-// CHECK-DAG: %[[C2130706432:.+]] = arith.constant 2130706432
-// CHECK-DAG: %[[C2139095040:.+]] = arith.constant 2139095040
-// CHECK-DAG: %[[C8388607:.+]] = arith.constant 8388607
-// CHECK-DAG: %[[C31:.+]] = arith.constant 31
-// CHECK-DAG: %[[C23:.+]] = arith.constant 23
-// CHECK-DAG: %[[BITCAST:.+]] = arith.bitcast %arg0
-// CHECK-DAG: %[[SIGN:.+]] = arith.shrui %[[BITCAST:.+]], %[[C31]]
-// CHECK-DAG: %[[ROUND:.+]] = arith.subi %[[C32768]], %[[SIGN]]
-// CHECK-DAG: %[[MANTISSA:.+]] = arith.andi %[[BITCAST]], %[[C8388607]]
-// CHECK-DAG: %[[ROUNDED:.+]] = arith.addi %[[MANTISSA]], %[[ROUND]]
-// CHECK-DAG: %[[ROLL:.+]] = arith.shrui %[[ROUNDED]], %[[C23]]
-// CHECK-DAG: %[[SHR:.+]] = arith.shrui %[[ROUNDED]], %[[ROLL]]
-// CHECK-DAG: %[[EXP:.+]] = arith.andi %0, %[[C2139095040]]
-// CHECK-DAG: %[[EXPROUND:.+]] = arith.addi %[[EXP]], %[[ROUNDED]]
-// CHECK-DAG: %[[EXPROLL:.+]] = arith.andi %[[EXPROUND]], %[[C2139095040]]
-// CHECK-DAG: %[[EXPMAX:.+]] = arith.cmpi uge, %[[EXP]], %[[C2130706432]]
-// CHECK-DAG: %[[EXPNEW:.+]] = arith.select %[[EXPMAX]], %[[EXP]], %[[EXPROLL]]
-// CHECK-DAG: %[[OVERFLOW_B:.+]] = arith.trunci %[[ROLL]]
-// CHECK-DAG: %[[KEEP_MAN:.+]] = arith.andi %[[EXPMAX]], %[[OVERFLOW_B]]
-// CHECK-DAG: %[[MANNEW:.+]] = arith.select %[[KEEP_MAN]], %[[MANTISSA]], %[[SHR]]
-// CHECK-DAG: %[[NEWSIGN:.+]] = arith.shli %[[SIGN]], %[[C31]]
-// CHECK-DAG: %[[WITHEXP:.+]] = arith.ori %[[NEWSIGN]], %[[EXPNEW]]
-// CHECK-DAG: %[[WITHMAN:.+]] = arith.ori %[[WITHEXP]], %[[MANNEW]]
-// CHECK-DAG: %[[SHIFT:.+]] = arith.shrui %[[WITHMAN]], %[[C16]]
-// CHECK-DAG: %[[TRUNC:.+]] = arith.trunci %[[SHIFT]]
-// CHECK-DAG: %[[RES:.+]] = arith.bitcast %[[TRUNC]]
-// CHECK: return %[[RES]]
+// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : i32
+// CHECK-DAG: %[[C16:.+]] = arith.constant 16 : i32
+// CHECK-DAG: %[[C7FC0_i16:.+]] = arith.constant 32704 : i16
+// CHECK-DAG: %[[C7FFF:.+]] = arith.constant 32767 : i32
+// CHECK-DAG: %[[ISNAN:.+]] = arith.cmpf une, %arg0, %arg0 : f32
+// CHECK-DAG: %[[BITCAST:.+]] = arith.bitcast %arg0 : f32 to i32
+// CHECK-DAG: %[[SHRUI:.+]] = arith.shrui %[[BITCAST]], %[[C16]] : i32
+// CHECK-DAG: %[[BIT16:.+]] = arith.andi %[[SHRUI]], %[[C1]] : i32
+// CHECK-DAG: %[[ROUNDING_BIAS:.+]] = arith.addi %[[BIT16]], %[[C7FFF]] : i32
+// CHECK-DAG: %[[BIASED:.+]] = arith.addi %[[BITCAST]], %[[ROUNDING_BIAS]] : i32
+// CHECK-DAG: %[[BIASED_SHIFTED:.+]] = arith.shrui %[[BIASED]], %[[C16]] : i32
+// CHECK-DAG: %[[NORMAL_CASE_RESULT_i16:.+]] = arith.trunci %[[BIASED_SHIFTED]] : i32 to i16
+// CHECK-DAG: %[[SELECT:.+]] = arith.select %[[ISNAN]], %[[C7FC0_i16]], %[[NORMAL_CASE_RESULT_i16]] : i16
+// CHECK-DAG: %[[RESULT:.+]] = arith.bitcast %[[SELECT]] : i16 to bf16
+// CHECK: return %[[RESULT]]
// -----
diff --git a/mlir/test/Dialect/EmitC/invalid_ops.mlir b/mlir/test/Dialect/EmitC/invalid_ops.mlir
index 121a216..5f64b53 100644
--- a/mlir/test/Dialect/EmitC/invalid_ops.mlir
+++ b/mlir/test/Dialect/EmitC/invalid_ops.mlir
@@ -331,3 +331,27 @@ emitc.declare_func @bar
// expected-error@+1 {{'emitc.declare_func' op requires attribute 'sym_name'}}
"emitc.declare_func"() : () -> ()
+
+// -----
+
+func.func @logical_and_resulterror(%arg0: i32, %arg1: i32) {
+ // expected-error @+1 {{'emitc.logical_and' op result #0 must be 1-bit signless integer, but got 'i32'}}
+ %0 = "emitc.logical_and"(%arg0, %arg1) : (i32, i32) -> i32
+ return
+}
+
+// -----
+
+func.func @logical_not_resulterror(%arg0: i32) {
+ // expected-error @+1 {{'emitc.logical_not' op result #0 must be 1-bit signless integer, but got 'i32'}}
+ %0 = "emitc.logical_not"(%arg0) : (i32) -> i32
+ return
+}
+
+// -----
+
+func.func @logical_or_resulterror(%arg0: i32, %arg1: i32) {
+ // expected-error @+1 {{'emitc.logical_or' op result #0 must be 1-bit signless integer, but got 'i32'}}
+ %0 = "emitc.logical_or"(%arg0, %arg1) : (i32, i32) -> i32
+ return
+}
diff --git a/mlir/test/Dialect/EmitC/ops.mlir b/mlir/test/Dialect/EmitC/ops.mlir
index 93119be..045fb24 100644
--- a/mlir/test/Dialect/EmitC/ops.mlir
+++ b/mlir/test/Dialect/EmitC/ops.mlir
@@ -117,6 +117,13 @@ func.func @cmp(%arg0 : i32, %arg1 : f32, %arg2 : i64, %arg3 : f64, %arg4 : !emit
return
}
+func.func @logical(%arg0: i32, %arg1: i32) {
+ %0 = emitc.logical_and %arg0, %arg1 : i32, i32
+ %1 = emitc.logical_not %arg0 : i32
+ %2 = emitc.logical_or %arg0, %arg1 : i32, i32
+ return
+}
+
func.func @test_if(%arg0: i1, %arg1: f32) {
emitc.if %arg0 {
%0 = emitc.call_opaque "func_const"(%arg1) : (f32) -> i32
diff --git a/mlir/test/Dialect/GPU/ops.mlir b/mlir/test/Dialect/GPU/ops.mlir
index 8d249c9..511b018 100644
--- a/mlir/test/Dialect/GPU/ops.mlir
+++ b/mlir/test/Dialect/GPU/ops.mlir
@@ -59,24 +59,39 @@ module attributes {gpu.container_module} {
gpu.module @kernels {
gpu.func @kernel_1(%arg0 : f32, %arg1 : memref<?xf32, 1>) kernel {
%tIdX = gpu.thread_id x
+ // CHECK: thread_id_x
%tIdY = gpu.thread_id y
+ // CHECK-NEXT: thread_id_y
%tIdZ = gpu.thread_id z
+ // CHECK-NEXT: thread_id_z
%bDimX = gpu.block_dim x
+ // CHECK-NEXT: block_dim_x
%bDimY = gpu.block_dim y
+ // CHECK-NEXT: block_dim_y
%bDimZ = gpu.block_dim z
+ // CHECK-NEXT: block_dim_z
%bIdX = gpu.block_id x
+ // CHECK-NEXT: block_id_x
%bIdY = gpu.block_id y
+ // CHECK-NEXT: block_id_y
%bIdZ = gpu.block_id z
+ // CHECK-NEXT: block_id_z
%gDimX = gpu.grid_dim x
+ // CHECK-NEXT: grid_dim_x
%gDimY = gpu.grid_dim y
+ // CHECK-NEXT: grid_dim_y
%gDimZ = gpu.grid_dim z
+ // CHECK-NEXT: grid_dim_z
%gIdX = gpu.global_id x
+ // CHECK-NEXT: global_id_x
%gIdY = gpu.global_id y
+ // CHECK-NEXT: global_id_y
%gIdZ = gpu.global_id z
+ // CHECK-NEXT: global_id_z
%sgId = gpu.subgroup_id : index
%numSg = gpu.num_subgroups : index
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir
index f85bc51..395b812 100644
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -1027,3 +1027,13 @@ func.func @sparse_reinterpret_map(%t0 : tensor<6x12xi32, #BSR>) -> tensor<3x4x2x
to tensor<3x4x2x4xi32, #DSDD>
return %t1 : tensor<3x4x2x4xi32, #DSDD>
}
+
+// -----
+
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>
+
+func.func @sparse_print(%arg0: tensor<10x10xf64>) {
+ // expected-error@+1 {{'sparse_tensor.print' op operand #0 must be sparse tensor of any type values}}
+ sparse_tensor.print %arg0 : tensor<10x10xf64>
+ return
+}
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
index 476fa1b..f4a58df 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -705,8 +705,25 @@ func.func @sparse_lvl(%arg0: index, %t : tensor<?x?xi32, #BSR>) -> index {
map = (i, j, k, l) -> (i: dense, j: compressed, k: dense, l: dense)
}>
+// CHECK-LABEL: func.func @sparse_reinterpret_map(
+// CHECK-SAME: %[[A0:.*]]: tensor<6x12xi32, #sparse{{[0-9]*}}>)
+// CHECK: %[[VAL:.*]] = sparse_tensor.reinterpret_map %[[A0]]
+// CHECK: return %[[VAL]]
func.func @sparse_reinterpret_map(%t0 : tensor<6x12xi32, #BSR>) -> tensor<3x4x2x3xi32, #DSDD> {
%t1 = sparse_tensor.reinterpret_map %t0 : tensor<6x12xi32, #BSR>
to tensor<3x4x2x3xi32, #DSDD>
return %t1 : tensor<3x4x2x3xi32, #DSDD>
}
+
+// -----
+
+#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>
+
+// CHECK-LABEL: func.func @sparse_print(
+// CHECK-SAME: %[[A0:.*]]: tensor<10x10xf64, #sparse{{[0-9]*}}>)
+// CHECK: sparse_tensor.print %[[A0]]
+// CHECK: return
+func.func @sparse_print(%arg0: tensor<10x10xf64, #CSR>) {
+ sparse_tensor.print %arg0 : tensor<10x10xf64, #CSR>
+ return
+}
diff --git a/mlir/test/Dialect/Tensor/canonicalize.mlir b/mlir/test/Dialect/Tensor/canonicalize.mlir
index e123c77..d17c23a 100644
--- a/mlir/test/Dialect/Tensor/canonicalize.mlir
+++ b/mlir/test/Dialect/Tensor/canonicalize.mlir
@@ -822,7 +822,7 @@ func.func @infer_src_shape_pack(%src: tensor<?x?x?x?xf32>, %dest: tensor<10x20x3
// CHECK-LABEL: func.func @infer_src_shape_pack
// CHECK-SAME: %[[SRC:[0-9a-zA-Z]+]]
// CHECK-SAME: %[[DEST:[0-9a-zA-Z]+]]
-// CHECK: %[[CAST_SRC:.+]] = tensor.cast %[[SRC]] : tensor<?x?x?x?xf32> to tensor<30x20x?x10xf32>
+// CHECK: %[[CAST_SRC:.+]] = tensor.cast %[[SRC]] : tensor<?x?x?x?xf32> to tensor<40x20x?x30xf32>
// CHECK: %[[PACK:.+]] = tensor.pack %[[CAST_SRC]] {{.+}} into %[[DEST]]
// CHECK: return %[[PACK]]
@@ -841,13 +841,24 @@ func.func @infer_dest_shape_pack(%src: tensor<30x20x?x10xf32>, %dest: tensor<?x?
// CHECK-LABEL: func.func @infer_dest_shape_pack
// CHECK-SAME: %[[SRC:[0-9a-zA-Z]+]]
// CHECK-SAME: %[[DEST:[0-9a-zA-Z]+]]
-// CHECK: %[[CAST_DEST:.+]] = tensor.cast %[[DEST]] : tensor<?x?x?x?x16xf32> to tensor<10x20x30x?x16xf32>
+// CHECK: %[[CAST_DEST:.+]] = tensor.cast %[[DEST]] : tensor<?x?x?x?x16xf32> to tensor<?x20x10x30x16xf32>
// CHECK: %[[PACK:.+]] = tensor.pack %[[SRC]] {{.+}} into %[[CAST_DEST]]
-// CHECK: %[[CAST_PACK:.+]] = tensor.cast %[[PACK]] : tensor<10x20x30x?x16xf32> to tensor<?x?x?x?x16xf32>
+// CHECK: %[[CAST_PACK:.+]] = tensor.cast %[[PACK]] : tensor<?x20x10x30x16xf32> to tensor<?x?x?x?x16xf32>
// CHECK: return %[[CAST_PACK]]
// -----
+func.func @no_infer_pack_shape(%arg0: tensor<?x32x100xf32>, %arg1: index) -> tensor<32x7x?x16x1xf32> {
+ %cst = arith.constant 0.000000e+00 : f32
+ %0 = tensor.empty(%arg1) : tensor<32x7x?x16x1xf32>
+ %pack = tensor.pack %arg0 padding_value(%cst : f32) outer_dims_perm = [1, 2, 0] inner_dims_pos = [2, 0] inner_tiles = [16, 1] into %0 : tensor<?x32x100xf32> -> tensor<32x7x?x16x1xf32>
+ return %pack : tensor<32x7x?x16x1xf32>
+}
+// CHECK-LABEL: func.func @no_infer_pack_shape
+// CHECK-NOT: tensor.cast
+
+// -----
+
func.func @fold_padding_value_pack_negative1(%arg0: tensor<1200x499999xf32>) -> tensor<31250x1200x16x1xf32> {
%cst = arith.constant 0.000000e+00 : f32
%0 = tensor.empty() : tensor<31250x1200x16x1xf32>
@@ -920,9 +931,9 @@ func.func @infer_dest_shape_unpack(%src: tensor<10x20x30x40x16xf32>, %dest: tens
// CHECK-LABEL: func.func @infer_dest_shape_unpack
// CHECK-SAME: %[[SRC:[0-9a-zA-Z]+]]
// CHECK-SAME: %[[DEST:[0-9a-zA-Z]+]]
-// CHECK: %[[CAST_DEST:.+]] = tensor.cast %[[DEST]] : tensor<?x?x?x?xf32> to tensor<30x20x?x10xf32>
+// CHECK: %[[CAST_DEST:.+]] = tensor.cast %[[DEST]] : tensor<?x?x?x?xf32> to tensor<40x20x?x30xf32>
// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[SRC]] {{.+}} into %[[CAST_DEST]]
-// CHECK: %[[CAST_UNPACK:.+]] = tensor.cast %[[UNPACK]] : tensor<30x20x?x10xf32> to tensor<?x?x?x?xf32>
+// CHECK: %[[CAST_UNPACK:.+]] = tensor.cast %[[UNPACK]] : tensor<40x20x?x30xf32> to tensor<?x?x?x?xf32>
// CHECK: return %[[CAST_UNPACK]]
// -----
@@ -938,12 +949,24 @@ func.func @infer_src_shape_unpack(%src: tensor<?x?x?x?x16xf32>, %dest: tensor<30
// CHECK-LABEL: func.func @infer_src_shape_unpack
// CHECK-SAME: %[[SRC:[0-9a-zA-Z]+]]
// CHECK-SAME: %[[DEST:[0-9a-zA-Z]+]]
-// CHECK: %[[CAST_SRC:.+]] = tensor.cast %[[SRC]] : tensor<?x?x?x?x16xf32> to tensor<10x20x30x?x16xf32>
+// CHECK: %[[CAST_SRC:.+]] = tensor.cast %[[SRC]] : tensor<?x?x?x?x16xf32> to tensor<?x20x10x30x16xf32>
// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[CAST_SRC]]
// CHECK: return %[[UNPACK]]
// -----
+func.func @no_infer_unpack_shape(%arg1: tensor<32x7x?x16x1xf32>, %arg2: index) -> tensor<?x32x100xf32> {
+ %cst = arith.constant 0.000000e+00 : f32
+ %0 = tensor.empty(%arg2) : tensor<?x32x100xf32>
+ %unpack = tensor.unpack %arg1 outer_dims_perm = [1, 2, 0] inner_dims_pos = [2, 0] inner_tiles = [16, 1] into %0 : tensor<32x7x?x16x1xf32> -> tensor<?x32x100xf32>
+ return %unpack : tensor<?x32x100xf32>
+}
+// CHECK-LABEL: func.func @no_infer_unpack_shape
+// CHECK-NOT: tensor.cast
+
+// -----
+
+
// CHECK-LABEL: func @fold_overlapping_insert
// CHECK-SAME: %[[INPUT:.+]]: tensor<?x?x?xf32>, %{{.+}}: tensor<4x?x8xf32>, %[[SLICE2:.+]]: tensor<4x?x8xf32>
func.func @fold_overlapping_insert(%input : tensor<?x?x?xf32>, %slice1: tensor<4x?x8xf32>, %slice2: tensor<4x?x8xf32>, %i: index, %size: index) -> (tensor<?x?x?xf32>) {
diff --git a/mlir/test/Integration/Dialect/Arith/CPU/test-wide-int-emulation-compare-results-i16.mlir b/mlir/test/Integration/Dialect/Arith/CPU/test-wide-int-emulation-compare-results-i16.mlir
index 15bafed..437e49a 100644
--- a/mlir/test/Integration/Dialect/Arith/CPU/test-wide-int-emulation-compare-results-i16.mlir
+++ b/mlir/test/Integration/Dialect/Arith/CPU/test-wide-int-emulation-compare-results-i16.mlir
@@ -26,7 +26,7 @@ func.func @check_results(%lhs : i16, %rhs : i16, %res0 : i16, %res1 : i16) -> ()
%mismatch = arith.cmpi ne, %res0, %res1 : i16
scf.if %mismatch -> () {
vector.print %res1 : i16
- vector.print str "Mismatch"
+ vector.print str "Mismatch\n"
}
return
}
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
index 12f13e8..881e279 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
@@ -88,7 +88,7 @@ func.func @entry() {
}
// CHECK: SME: END OF TEST OUTPUT
- vector.print str "SME: END OF TEST OUTPUT"
+ vector.print str "SME: END OF TEST OUTPUT\n"
return
}
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir
index ee3866de..588b44a 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir
@@ -24,23 +24,23 @@ func.func @use_too_many_tiles(%a: memref<?x?xi16>, %b: memref<?x?xi16>, %c: mem
// CHECK-LABEL: tile_a:
// CHECK-COUNT-8: ( 0, 0, 0, 0, 0, 0, 0, 0
- vector.print str "tile_a:"
+ vector.print str "tile_a:\n"
vector.print %tile_a : vector<[8]x[8]xi16>
// CHECK-LABEL: tile_b:
// CHECK-COUNT-8: ( 1, 1, 1, 1, 1, 1, 1, 1
- vector.print str "tile_b:"
+ vector.print str "tile_b:\n"
vector.print %tile_b : vector<[8]x[8]xi16>
// CHECK-LABEL: tile_c:
// CHECK-COUNT-8: ( 2, 2, 2, 2, 2, 2, 2, 2
- vector.print str "tile_c:"
+ vector.print str "tile_c:\n"
vector.print %tile_c : vector<[8]x[8]xi16>
// CHECK-LABEL: tile_d:
// CHECK-COUNT-8: ( 3, 3, 3, 3, 3, 3, 3, 3
- vector.print str "tile_d:"
+ vector.print str "tile_d:\n"
vector.print %tile_d : vector<[8]x[8]xi16>
// CHECK-LABEL: tile_e:
// CHECK-COUNT-8: ( 4, 4, 4, 4, 4, 4, 4, 4
- vector.print str "tile_e:"
+ vector.print str "tile_e:\n"
vector.print %tile_e : vector<[8]x[8]xi16>
return
}
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir
index 22cf15d..51a0c8f 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir
@@ -36,7 +36,7 @@ func.func @matmul_f32() {
// Print and verify the output
// F32-LABEL: SVE: START OF TEST OUTPUT
- vector.print str "SVE: START OF TEST OUTPUT"
+ vector.print str "SVE: START OF TEST OUTPUT\n"
// F32-NEXT: Unranked Memref {{.*}} rank = 2 offset = 0 sizes = [5, 15] strides = [15, 1] data =
// F32-COUNT-5: [29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788, 29.5788]
@@ -44,7 +44,7 @@ func.func @matmul_f32() {
call @printMemrefF32(%xf) : (tensor<*xf32>) -> ()
// F32-NEXT: SVE: END OF TEST OUTPUT
- vector.print str "SVE: END OF TEST OUTPUT"
+ vector.print str "SVE: END OF TEST OUTPUT\n"
return
}
@@ -73,7 +73,7 @@ func.func @matmul_mixed_ty() {
// Print and verify the output
// MIXED-LABEL: SVE: START OF TEST OUTPUT
- vector.print str "SVE: START OF TEST OUTPUT"
+ vector.print str "SVE: START OF TEST OUTPUT\n"
// MIXED-NEXT: Unranked Memref {{.*}} rank = 2 offset = 0 sizes = [5, 15] strides = [15, 1] data =
// MIXED-COUNT-5: [45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387, 45387]
@@ -81,7 +81,7 @@ func.func @matmul_mixed_ty() {
call @printMemrefI32(%xf) : (tensor<*xi32>) -> ()
// MIXED-NEXT: SVE: END OF TEST OUTPUT
- vector.print str "SVE: END OF TEST OUTPUT"
+ vector.print str "SVE: END OF TEST OUTPUT\n"
return
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
index 6468c4b..1184d40 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -82,38 +82,39 @@ module {
return %0 : tensor<?x?xf64, #BSR>
}
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
%f0 = arith.constant 0.0 : f64
%fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
%A = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #BSR>
- // CHECK: ( 0, 2, 3 )
- // CHECK-NEXT: ( 0, 2, 1 )
- // CHECK-NEXT: ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
- %pos = sparse_tensor.positions %A {level = 1 : index } : tensor<?x?xf64, #BSR> to memref<?xindex>
- %vecp = vector.transfer_read %pos[%c0], %c0 : memref<?xindex>, vector<3xindex>
- vector.print %vecp : vector<3xindex>
- %crd = sparse_tensor.coordinates %A {level = 1 : index } : tensor<?x?xf64, #BSR> to memref<?xindex>
- %vecc = vector.transfer_read %crd[%c0], %c0 : memref<?xindex>, vector<3xindex>
- vector.print %vecc : vector<3xindex>
- %val = sparse_tensor.values %A : tensor<?x?xf64, #BSR> to memref<?xf64>
- %vecv = vector.transfer_read %val[%c0], %f0 : memref<?xf64>, vector<12xf64>
- vector.print %vecv : vector<12xf64>
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 2, 1,
+ // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %A : tensor<?x?xf64, #BSR>
- // CHECK-NEXT: ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 2, 1
+ // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
+ // CHECK-NEXT: ----
%t1 = sparse_tensor.reinterpret_map %A : tensor<?x?xf64, #BSR>
to tensor<?x?x2x2xf64, #DSDD>
- %vdsdd = sparse_tensor.values %t1 : tensor<?x?x2x2xf64, #DSDD> to memref<?xf64>
- %vecdsdd = vector.transfer_read %vdsdd[%c0], %f0 : memref<?xf64>, vector<12xf64>
- vector.print %vecdsdd : vector<12xf64>
+ sparse_tensor.print %t1 : tensor<?x?x2x2xf64, #DSDD>
- // CHECK-NEXT: ( 3, 6, 0, 9, 12, 0, 0, 15, 18, 21, 24, 0 )
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 2, 1,
+ // CHECK-NEXT: values : ( 3, 6, 0, 9, 12, 0, 0, 15, 18, 21, 24, 0,
+ // CHECK-NEXT: ----
%As = call @scale(%A) : (tensor<?x?xf64, #BSR>) -> (tensor<?x?xf64, #BSR>)
- %vals = sparse_tensor.values %As : tensor<?x?xf64, #BSR> to memref<?xf64>
- %vecs = vector.transfer_read %vals[%c0], %f0 : memref<?xf64>, vector<12xf64>
- vector.print %vecs : vector<12xf64>
+ sparse_tensor.print %As : tensor<?x?xf64, #BSR>
// Release the resources.
bufferization.dealloc_tensor %A: tensor<?x?xf64, #BSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
index cb06f09..f8e83b5 100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir
@@ -102,9 +102,15 @@
//
module {
- // CHECK: ( 0, 1, 2 )
- // CHECK-NEXT: ( 0, 2 )
- // CHECK-NEXT: ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
+
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[1] : ( 0, 1, 2,
+ // CHECK-NEXT: crd[1] : ( 0, 2,
+ // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7,
+ // CHECK-NEXT: ----
+ //
func.func @foo1() {
// Build.
%c0 = arith.constant 0 : index
@@ -115,23 +121,20 @@ module {
> : tensor<6x16xf64>
%s1 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_row_rowmajor>
// Test.
- %pos1 = sparse_tensor.positions %s1 {level = 1 : index } : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xindex>
- %vecp1 = vector.transfer_read %pos1[%c0], %c0 : memref<?xindex>, vector<3xindex>
- vector.print %vecp1 : vector<3xindex>
- %crd1 = sparse_tensor.coordinates %s1 {level = 1 : index } : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xindex>
- %vecc1 = vector.transfer_read %crd1[%c0], %c0 : memref<?xindex>, vector<2xindex>
- vector.print %vecc1 : vector<2xindex>
- %val1 = sparse_tensor.values %s1 : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xf64>
- %vecv1 = vector.transfer_read %val1[%c0], %f0 : memref<?xf64>, vector<24xf64>
- vector.print %vecv1 : vector<24xf64>
+ sparse_tensor.print %s1 : tensor<?x?xf64, #BSR_row_rowmajor>
// Release.
bufferization.dealloc_tensor %s1: tensor<?x?xf64, #BSR_row_rowmajor>
return
}
- // CHECK-NEXT: ( 0, 1, 2 )
- // CHECK-NEXT: ( 0, 2 )
- // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
+ //
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[1] : ( 0, 1, 2,
+ // CHECK-NEXT: crd[1] : ( 0, 2,
+ // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7,
+ // CHECK-NEXT: ----
+ //
func.func @foo2() {
// Build.
%c0 = arith.constant 0 : index
@@ -142,23 +145,20 @@ module {
> : tensor<6x16xf64>
%s2 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_row_colmajor>
// Test.
- %pos2 = sparse_tensor.positions %s2 {level = 1 : index } : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xindex>
- %vecp2 = vector.transfer_read %pos2[%c0], %c0 : memref<?xindex>, vector<3xindex>
- vector.print %vecp2 : vector<3xindex>
- %crd2 = sparse_tensor.coordinates %s2 {level = 1 : index } : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xindex>
- %vecc2 = vector.transfer_read %crd2[%c0], %c0 : memref<?xindex>, vector<2xindex>
- vector.print %vecc2 : vector<2xindex>
- %val2 = sparse_tensor.values %s2 : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xf64>
- %vecv2 = vector.transfer_read %val2[%c0], %f0 : memref<?xf64>, vector<24xf64>
- vector.print %vecv2 : vector<24xf64>
+ sparse_tensor.print %s2 : tensor<?x?xf64, #BSR_row_colmajor>
// Release.
bufferization.dealloc_tensor %s2: tensor<?x?xf64, #BSR_row_colmajor>
return
}
- // CHECK-NEXT: ( 0, 1, 1, 2, 2 )
- // CHECK-NEXT: ( 0, 1 )
- // CHECK-NEXT: ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
+ //
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2,
+ // CHECK-NEXT: crd[1] : ( 0, 1,
+ // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7,
+ // CHECK-NEXT: ----
+ //
func.func @foo3() {
// Build.
%c0 = arith.constant 0 : index
@@ -169,23 +169,20 @@ module {
> : tensor<6x16xf64>
%s3 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_col_rowmajor>
// Test.
- %pos3 = sparse_tensor.positions %s3 {level = 1 : index } : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xindex>
- %vecp3 = vector.transfer_read %pos3[%c0], %c0 : memref<?xindex>, vector<5xindex>
- vector.print %vecp3 : vector<5xindex>
- %crd3 = sparse_tensor.coordinates %s3 {level = 1 : index } : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xindex>
- %vecc3 = vector.transfer_read %crd3[%c0], %c0 : memref<?xindex>, vector<2xindex>
- vector.print %vecc3 : vector<2xindex>
- %val3 = sparse_tensor.values %s3 : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xf64>
- %vecv3 = vector.transfer_read %val3[%c0], %f0 : memref<?xf64>, vector<24xf64>
- vector.print %vecv3 : vector<24xf64>
+ sparse_tensor.print %s3 : tensor<?x?xf64, #BSR_col_rowmajor>
// Release.
bufferization.dealloc_tensor %s3: tensor<?x?xf64, #BSR_col_rowmajor>
return
}
- // CHECK-NEXT: ( 0, 1, 1, 2, 2 )
- // CHECK-NEXT: ( 0, 1 )
- // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
+ //
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2,
+ // CHECK-NEXT: crd[1] : ( 0, 1,
+ // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7,
+ // CHECK-NEXT: ----
+ //
func.func @foo4() {
// Build.
%c0 = arith.constant 0 : index
@@ -196,15 +193,7 @@ module {
> : tensor<6x16xf64>
%s4 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_col_colmajor>
// Test.
- %pos4 = sparse_tensor.positions %s4 {level = 1 : index } : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xindex>
- %vecp4 = vector.transfer_read %pos4[%c0], %c0 : memref<?xindex>, vector<5xindex>
- vector.print %vecp4 : vector<5xindex>
- %crd4 = sparse_tensor.coordinates %s4 {level = 1 : index } : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xindex>
- %vecc4 = vector.transfer_read %crd4[%c0], %c0 : memref<?xindex>, vector<2xindex>
- vector.print %vecc4 : vector<2xindex>
- %val4 = sparse_tensor.values %s4 : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xf64>
- %vecv4 = vector.transfer_read %val4[%c0], %f0 : memref<?xf64>, vector<24xf64>
- vector.print %vecv4 : vector<24xf64>
+ sparse_tensor.print %s4 : tensor<?x?xf64, #BSR_col_colmajor>
// Release.
bufferization.dealloc_tensor %s4: tensor<?x?xf64, #BSR_col_colmajor>
return
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
index 5f6524a4..c6ee0ce 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -83,12 +83,11 @@ module {
}
func.func private @getTensorFilename(index) -> (!Filename)
- func.func private @printMemref1dF64(%ptr : memref<?xf64>) attributes { llvm.emit_c_interface }
//
// Main driver that reads matrix from file and calls the kernel.
//
- func.func @entry() {
+ func.func @main() {
%d0 = arith.constant 0.0 : f64
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -104,14 +103,13 @@ module {
//
// Print the linearized 5x5 result for verification.
- // CHECK: 25
- // CHECK: [2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10
//
- %n = sparse_tensor.number_of_entries %0 : tensor<?x?xf64, #DenseMatrix>
- vector.print %n : index
- %m = sparse_tensor.values %0
- : tensor<?x?xf64, #DenseMatrix> to memref<?xf64>
- call @printMemref1dF64(%m) : (memref<?xf64>) -> ()
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 25
+ // CHECK-NEXT: values : ( 2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10,
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %0 : tensor<?x?xf64, #DenseMatrix>
// Release the resources.
bufferization.dealloc_tensor %a : tensor<?x?xf64, #SparseMatrix>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
index 81cd2d8..0b34ff5 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -67,20 +67,8 @@ module {
return %0 : tensor<?xbf16, #DenseVector>
}
- // Dumps a dense vector of type bf16.
- func.func @dump_vec(%arg0: tensor<?xbf16, #DenseVector>) {
- // Dump the values array to verify only sparse contents are stored.
- %c0 = arith.constant 0 : index
- %d0 = arith.constant -1.0 : bf16
- %0 = sparse_tensor.values %arg0 : tensor<?xbf16, #DenseVector> to memref<?xbf16>
- %1 = vector.transfer_read %0[%c0], %d0: memref<?xbf16>, vector<32xbf16>
- %f1 = arith.extf %1: vector<32xbf16> to vector<32xf32>
- vector.print %f1 : vector<32xf32>
- return
- }
-
// Driver method to call and verify the kernel.
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
// Setup sparse vectors.
@@ -103,8 +91,12 @@ module {
//
// Verify the result.
//
- // CHECK: ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
- call @dump_vec(%0) : (tensor<?xbf16, #DenseVector>) -> ()
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 32
+ // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9,
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %0 : tensor<?xbf16, #DenseVector>
// Release the resources.
bufferization.dealloc_tensor %sv1 : tensor<?xbf16, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
index b320afd..4956821 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -68,20 +68,8 @@ module {
return %0 : tensor<?xf16, #DenseVector>
}
- // Dumps a dense vector of type f16.
- func.func @dump_vec(%arg0: tensor<?xf16, #DenseVector>) {
- // Dump the values array to verify only sparse contents are stored.
- %c0 = arith.constant 0 : index
- %d0 = arith.constant -1.0 : f16
- %0 = sparse_tensor.values %arg0 : tensor<?xf16, #DenseVector> to memref<?xf16>
- %1 = vector.transfer_read %0[%c0], %d0: memref<?xf16>, vector<32xf16>
- %f1 = arith.extf %1: vector<32xf16> to vector<32xf32>
- vector.print %f1 : vector<32xf32>
- return
- }
-
// Driver method to call and verify the kernel.
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
// Setup sparse vectors.
@@ -104,8 +92,12 @@ module {
//
// Verify the result.
//
- // CHECK: ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
- call @dump_vec(%0) : (tensor<?xf16, #DenseVector>) -> ()
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 32
+ // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9,
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %0 : tensor<?xf16, #DenseVector>
// Release the resources.
bufferization.dealloc_tensor %sv1 : tensor<?xf16, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
index c141df6..3a32ff2 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
@@ -45,91 +45,6 @@
module {
-
- func.func @dump(%arg0: tensor<5x4x3xf64, #TensorCSR>) {
- %c0 = arith.constant 0 : index
- %fu = arith.constant 99.0 : f64
- %p0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<5x4x3xf64, #TensorCSR> to memref<?xindex>
- %i0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<5x4x3xf64, #TensorCSR> to memref<?xindex>
- %p2 = sparse_tensor.positions %arg0 { level = 2 : index } : tensor<5x4x3xf64, #TensorCSR> to memref<?xindex>
- %i2 = sparse_tensor.coordinates %arg0 { level = 2 : index } : tensor<5x4x3xf64, #TensorCSR> to memref<?xindex>
- %v = sparse_tensor.values %arg0 : tensor<5x4x3xf64, #TensorCSR> to memref<?xf64>
- %vp0 = vector.transfer_read %p0[%c0], %c0: memref<?xindex>, vector<2xindex>
- vector.print %vp0 : vector<2xindex>
- %vi0 = vector.transfer_read %i0[%c0], %c0: memref<?xindex>, vector<2xindex>
- vector.print %vi0 : vector<2xindex>
- %vp2 = vector.transfer_read %p2[%c0], %c0: memref<?xindex>, vector<9xindex>
- vector.print %vp2 : vector<9xindex>
- %vi2 = vector.transfer_read %i2[%c0], %c0: memref<?xindex>, vector<5xindex>
- vector.print %vi2 : vector<5xindex>
- %vv = vector.transfer_read %v[%c0], %fu: memref<?xf64>, vector<5xf64>
- vector.print %vv : vector<5xf64>
- return
- }
-
- func.func @dump_row(%arg0: tensor<5x4x3xf64, #TensorRow>) {
- %c0 = arith.constant 0 : index
- %fu = arith.constant 99.0 : f64
- %p0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<5x4x3xf64, #TensorRow> to memref<?xindex>
- %i0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<5x4x3xf64, #TensorRow> to memref<?xindex>
- %p1 = sparse_tensor.positions %arg0 { level = 1 : index } : tensor<5x4x3xf64, #TensorRow> to memref<?xindex>
- %i1 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<5x4x3xf64, #TensorRow> to memref<?xindex>
- %v = sparse_tensor.values %arg0 : tensor<5x4x3xf64, #TensorRow> to memref<?xf64>
- %vp0 = vector.transfer_read %p0[%c0], %c0: memref<?xindex>, vector<2xindex>
- vector.print %vp0 : vector<2xindex>
- %vi0 = vector.transfer_read %i0[%c0], %c0: memref<?xindex>, vector<2xindex>
- vector.print %vi0 : vector<2xindex>
- %vp1 = vector.transfer_read %p1[%c0], %c0: memref<?xindex>, vector<3xindex>
- vector.print %vp1 : vector<3xindex>
- %vi1 = vector.transfer_read %i1[%c0], %c0: memref<?xindex>, vector<4xindex>
- vector.print %vi1 : vector<4xindex>
- %vv = vector.transfer_read %v[%c0], %fu: memref<?xf64>, vector<12xf64>
- vector.print %vv : vector<12xf64>
- return
- }
-
-func.func @dump_ccoo(%arg0: tensor<5x4x3xf64, #CCoo>) {
- %c0 = arith.constant 0 : index
- %fu = arith.constant 99.0 : f64
- %p0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
- %i0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
- %p1 = sparse_tensor.positions %arg0 { level = 1 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
- %i1 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
- %i2 = sparse_tensor.coordinates %arg0 { level = 2 : index } : tensor<5x4x3xf64, #CCoo> to memref<?xindex>
- %v = sparse_tensor.values %arg0 : tensor<5x4x3xf64, #CCoo> to memref<?xf64>
- %vp0 = vector.transfer_read %p0[%c0], %c0: memref<?xindex>, vector<2xindex>
- vector.print %vp0 : vector<2xindex>
- %vi0 = vector.transfer_read %i0[%c0], %c0: memref<?xindex>, vector<2xindex>
- vector.print %vi0 : vector<2xindex>
- %vp1 = vector.transfer_read %p1[%c0], %c0: memref<?xindex>, vector<3xindex>
- vector.print %vp1 : vector<3xindex>
- %vi1 = vector.transfer_read %i1[%c0], %c0: memref<?xindex>, vector<5xindex>
- vector.print %vi1 : vector<5xindex>
- %vi2 = vector.transfer_read %i2[%c0], %c0: memref<?xindex>, vector<5xindex>
- vector.print %vi2 : vector<5xindex>
- %vv = vector.transfer_read %v[%c0], %fu: memref<?xf64>, vector<5xf64>
- vector.print %vv : vector<5xf64>
- return
- }
-
-func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
- %c0 = arith.constant 0 : index
- %fu = arith.constant 99.0 : f64
- %p1 = sparse_tensor.positions %arg0 { level = 1 : index } : tensor<5x4x3xf64, #DCoo> to memref<?xindex>
- %i1 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<5x4x3xf64, #DCoo> to memref<?xindex>
- %i2 = sparse_tensor.coordinates %arg0 { level = 2 : index } : tensor<5x4x3xf64, #DCoo> to memref<?xindex>
- %v = sparse_tensor.values %arg0 : tensor<5x4x3xf64, #DCoo> to memref<?xf64>
- %vp1 = vector.transfer_read %p1[%c0], %c0: memref<?xindex>, vector<6xindex>
- vector.print %vp1 : vector<6xindex>
- %vi1 = vector.transfer_read %i1[%c0], %c0: memref<?xindex>, vector<5xindex>
- vector.print %vi1 : vector<5xindex>
- %vi2 = vector.transfer_read %i2[%c0], %c0: memref<?xindex>, vector<5xindex>
- vector.print %vi2 : vector<5xindex>
- %vv = vector.transfer_read %v[%c0], %fu: memref<?xf64>, vector<5xf64>
- vector.print %vv : vector<5xf64>
- return
-}
-
//
// Main driver.
//
@@ -145,13 +60,14 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
%f4 = arith.constant 4.4 : f64
%f5 = arith.constant 5.5 : f64
- //
- // CHECK: ( 0, 2 )
- // CHECK-NEXT: ( 3, 4 )
- // CHECK-NEXT: ( 0, 2, 2, 2, 3, 3, 3, 4, 5 )
- // CHECK-NEXT: ( 1, 2, 1, 2, 2 )
- // CHECK-NEXT: ( 1.1, 2.2, 3.3, 4.4, 5.5 )
- //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[0] : ( 0, 2
+ // CHECK-NEXT: crd[0] : ( 3, 4
+ // CHECK-NEXT: pos[2] : ( 0, 2, 2, 2, 3, 3, 3, 4, 5
+ // CHECK-NEXT: crd[2] : ( 1, 2, 1, 2, 2
+ // CHECK-NEXT: values : ( 1.1, 2.2, 3.3, 4.4, 5.5
+ // CHECK-NEXT: ----
%tensora = tensor.empty() : tensor<5x4x3xf64, #TensorCSR>
%tensor1 = sparse_tensor.insert %f1 into %tensora[%c3, %c0, %c1] : tensor<5x4x3xf64, #TensorCSR>
%tensor2 = sparse_tensor.insert %f2 into %tensor1[%c3, %c0, %c2] : tensor<5x4x3xf64, #TensorCSR>
@@ -159,15 +75,16 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
%tensor4 = sparse_tensor.insert %f4 into %tensor3[%c4, %c2, %c2] : tensor<5x4x3xf64, #TensorCSR>
%tensor5 = sparse_tensor.insert %f5 into %tensor4[%c4, %c3, %c2] : tensor<5x4x3xf64, #TensorCSR>
%tensorm = sparse_tensor.load %tensor5 hasInserts : tensor<5x4x3xf64, #TensorCSR>
- call @dump(%tensorm) : (tensor<5x4x3xf64, #TensorCSR>) -> ()
-
- //
- // CHECK-NEXT: ( 0, 2 )
- // CHECK-NEXT: ( 3, 4 )
- // CHECK-NEXT: ( 0, 2, 4 )
- // CHECK-NEXT: ( 0, 3, 2, 3 )
- // CHECK-NEXT: ( 0, 1.1, 2.2, 0, 3.3, 0, 0, 0, 4.4, 0, 0, 5.5 )
- //
+ sparse_tensor.print %tensorm : tensor<5x4x3xf64, #TensorCSR>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: pos[0] : ( 0, 2
+ // CHECK-NEXT: crd[0] : ( 3, 4
+ // CHECK-NEXT: pos[1] : ( 0, 2, 4
+ // CHECK-NEXT: crd[1] : ( 0, 3, 2, 3
+ // CHECK-NEXT: values : ( 0, 1.1, 2.2, 0, 3.3, 0, 0, 0, 4.4, 0, 0, 5.5
+ // CHECK-NEXT: ----
%rowa = tensor.empty() : tensor<5x4x3xf64, #TensorRow>
%row1 = sparse_tensor.insert %f1 into %rowa[%c3, %c0, %c1] : tensor<5x4x3xf64, #TensorRow>
%row2 = sparse_tensor.insert %f2 into %row1[%c3, %c0, %c2] : tensor<5x4x3xf64, #TensorRow>
@@ -175,15 +92,16 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
%row4 = sparse_tensor.insert %f4 into %row3[%c4, %c2, %c2] : tensor<5x4x3xf64, #TensorRow>
%row5 = sparse_tensor.insert %f5 into %row4[%c4, %c3, %c2] : tensor<5x4x3xf64, #TensorRow>
%rowm = sparse_tensor.load %row5 hasInserts : tensor<5x4x3xf64, #TensorRow>
- call @dump_row(%rowm) : (tensor<5x4x3xf64, #TensorRow>) -> ()
-
- //
- // CHECK: ( 0, 2 )
- // CHECK-NEXT: ( 3, 4 )
- // CHECK-NEXT: ( 0, 3, 5 )
- // CHECK-NEXT: ( 0, 0, 3, 2, 3 )
- // CHECK-NEXT: ( 1, 2, 1, 2, 2 )
- // CHECK-NEXT: ( 1.1, 2.2, 3.3, 4.4, 5.5 )
+ sparse_tensor.print %rowm : tensor<5x4x3xf64, #TensorRow>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[0] : ( 0, 2
+ // CHECK-NEXT: crd[0] : ( 3, 4
+ // CHECK-NEXT: pos[1] : ( 0, 3, 5
+ // CHECK-NEXT: crd[1] : ( 0, 1, 0, 2, 3, 1, 2, 2, 3, 2
+ // CHECK-NEXT: values : ( 1.1, 2.2, 3.3, 4.4, 5.5
+ // CHECK-NEXT: ----
%ccoo = tensor.empty() : tensor<5x4x3xf64, #CCoo>
%ccoo1 = sparse_tensor.insert %f1 into %ccoo[%c3, %c0, %c1] : tensor<5x4x3xf64, #CCoo>
%ccoo2 = sparse_tensor.insert %f2 into %ccoo1[%c3, %c0, %c2] : tensor<5x4x3xf64, #CCoo>
@@ -191,13 +109,14 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
%ccoo4 = sparse_tensor.insert %f4 into %ccoo3[%c4, %c2, %c2] : tensor<5x4x3xf64, #CCoo>
%ccoo5 = sparse_tensor.insert %f5 into %ccoo4[%c4, %c3, %c2] : tensor<5x4x3xf64, #CCoo>
%ccoom = sparse_tensor.load %ccoo5 hasInserts : tensor<5x4x3xf64, #CCoo>
- call @dump_ccoo(%ccoom) : (tensor<5x4x3xf64, #CCoo>) -> ()
-
- //
- // CHECK-NEXT: ( 0, 0, 0, 0, 3, 5 )
- // CHECK-NEXT: ( 0, 0, 3, 2, 3 )
- // CHECK-NEXT: ( 1, 2, 1, 2, 2 )
- // CHECK-NEXT: ( 1.1, 2.2, 3.3, 4.4, 5.5 )
+ sparse_tensor.print %ccoom : tensor<5x4x3xf64, #CCoo>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[1] : ( 0, 0, 0, 0, 3, 5
+ // CHECK-NEXT: crd[1] : ( 0, 1, 0, 2, 3, 1, 2, 2, 3, 2
+ // CHECK-NEXT: values : ( 1.1, 2.2, 3.3, 4.4, 5.5
+ // CHECK-NEXT: ----
%dcoo = tensor.empty() : tensor<5x4x3xf64, #DCoo>
%dcoo1 = sparse_tensor.insert %f1 into %dcoo[%c3, %c0, %c1] : tensor<5x4x3xf64, #DCoo>
%dcoo2 = sparse_tensor.insert %f2 into %dcoo1[%c3, %c0, %c2] : tensor<5x4x3xf64, #DCoo>
@@ -205,23 +124,7 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) {
%dcoo4 = sparse_tensor.insert %f4 into %dcoo3[%c4, %c2, %c2] : tensor<5x4x3xf64, #DCoo>
%dcoo5 = sparse_tensor.insert %f5 into %dcoo4[%c4, %c3, %c2] : tensor<5x4x3xf64, #DCoo>
%dcoom = sparse_tensor.load %dcoo5 hasInserts : tensor<5x4x3xf64, #DCoo>
- call @dump_dcoo(%dcoom) : (tensor<5x4x3xf64, #DCoo>) -> ()
-
- // NOE sanity check.
- //
- // CHECK-NEXT: 5
- // CHECK-NEXT: 12
- // CHECK-NEXT: 5
- // CHECK-NEXT: 5
- //
- %noe1 = sparse_tensor.number_of_entries %tensorm : tensor<5x4x3xf64, #TensorCSR>
- vector.print %noe1 : index
- %noe2 = sparse_tensor.number_of_entries %rowm : tensor<5x4x3xf64, #TensorRow>
- vector.print %noe2 : index
- %noe3 = sparse_tensor.number_of_entries %ccoom : tensor<5x4x3xf64, #CCoo>
- vector.print %noe3 : index
- %noe4 = sparse_tensor.number_of_entries %dcoom : tensor<5x4x3xf64, #DCoo>
- vector.print %noe4 : index
+ sparse_tensor.print %dcoom : tensor<5x4x3xf64, #DCoo>
// Release resources.
bufferization.dealloc_tensor %tensorm : tensor<5x4x3xf64, #TensorCSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir
new file mode 100755
index 0000000..79728fd
--- /dev/null
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir
@@ -0,0 +1,269 @@
+//--------------------------------------------------------------------------------------------------
+// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
+//
+// Set-up that's shared across all tests in this directory. In principle, this
+// config could be moved to lit.local.cfg. However, there are downstream users that
+// do not use these LIT config files. Hence this is kept inline.
+//
+// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
+// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
+// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
+// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
+// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
+// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
+// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
+//
+// DEFINE: %{env} =
+//--------------------------------------------------------------------------------------------------
+
+// RUN: %{compile} | %{run} | FileCheck %s
+//
+// Do the same run, but now with direct IR generation.
+// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false enable-buffer-initialization=true
+// RUN: %{compile} | %{run} | FileCheck %s
+//
+
+#AllDense = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ i : dense,
+ j : dense
+ )
+}>
+
+#AllDenseT = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ j : dense,
+ i : dense
+ )
+}>
+
+#CSR = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ i : dense,
+ j : compressed
+ )
+}>
+
+#DCSR = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ i : compressed,
+ j : compressed
+ )
+}>
+
+#CSC = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ j : dense,
+ i : compressed
+ )
+}>
+
+#DCSC = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ j : compressed,
+ i : compressed
+ )
+}>
+
+#BSR = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ i floordiv 2 : compressed,
+ j floordiv 4 : compressed,
+ i mod 2 : dense,
+ j mod 4 : dense
+ )
+}>
+
+#BSRC = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ i floordiv 2 : compressed,
+ j floordiv 4 : compressed,
+ j mod 4 : dense,
+ i mod 2 : dense
+ )
+}>
+
+#BSC = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ j floordiv 4 : compressed,
+ i floordiv 2 : compressed,
+ i mod 2 : dense,
+ j mod 4 : dense
+ )
+}>
+
+#BSCC = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ j floordiv 4 : compressed,
+ i floordiv 2 : compressed,
+ j mod 4 : dense,
+ i mod 2 : dense
+ )
+}>
+
+#BSR0 = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ i floordiv 2 : dense,
+ j floordiv 4 : compressed,
+ i mod 2 : dense,
+ j mod 4 : dense
+ )
+}>
+
+#BSC0 = #sparse_tensor.encoding<{
+ map = (i, j) -> (
+ j floordiv 4 : dense,
+ i floordiv 2 : compressed,
+ i mod 2 : dense,
+ j mod 4 : dense
+ )
+}>
+
+module {
+
+ //
+ // Main driver that tests sparse tensor storage.
+ //
+ func.func @main() {
+ %x = arith.constant dense <[
+ [ 1, 0, 2, 0, 0, 0, 0, 0 ],
+ [ 0, 0, 0, 0, 0, 0, 0, 0 ],
+ [ 0, 0, 0, 0, 0, 0, 0, 0 ],
+ [ 0, 0, 3, 4, 0, 5, 0, 0 ] ]> : tensor<4x8xi32>
+
+ %XO = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #AllDense>
+ %XT = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #AllDenseT>
+
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 32
+ // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 5, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %XO : tensor<4x8xi32, #AllDense>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 32
+ // CHECK-NEXT: values : ( 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %XT : tensor<4x8xi32, #AllDenseT>
+
+ %a = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #CSR>
+ %b = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #DCSR>
+ %c = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #CSC>
+ %d = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #DCSC>
+ %e = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSR>
+ %f = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSRC>
+ %g = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSC>
+ %h = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSCC>
+ %i = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSR0>
+ %j = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSC0>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[1] : ( 0, 2, 2, 2, 5,
+ // CHECK-NEXT: crd[1] : ( 0, 2, 2, 3, 5,
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %a : tensor<4x8xi32, #CSR>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[0] : ( 0, 2,
+ // CHECK-NEXT: crd[0] : ( 0, 3,
+ // CHECK-NEXT: pos[1] : ( 0, 2, 5,
+ // CHECK-NEXT: crd[1] : ( 0, 2, 2, 3, 5,
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %b : tensor<4x8xi32, #DCSR>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[1] : ( 0, 1, 1, 3, 4, 4, 5, 5, 5,
+ // CHECK-NEXT: crd[1] : ( 0, 0, 3, 3, 3,
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %c : tensor<4x8xi32, #CSC>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: pos[0] : ( 0, 4,
+ // CHECK-NEXT: crd[0] : ( 0, 2, 3, 5,
+ // CHECK-NEXT: pos[1] : ( 0, 1, 3, 4, 5,
+ // CHECK-NEXT: crd[1] : ( 0, 0, 3, 3, 3,
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %d : tensor<4x8xi32, #DCSC>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[0] : ( 0, 2,
+ // CHECK-NEXT: crd[0] : ( 0, 1,
+ // CHECK-NEXT: pos[1] : ( 0, 1, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 0, 1,
+ // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 5, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %e : tensor<4x8xi32, #BSR>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[0] : ( 0, 2,
+ // CHECK-NEXT: crd[0] : ( 0, 1,
+ // CHECK-NEXT: pos[1] : ( 0, 1, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 0, 1,
+ // CHECK-NEXT: values : ( 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 4, 0, 0, 0, 5, 0, 0, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %f : tensor<4x8xi32, #BSRC>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[0] : ( 0, 2,
+ // CHECK-NEXT: crd[0] : ( 0, 1,
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 1, 1,
+ // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 5, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %g : tensor<4x8xi32, #BSC>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[0] : ( 0, 2,
+ // CHECK-NEXT: crd[0] : ( 0, 1,
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 1, 1,
+ // CHECK-NEXT: values : ( 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 4, 0, 0, 0, 5, 0, 0, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %h : tensor<4x8xi32, #BSCC>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[1] : ( 0, 1, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 0, 1,
+ // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 5, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %i : tensor<4x8xi32, #BSR0>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 1, 1,
+ // CHECK-NEXT: values : ( 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 5, 0, 0,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %j : tensor<4x8xi32, #BSC0>
+
+ // Release the resources.
+ bufferization.dealloc_tensor %XO : tensor<4x8xi32, #AllDense>
+ bufferization.dealloc_tensor %XT : tensor<4x8xi32, #AllDenseT>
+ bufferization.dealloc_tensor %a : tensor<4x8xi32, #CSR>
+ bufferization.dealloc_tensor %b : tensor<4x8xi32, #DCSR>
+ bufferization.dealloc_tensor %c : tensor<4x8xi32, #CSC>
+ bufferization.dealloc_tensor %d : tensor<4x8xi32, #DCSC>
+ bufferization.dealloc_tensor %e : tensor<4x8xi32, #BSR>
+ bufferization.dealloc_tensor %f : tensor<4x8xi32, #BSRC>
+ bufferization.dealloc_tensor %g : tensor<4x8xi32, #BSC>
+ bufferization.dealloc_tensor %h : tensor<4x8xi32, #BSCC>
+ bufferization.dealloc_tensor %i : tensor<4x8xi32, #BSR0>
+ bufferization.dealloc_tensor %j : tensor<4x8xi32, #BSC0>
+
+ return
+ }
+}
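A short reading of the new print format, as exercised above (an interpretation of the CHECK lines, not normative documentation of `sparse_tensor.print`): for the #CSR conversion of %x, pos[1] = ( 0, 2, 2, 2, 5 ) delimits the column entries owned by each of the four rows, so row 0 owns entries [0, 2) and row 3 owns entries [2, 5); crd[1] = ( 0, 2, 2, 3, 5 ) lists their column indices; and values = ( 1, 2, 3, 4, 5 ) holds the stored nonzeros. This matches the dense input, whose row 0 has 1 and 2 at columns 0 and 2, and whose row 3 has 3, 4 and 5 at columns 2, 3 and 5. The nse line reports the number of stored entries, which for the all-dense and block encodings includes explicit zeros (32 and 24, respectively).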
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
index b44ffc3..1860fc1 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -72,22 +72,7 @@ module {
return %0 : tensor<?xf32, #SparseVector>
}
- func.func @dump(%arg0: tensor<?xf32, #SparseVector>) {
- %c0 = arith.constant 0 : index
- %d0 = arith.constant -1.0 : f32
- %n = sparse_tensor.number_of_entries %arg0 : tensor<?xf32, #SparseVector>
- vector.print %n : index
- %values = sparse_tensor.values %arg0 : tensor<?xf32, #SparseVector> to memref<?xf32>
- %0 = vector.transfer_read %values[%c0], %d0: memref<?xf32>, vector<3xf32>
- vector.print %0 : vector<3xf32>
- %coordinates = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<?xf32, #SparseVector> to memref<?xindex>
- %1 = vector.transfer_read %coordinates[%c0], %c0: memref<?xindex>, vector<3xindex>
- vector.print %1 : vector<3xindex>
- return
- }
-
- // Driver method to call and verify functions cim and cre.
- func.func @entry() {
+ func.func @main() {
// Setup sparse vectors.
%v1 = arith.constant sparse<
[ [0], [20], [31] ],
@@ -104,20 +89,27 @@ module {
//
// Verify the results.
//
- // CHECK: 3
- // CHECK-NEXT: ( 5.13, 3, 5 )
- // CHECK-NEXT: ( 0, 20, 31 )
- // CHECK-NEXT: 3
- // CHECK-NEXT: ( 2, 4, 6 )
- // CHECK-NEXT: ( 0, 20, 31 )
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 3
+ // CHECK-NEXT: pos[0] : ( 0, 3,
+ // CHECK-NEXT: crd[0] : ( 0, 20, 31,
+ // CHECK-NEXT: values : ( 5.13, 3, 5,
+ // CHECK-NEXT: ----
+ //
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 3
+ // CHECK-NEXT: pos[0] : ( 0, 3,
+ // CHECK-NEXT: crd[0] : ( 0, 20, 31,
+ // CHECK-NEXT: values : ( 2, 4, 6,
+ // CHECK-NEXT: ----
//
- call @dump(%0) : (tensor<?xf32, #SparseVector>) -> ()
- call @dump(%1) : (tensor<?xf32, #SparseVector>) -> ()
+ sparse_tensor.print %0 : tensor<?xf32, #SparseVector>
+ sparse_tensor.print %1 : tensor<?xf32, #SparseVector>
// Release the resources.
bufferization.dealloc_tensor %sv1 : tensor<?xcomplex<f32>, #SparseVector>
- bufferization.dealloc_tensor %0 : tensor<?xf32, #SparseVector>
- bufferization.dealloc_tensor %1 : tensor<?xf32, #SparseVector>
+ bufferization.dealloc_tensor %0 : tensor<?xf32, #SparseVector>
+ bufferization.dealloc_tensor %1 : tensor<?xf32, #SparseVector>
return
}
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/Emulated/test-setArmSVLBits.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/Emulated/test-setArmSVLBits.mlir
index 4151811..1794564 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/Emulated/test-setArmSVLBits.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/Emulated/test-setArmSVLBits.mlir
@@ -12,13 +12,13 @@ func.func @checkSVL() {
%svl_h = arm_sme.streaming_vl <half>
%svl_w = arm_sme.streaming_vl <word>
%svl_d = arm_sme.streaming_vl <double>
- vector.print str "SVL.b"
+ vector.print str "SVL.b\n"
vector.print %svl_b : index
- vector.print str "SVL.h"
+ vector.print str "SVL.h\n"
vector.print %svl_h : index
- vector.print str "SVL.w"
+ vector.print str "SVL.w\n"
vector.print %svl_w : index
- vector.print str "SVL.d"
+ vector.print str "SVL.d\n"
vector.print %svl_d : index
return
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir
index 2b8899b..41e7248 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir
@@ -53,13 +53,13 @@ func.func @test_load_store_zaq0() {
// CHECK-LABEL: INITIAL TILE A:
// CHECK: ( 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 )
- vector.print str "INITIAL TILE A:"
+ vector.print str "INITIAL TILE A:\n"
func.call @print_i8s(%tile_a_bytes, %zaq_size_bytes) : (memref<?xi8>, index) -> ()
vector.print punctuation <newline>
// CHECK-LABEL: INITIAL TILE B:
// CHECK: ( 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64 )
- vector.print str "INITIAL TILE B:"
+ vector.print str "INITIAL TILE B:\n"
func.call @print_i8s(%tile_b_bytes, %zaq_size_bytes) : (memref<?xi8>, index) -> ()
vector.print punctuation <newline>
@@ -68,13 +68,13 @@ func.func @test_load_store_zaq0() {
// CHECK-LABEL: FINAL TILE A:
// CHECK: ( 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 )
- vector.print str "FINAL TILE A:"
+ vector.print str "FINAL TILE A:\n"
func.call @print_i8s(%tile_a_bytes, %zaq_size_bytes) : (memref<?xi8>, index) -> ()
vector.print punctuation <newline>
// CHECK-LABEL: FINAL TILE B:
// CHECK: ( 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 )
- vector.print str "FINAL TILE B:"
+ vector.print str "FINAL TILE B:\n"
func.call @print_i8s(%tile_b_bytes, %zaq_size_bytes) : (memref<?xi8>, index) -> ()
return
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir
index 27be801..68c31ac 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir
@@ -49,12 +49,12 @@ func.func @entry() {
// CHECK-NEXT: ( 2, 2, 2, 2
// CHECK-NEXT: ( 3, 3, 3, 3
// CHECK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
scf.for %i = %c0 to %za_s_size step %svl_s {
%tileslice = vector.load %mem1[%i] : memref<?xi32>, vector<[4]xi32>
vector.print %tileslice : vector<[4]xi32>
}
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
// 2. VERTICAL LAYOUT
// Dump "mem2". The smallest SVL is 128-bits so the tile will be at least
@@ -66,9 +66,9 @@ func.func @entry() {
// CHECK-NEXT: ( 0, 1, 2, 3
// CHECK-NEXT: ( 0, 1, 2, 3
// CHECK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %0 : vector<[4]x[4]xi32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir
index 9d836d9..cd48f2a 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir
@@ -46,12 +46,12 @@ func.func @testTransposedReadWithMask(%maskRows: index, %maskCols: index) {
vector.transfer_write %readTransposed, %outDyn[%c0, %c0] {in_bounds = [true, true]} : vector<[16]x[4]xf32>, memref<?x?xf32>
/// Print the input memref.
- vector.print str "Input memref:"
+ vector.print str "Input memref:\n"
%inUnranked = memref.cast %inDyn : memref<?x?xf32> to memref<*xf32>
call @printMemrefF32(%inUnranked) : (memref<*xf32>) -> ()
/// Print the result memref.
- vector.print str "Masked transposed result:"
+ vector.print str "Masked transposed result:\n"
%outUnranked = memref.cast %outDyn : memref<?x?xf32> to memref<*xf32>
call @printMemrefF32(%outUnranked) : (memref<*xf32>) -> ()
@@ -84,12 +84,12 @@ func.func @testTransposedWriteWithMask(%maskRows: index, %maskCols: index) {
: vector<[16]x[4]xf32>, memref<?x?xf32>
/// Print the input memref.
- vector.print str "Input memref:"
+ vector.print str "Input memref:\n"
%inUnranked = memref.cast %inDyn : memref<?x?xf32> to memref<*xf32>
call @printMemrefF32(%inUnranked) : (memref<*xf32>) -> ()
/// Print the result memref.
- vector.print str "Masked transposed result:"
+ vector.print str "Masked transposed result:\n"
%outUnranked = memref.cast %outDyn : memref<?x?xf32> to memref<*xf32>
call @printMemrefF32(%outUnranked) : (memref<*xf32>) -> ()
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir
index 7e7869d..fb6c06c 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir
@@ -35,9 +35,9 @@ func.func @test_outerproduct_no_accumulator_4x4xf32() {
// WITHOUT-ACC-NEXT: ( 0, 2, 4, 6
// WITHOUT-ACC-NEXT: ( 0, 3, 6, 9
// WITHOUT-ACC: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[4]x[4]xf32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
@@ -60,9 +60,9 @@ func.func @test_outerproduct_with_accumulator_4x4xf32() {
// WITH-ACC-NEXT: ( 10, 12, 14, 16
// WITH-ACC-NEXT: ( 10, 13, 16, 19
// WITH-ACC: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[4]x[4]xf32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
@@ -91,9 +91,9 @@ func.func @test_masked_outerproduct_no_accumulator_4x4xf32() {
// WITH-MASK-NEXT: ( 3, 6, 0, 0
// WITH-MASK-NEXT: ( 0, 0, 0, 0
// WITH-MASK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[4]x[4]xf32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
@@ -124,9 +124,9 @@ func.func @test_masked_outerproduct_with_accumulator_4x4xf32() {
// WITH-MASK-AND-ACC-NEXT: ( 10, 10, 10, 10
// WITH-MASK-AND-ACC-NEXT: ( 10, 10, 10, 10
// WITH-MASK-AND-ACC: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[4]x[4]xf32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir
index 46bf799..b845860 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir
@@ -40,9 +40,9 @@ func.func @test_outerproduct_no_accumulator_2x2xf64() {
// CHECK-NEXT: ( 1, 2
// CHECK-NEXT: ( 2, 4
// CHECK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[2]x[2]xf64>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
@@ -66,9 +66,9 @@ func.func @test_outerproduct_with_accumulator_2x2xf64() {
// WITH-ACC-NEXT: ( 11, 12
// WITH-ACC-NEXT: ( 12, 14
// WITH-ACC: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[2]x[2]xf64>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
@@ -96,9 +96,9 @@ func.func @test_masked_outerproduct_no_accumulator_2x2xf64() {
// WITH-MASK-NEXT: ( 1, 0
// WITH-MASK-NEXT: ( 2, 0
// WITH-MASK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[2]x[2]xf64>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
@@ -127,9 +127,9 @@ func.func @test_masked_outerproduct_with_accumulator_2x2xf64() {
// WITH-MASK-AND-ACC-NEXT: ( 11, 12
// WITH-MASK-AND-ACC-NEXT: ( 10, 10
// WITH-MASK-AND-ACC: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[2]x[2]xf64>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir
index 52f5688..7421521 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir
@@ -14,7 +14,7 @@ func.func @transfer_read_2d(%A : memref<?x?xf32>, %base1: index, %base2: index)
%0 = vector.transfer_read %A[%base1, %base2], %pad {in_bounds=[true, true]} :
memref<?x?xf32>, vector<[4]x[4]xf32>
- vector.print str "TILE BEGIN:"
+ vector.print str "TILE BEGIN:\n"
vector.print %0: vector<[4]x[4]xf32>
return
@@ -27,7 +27,7 @@ func.func @transfer_read_2d_transposed(%A : memref<?x?xf32>, %base1: index, %bas
{permutation_map = affine_map<(d0, d1) -> (d1, d0)>, in_bounds=[true, true]}
: memref<?x?xf32>, vector<[4]x[4]xf32>
- vector.print str "TILE BEGIN:"
+ vector.print str "TILE BEGIN:\n"
vector.print %0 : vector<[4]x[4]xf32>
return
@@ -42,7 +42,7 @@ func.func @transfer_read_2d_mask(%A : memref<?x?xf32>, %base1: index, %base2: in
%0 = vector.transfer_read %A[%base1, %base2], %pad, %mask
{in_bounds = [true, true]} : memref<?x?xf32>, vector<[4]x[4]xf32>
- vector.print str "TILE BEGIN:"
+ vector.print str "TILE BEGIN:\n"
vector.print %0: vector<[4]x[4]xf32>
return
@@ -58,7 +58,7 @@ func.func @transfer_read_2d_mask_transposed(%A : memref<?x?xf32>, %base1: index,
{permutation_map = affine_map<(d0, d1) -> (d1, d0)>, in_bounds=[true, true]}
: memref<?x?xf32>, vector<[4]x[4]xf32>
- vector.print str "TILE BEGIN:"
+ vector.print str "TILE BEGIN:\n"
vector.print %0: vector<[4]x[4]xf32>
return
@@ -73,7 +73,7 @@ func.func @transfer_read_2d_mask_non_zero_pad(%A : memref<?x?xf32>, %base1: inde
%0 = vector.transfer_read %A[%base1, %base2], %pad, %mask
{in_bounds = [true, true]} : memref<?x?xf32>, vector<[4]x[4]xf32>
- vector.print str "TILE BEGIN:"
+ vector.print str "TILE BEGIN:\n"
vector.print %0: vector<[4]x[4]xf32>
return
@@ -89,7 +89,7 @@ func.func @transfer_read_2d_mask_non_zero_pad_transposed(%A : memref<?x?xf32>, %
{permutation_map = affine_map<(d0, d1) -> (d1, d0)>, in_bounds=[true, true]}
: memref<?x?xf32>, vector<[4]x[4]xf32>
- vector.print str "TILE BEGIN:"
+ vector.print str "TILE BEGIN:\n"
vector.print %0: vector<[4]x[4]xf32>
return
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir
index 710cc66..2fef705 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir
@@ -51,7 +51,7 @@ func.func @transfer_write_2d_mask_transposed(%A : memref<?x?xf32>, %base1: index
func.func @load_and_print(%A : memref<?x?xf32>, %base1: index, %base2: index) {
%0 = vector.load %A[%base1, %base2] : memref<?x?xf32>, vector<[4]x[4]xf32>
- vector.print str "TILE BEGIN:"
+ vector.print str "TILE BEGIN:\n"
vector.print %0: vector<[4]x[4]xf32>
return
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir
index 88bc0d0..177c96f 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir
@@ -51,9 +51,9 @@ func.func @entry() {
// CHECK-NEXT: ( 2, 2, 2, 2
// CHECK-NEXT: ( 3, 3, 3, 3
// CHECK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[4]x[4]xi32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
// Dump the transposed tile. The smallest SVL is 128-bits so the tile will be
// at least 4x4xi32.
@@ -64,9 +64,9 @@ func.func @entry() {
// CHECK-NEXT: ( 0, 1, 2, 3
// CHECK-NEXT: ( 0, 1, 2, 3
// CHECK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %transposed_tile : vector<[4]x[4]xi32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir
index e149174..3d74508 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir
@@ -23,9 +23,9 @@ func.func @entry() -> i32 {
// CHECK-NEXT: ( 123, 123, 123, 123
// CHECK-NEXT: ( 123, 123, 123, 123
// CHECK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
vector.print %tile : vector<[4]x[4]xi32>
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
%c0_i32 = arith.constant 0 : i32
return %c0_i32 : i32
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir
index b29790db..48080fd 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir
@@ -255,7 +255,7 @@ func.func @load_store_two_za_s_tiles() -> i32 {
// CHECK-NEXT: ( 1, 1, 1, 1
// CHECK-NEXT: ( 1, 1, 1, 1
// CHECK: TILE END
- vector.print str "TILE BEGIN"
+ vector.print str "TILE BEGIN\n"
scf.for %i = %c0 to %size_of_two_tiles step %svl_s {
%av = vector.load %mem2[%i] : memref<?xi32>, vector<[4]xi32>
vector.print %av : vector<[4]xi32>
@@ -263,11 +263,11 @@ func.func @load_store_two_za_s_tiles() -> i32 {
%tileSizeMinusStep = arith.subi %size_of_tile, %svl_s : index
%isNextTile = arith.cmpi eq, %i, %tileSizeMinusStep : index
scf.if %isNextTile {
- vector.print str "TILE END"
- vector.print str "TILE BEGIN"
+ vector.print str "TILE END\n"
+ vector.print str "TILE BEGIN\n"
}
}
- vector.print str "TILE END"
+ vector.print str "TILE END\n"
return %c0_i32 : i32
}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/Emulated/test-setArmVLBits.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/Emulated/test-setArmVLBits.mlir
index 4f46c6e..aa8d0e4 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/Emulated/test-setArmVLBits.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/Emulated/test-setArmVLBits.mlir
@@ -8,7 +8,7 @@
func.func @checkVScale() {
%vscale = vector.vscale
- vector.print str "vscale"
+ vector.print str "vscale = "
vector.print %vscale : index
return
}
@@ -20,28 +20,23 @@ func.func @setAndCheckVL(%bits: i32) {
}
func.func @main() {
- // CHECK: vscale
- // CHECK-NEXT: 1
+ // CHECK: vscale = 1
%c128 = arith.constant 128 : i32
func.call @setAndCheckVL(%c128) : (i32) -> ()
- // CHECK: vscale
- // CHECK-NEXT: 2
+ // CHECK: vscale = 2
%c256 = arith.constant 256 : i32
func.call @setAndCheckVL(%c256) : (i32) -> ()
- // CHECK: vscale
- // CHECK-NEXT: 4
+ // CHECK: vscale = 4
%c512 = arith.constant 512 : i32
func.call @setAndCheckVL(%c512) : (i32) -> ()
- // CHECK: vscale
- // CHECK-NEXT: 8
+ // CHECK: vscale = 8
%c1024 = arith.constant 1024 : i32
func.call @setAndCheckVL(%c1024) : (i32) -> ()
- // CHECK: vscale
- // CHECK-NEXT: 16
+ // CHECK: vscale = 16
%c2048 = arith.constant 2048 : i32
func.call @setAndCheckVL(%c2048) : (i32) -> ()
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/arrays-of-scalable-vectors.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/arrays-of-scalable-vectors.mlir
index c486bf0..afb23e8 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/arrays-of-scalable-vectors.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/arrays-of-scalable-vectors.mlir
@@ -24,7 +24,7 @@ func.func @read_and_print_2d_vector(%memref: memref<3x?xf32>) {
/// Print each of the vectors.
/// vscale is >= 1, so at least 8 elements will be printed.
- vector.print str "read_and_print_2d_vector()"
+ vector.print str "read_and_print_2d_vector()\n"
// CHECK-LABEL: read_and_print_2d_vector()
// CHECK: ( 8, 8, 8, 8, 8, 8, 8, 8
vector.print %row0 : vector<[8]xf32>
@@ -62,21 +62,21 @@ func.func @add_arrays_of_scalable_vectors(%a: memref<1x2x?xf32>, %b: memref<1x2x
// CHECK-LABEL: Vector A
// CHECK-NEXT: ( 5, 5, 5, 5
// CHECK-NEXT: ( 5, 5, 5, 5
- vector.print str "\nVector A"
+ vector.print str "\nVector A\n"
%vector_a = vector.transfer_read %a[%c0, %c0, %c0], %cst, %mask_a {in_bounds = [true, true, true]} : memref<1x2x?xf32>, vector<1x2x[4]xf32>
func.call @print_1x2xVSCALExf32(%vector_a) : (vector<1x2x[4]xf32>) -> ()
// CHECK-LABEL: Vector B
// CHECK-NEXT: ( 4, 4, 4, 4
// CHECK-NEXT: ( 4, 4, 4, 4
- vector.print str "\nVector B"
+ vector.print str "\nVector B\n"
%vector_b = vector.transfer_read %b[%c0, %c0, %c0], %cst, %mask_b {in_bounds = [true, true, true]} : memref<1x2x?xf32>, vector<1x2x[4]xf32>
func.call @print_1x2xVSCALExf32(%vector_b) : (vector<1x2x[4]xf32>) -> ()
// CHECK-LABEL: Sum
// CHECK-NEXT: ( 9, 9, 9, 9
// CHECK-NEXT: ( 9, 9, 9, 9
- vector.print str "\nSum"
+ vector.print str "\nSum\n"
%sum = arith.addf %vector_a, %vector_b : vector<1x2x[4]xf32>
func.call @print_1x2xVSCALExf32(%sum) : (vector<1x2x[4]xf32>) -> ()
@@ -97,7 +97,7 @@ func.func @entry() {
linalg.fill ins(%f32_8 : f32) outs(%test_1_memref :memref<3x?xf32>)
- vector.print str "=> Print and read 2D arrays of scalable vectors:"
+ vector.print str "=> Print and read 2D arrays of scalable vectors:\n"
func.call @read_and_print_2d_vector(%test_1_memref) : (memref<3x?xf32>) -> ()
vector.print str "\n====================\n"
@@ -109,7 +109,7 @@ func.func @entry() {
linalg.fill ins(%f32_5 : f32) outs(%test_2_memref_a :memref<1x2x?xf32>)
linalg.fill ins(%f32_4 : f32) outs(%test_2_memref_b :memref<1x2x?xf32>)
- vector.print str "=> Reading and adding two 3D arrays of scalable vectors:"
+ vector.print str "=> Reading and adding two 3D arrays of scalable vectors:\n"
func.call @add_arrays_of_scalable_vectors(
%test_2_memref_a, %test_2_memref_b) : (memref<1x2x?xf32>, memref<1x2x?xf32>) -> ()
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-print-str.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-print-str.mlir
index 78d6609..25a44f2 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/test-print-str.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/test-print-str.mlir
@@ -7,8 +7,8 @@
func.func @entry() {
// CHECK: Hello, World!
- vector.print str "Hello, World!"
+ vector.print str "Hello, World!\n"
// CHECK-NEXT: Bye!
- vector.print str "Bye!"
+ vector.print str "Bye!\n"
return
}
diff --git a/mlir/test/Target/Cpp/logical_operators.mlir b/mlir/test/Target/Cpp/logical_operators.mlir
new file mode 100644
index 0000000..7083dc2
--- /dev/null
+++ b/mlir/test/Target/Cpp/logical_operators.mlir
@@ -0,0 +1,14 @@
+// RUN: mlir-translate -mlir-to-cpp %s | FileCheck %s
+
+func.func @logical(%arg0: i32, %arg1: i32) -> () {
+ %0 = emitc.logical_and %arg0, %arg1 : i32, i32
+ %1 = emitc.logical_not %arg0 : i32
+ %2 = emitc.logical_or %arg0, %arg1 : i32, i32
+
+ return
+}
+
+// CHECK-LABEL: void logical
+// CHECK-NEXT: bool [[V2:[^ ]*]] = [[V0:[^ ]*]] && [[V1:[^ ]*]];
+// CHECK-NEXT: bool [[V3:[^ ]*]] = ![[V0]];
+// CHECK-NEXT: bool [[V4:[^ ]*]] = [[V0]] || [[V1]];
diff --git a/mlir/test/lib/Dialect/Affine/CMakeLists.txt b/mlir/test/lib/Dialect/Affine/CMakeLists.txt
index af9f312..14960a4 100644
--- a/mlir/test/lib/Dialect/Affine/CMakeLists.txt
+++ b/mlir/test/lib/Dialect/Affine/CMakeLists.txt
@@ -3,6 +3,7 @@ add_mlir_library(MLIRAffineTransformsTestPasses
TestAffineDataCopy.cpp
TestAffineLoopUnswitching.cpp
TestAffineLoopParametricTiling.cpp
+ TestAccessAnalysis.cpp
TestDecomposeAffineOps.cpp
TestReifyValueBounds.cpp
TestLoopFusion.cpp
@@ -21,6 +22,7 @@ add_mlir_library(MLIRAffineTransformsTestPasses
LINK_LIBS PUBLIC
MLIRArithTransforms
+ MLIRAffineAnalysis
MLIRAffineTransforms
MLIRAffineUtils
MLIRIR
diff --git a/mlir/test/lib/Dialect/Affine/TestAccessAnalysis.cpp b/mlir/test/lib/Dialect/Affine/TestAccessAnalysis.cpp
new file mode 100644
index 0000000..b380462
--- /dev/null
+++ b/mlir/test/lib/Dialect/Affine/TestAccessAnalysis.cpp
@@ -0,0 +1,83 @@
+//===- TestAccessAnalysis.cpp - Test affine access analysis utility -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a pass to test affine access analysis utilities.
+//
+//===----------------------------------------------------------------------===//
+#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
+#include "mlir/Dialect/Affine/Analysis/Utils.h"
+#include "mlir/Dialect/Affine/LoopFusionUtils.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Pass/Pass.h"
+
+#define PASS_NAME "test-affine-access-analysis"
+
+using namespace mlir;
+using namespace mlir::affine;
+
+namespace {
+
+struct TestAccessAnalysis
+ : public PassWrapper<TestAccessAnalysis, OperationPass<func::FuncOp>> {
+ MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestAccessAnalysis)
+
+ StringRef getArgument() const final { return PASS_NAME; }
+ StringRef getDescription() const final {
+ return "Tests affine memory access analysis utility";
+ }
+
+ void runOnOperation() override;
+};
+
+} // namespace
+
+/// Gathers all affine load/store ops in the loop nest rooted at 'forOp' into
+/// 'loadAndStoreOps'.
+static void
+gatherLoadsAndStores(AffineForOp forOp,
+ SmallVectorImpl<Operation *> &loadAndStoreOps) {
+ forOp.walk([&](Operation *op) {
+ if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op))
+ loadAndStoreOps.push_back(op);
+ });
+}
+
+void TestAccessAnalysis::runOnOperation() {
+ SmallVector<Operation *> loadStores;
+ SmallVector<AffineForOp> enclosingOps;
+ // Go over all top-level affine.for ops and test each contained affine
+ // access's contiguity along every surrounding loop IV.
+ for (auto forOp : getOperation().getOps<AffineForOp>()) {
+ loadStores.clear();
+ gatherLoadsAndStores(forOp, loadStores);
+ for (Operation *memOp : loadStores) {
+ enclosingOps.clear();
+ getAffineForIVs(*memOp, &enclosingOps);
+ for (unsigned d = 0, e = enclosingOps.size(); d < e; d++) {
+ int memRefDim;
+ bool isContiguous;
+ if (auto read = dyn_cast<AffineReadOpInterface>(memOp)) {
+ isContiguous = isContiguousAccess(enclosingOps[d].getInductionVar(),
+ read, &memRefDim);
+ } else {
+ isContiguous = isContiguousAccess(enclosingOps[d].getInductionVar(),
+ cast<AffineWriteOpInterface>(memOp),
+ &memRefDim);
+ }
+ if (isContiguous && memRefDim == 0)
+ memOp->emitRemark("contiguous along loop ") << d << '\n';
+ }
+ }
+ }
+}
+
+namespace mlir {
+void registerTestAffineAccessAnalysisPass() {
+ PassRegistration<TestAccessAnalysis>();
+}
+} // namespace mlir
diff --git a/mlir/test/mlir-cpu-runner/expand-arith-ops.mlir b/mlir/test/mlir-cpu-runner/expand-arith-ops.mlir
index 44141cc..0bf6523 100644
--- a/mlir/test/mlir-cpu-runner/expand-arith-ops.mlir
+++ b/mlir/test/mlir-cpu-runner/expand-arith-ops.mlir
@@ -13,10 +13,21 @@ func.func @trunc_bf16(%a : f32) {
}
func.func @main() {
- // CHECK: 1.00781
- %roundOneI = arith.constant 0x3f808000 : i32
- %roundOneF = arith.bitcast %roundOneI : i32 to f32
- call @trunc_bf16(%roundOneF): (f32) -> ()
+ // Note: this is a tie (low 16 bits are 0x8000). We expect the rounding behavior
+ // to break ties "to nearest-even", which in this case means downwards,
+ // since bit 16 is not set.
+ // CHECK: 1
+ %value_1_00391_I = arith.constant 0x3f808000 : i32
+ %value_1_00391_F = arith.bitcast %value_1_00391_I : i32 to f32
+ call @trunc_bf16(%value_1_00391_F): (f32) -> ()
+
+ // Note: this is a tie (low 16 bits are 0x8000). We expect the rounding behavior
+ // to break ties "to nearest-even", which in this case means upwards,
+ // since bit 16 is set.
+ // CHECK-NEXT: 1.01562
+ %value_1_01172_I = arith.constant 0x3f818000 : i32
+ %value_1_01172_F = arith.bitcast %value_1_01172_I : i32 to f32
+ call @trunc_bf16(%value_1_01172_F): (f32) -> ()
// CHECK-NEXT: -1
%noRoundNegOneI = arith.constant 0xbf808000 : i32
@@ -38,15 +49,27 @@ func.func @main() {
%neginff = arith.bitcast %neginfi : i32 to f32
call @trunc_bf16(%neginff): (f32) -> ()
+ // Note: this rounds upwards. As the mantissa was already saturated, this rounding
+ // causes the exponent to be incremented. As the exponent was already the
+ // maximum exponent value for finite values, this increment of the exponent
+ // causes this to overflow to +inf.
+ // CHECK-NEXT: inf
+ %big_overflowing_i = arith.constant 0x7f7fffff : i32
+ %big_overflowing_f = arith.bitcast %big_overflowing_i : i32 to f32
+ call @trunc_bf16(%big_overflowing_f): (f32) -> ()
+
+ // Same as the previous testcase but negative.
+ // CHECK-NEXT: -inf
+ %negbig_overflowing_i = arith.constant 0xff7fffff : i32
+ %negbig_overflowing_f = arith.bitcast %negbig_overflowing_i : i32 to f32
+ call @trunc_bf16(%negbig_overflowing_f): (f32) -> ()
+
+ // In contrast to the previous two testcases, the upwards-rounding here
+ // does not cause overflow.
// CHECK-NEXT: 3.38953e+38
- %bigi = arith.constant 0x7f7fffff : i32
- %bigf = arith.bitcast %bigi : i32 to f32
- call @trunc_bf16(%bigf): (f32) -> ()
-
- // CHECK-NEXT: -3.38953e+38
- %negbigi = arith.constant 0xff7fffff : i32
- %negbigf = arith.bitcast %negbigi : i32 to f32
- call @trunc_bf16(%negbigf): (f32) -> ()
+ %big_nonoverflowing_i = arith.constant 0x7f7effff : i32
+ %big_nonoverflowing_f = arith.bitcast %big_nonoverflowing_i : i32 to f32
+ call @trunc_bf16(%big_nonoverflowing_f): (f32) -> ()
// CHECK-NEXT: 1.625
%exprolli = arith.constant 0x3fcfffff : i32
diff --git a/mlir/test/python/dialects/gpu/dialect.py b/mlir/test/python/dialects/gpu/dialect.py
index 0293e8f..2f49e2e 100644
--- a/mlir/test/python/dialects/gpu/dialect.py
+++ b/mlir/test/python/dialects/gpu/dialect.py
@@ -27,6 +27,6 @@ def testMMAElementWiseAttr():
module = Module.create()
with InsertionPoint(module.body):
gpu.BlockDimOp(gpu.Dimension.y)
- # CHECK: %0 = gpu.block_dim y
+ # CHECK: %block_dim_y = gpu.block_dim y
print(module)
pass
diff --git a/mlir/tools/mlir-opt/mlir-opt.cpp b/mlir/tools/mlir-opt/mlir-opt.cpp
index 4dfa05c..0ba1a3a 100644
--- a/mlir/tools/mlir-opt/mlir-opt.cpp
+++ b/mlir/tools/mlir-opt/mlir-opt.cpp
@@ -43,6 +43,7 @@ void registerSliceAnalysisTestPass();
void registerSymbolTestPasses();
void registerRegionTestPasses();
void registerTestAffineDataCopyPass();
+void registerTestAffineAccessAnalysisPass();
void registerTestAffineReifyValueBoundsPass();
void registerTestAffineLoopUnswitchingPass();
void registerTestAffineWalk();
@@ -169,6 +170,7 @@ void registerTestPasses() {
registerSymbolTestPasses();
registerRegionTestPasses();
registerTestAffineDataCopyPass();
+ registerTestAffineAccessAnalysisPass();
registerTestAffineLoopUnswitchingPass();
registerTestAffineReifyValueBoundsPass();
registerTestAffineWalk();
diff --git a/openmp/CMakeLists.txt b/openmp/CMakeLists.txt
index 03068af..3c4ff76 100644
--- a/openmp/CMakeLists.txt
+++ b/openmp/CMakeLists.txt
@@ -46,9 +46,15 @@ if (OPENMP_STANDALONE_BUILD)
set(CMAKE_CXX_EXTENSIONS NO)
else()
set(OPENMP_ENABLE_WERROR ${LLVM_ENABLE_WERROR})
- # If building in tree, we honor the same install suffix LLVM uses.
- set(OPENMP_INSTALL_LIBDIR "lib${LLVM_LIBDIR_SUFFIX}" CACHE STRING
- "Path where built OpenMP libraries should be installed.")
+
+ # When building in tree we install the runtime according to the LLVM settings.
+ if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT APPLE)
+ set(OPENMP_INSTALL_LIBDIR lib${LLVM_LIBDIR_SUFFIX}/${LLVM_DEFAULT_TARGET_TRIPLE} CACHE STRING
+ "Path where built openmp libraries should be installed.")
+ else()
+ set(OPENMP_INSTALL_LIBDIR "lib${LLVM_LIBDIR_SUFFIX}" CACHE STRING
+ "Path where built OpenMP libraries should be installed.")
+ endif()
if (NOT MSVC)
set(OPENMP_TEST_C_COMPILER ${LLVM_RUNTIME_OUTPUT_INTDIR}/clang)
diff --git a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
index 18af1af..6d3904f 100644
--- a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
@@ -1843,7 +1843,6 @@ cc_library(
":driver",
":driver_options_inc_gen",
":edit",
- ":install_api",
":lex",
":parse",
":sema",
@@ -2066,7 +2065,9 @@ cc_library(
deps = [
":ast",
":basic",
+ ":frontend",
":support",
+ "//llvm:Core",
"//llvm:Support",
"//llvm:TextAPI",
],
diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
index a1a5b7f..16ceaadf2 100644
--- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -80,8 +80,8 @@ libc_support_library(
)
libc_support_library(
- name = "__support_macros_properties_float",
- hdrs = ["src/__support/macros/properties/float.h"],
+ name = "__support_macros_properties_types",
+ hdrs = ["src/__support/macros/properties/types.h"],
deps = [
":__support_macros_properties_architectures",
":__support_macros_properties_compiler",
@@ -332,7 +332,7 @@ libc_support_library(
deps = [
":__support_macros_attributes",
":__support_macros_config",
- ":__support_macros_properties_float",
+ ":__support_macros_properties_types",
":llvm_libc_macros_stdfix_macros",
],
)
@@ -697,7 +697,7 @@ libc_support_library(
":__support_cpp_type_traits",
":__support_libc_assert",
":__support_macros_attributes",
- ":__support_macros_properties_float",
+ ":__support_macros_properties_types",
":__support_math_extras",
":__support_uint128",
],