author     Vijay Kandiah <vkandiah@nvidia.com>  2024-06-14 11:36:05 -0500
committer  GitHub <noreply@github.com>  2024-06-14 11:36:05 -0500
commit     c0cba5198155dba246ddd5764f57595d9bbbddef
tree       cabf2b28b3e0650cc8520ff64e614b9446f5ae2b /flang
parent     2f5ec13761fa672cb39ff147d876c2604c08bcae
[Flang] Hoisting constant-sized allocas at flang codegen. (#95310)
This change modifies the `AllocaOpConversion` in flang codegen to insert constant-sized LLVM allocas at the entry block of `LLVMFuncOp` or OpenACC/OpenMP Op, rather than in-place at the `fir.alloca`. This effectively hoists constant-sized FIR allocas to the proper block. When compiling the example subroutine below with `flang-new`, we get an llvm.stacksave/stackrestore pair around a constant-sized `fir.alloca i32`.

```
subroutine test(n)
  block
    integer :: n
    print *, n
  end block
end subroutine test
```

Without the proposed change, downstream LLVM compilation cannot hoist this constant-sized alloca out of the stacksave/stackrestore region, which may lead to missed downstream optimizations:

```
*** IR Dump After Safe Stack instrumentation pass (safe-stack) ***
define void @test_(ptr %0) !dbg !3 {
  %2 = call ptr @llvm.stacksave.p0(), !dbg !7
  %3 = alloca i32, i64 1, align 4, !dbg !8
  %4 = call ptr @_FortranAioBeginExternalListOutput(i32 6, ptr @_QQclX62c91d05f046c7a656e7978eb13f2821, i32 4), !dbg !9
  %5 = load i32, ptr %3, align 4, !dbg !10, !tbaa !11
  %6 = call i1 @_FortranAioOutputInteger32(ptr %4, i32 %5), !dbg !10
  %7 = call i32 @_FortranAioEndIoStatement(ptr %4), !dbg !9
  call void @llvm.stackrestore.p0(ptr %2), !dbg !15
  ret void, !dbg !16
}
```

With this change, the `llvm.alloca` is already hoisted out of the stacksave/stackrestore region during flang codegen:

```
// -----// IR Dump After FIRToLLVMLowering (fir-to-llvm-ir) //----- //
llvm.func @test_(%arg0: !llvm.ptr {fir.bindc_name = "n"}) attributes {fir.internal_name = "_QPtest"} {
  %0 = llvm.mlir.constant(4 : i32) : i32
  %1 = llvm.mlir.constant(1 : i64) : i64
  %2 = llvm.alloca %1 x i32 {bindc_name = "n"} : (i64) -> !llvm.ptr
  %3 = llvm.mlir.constant(6 : i32) : i32
  %4 = llvm.mlir.undef : i1
  %5 = llvm.call @llvm.stacksave.p0() {fastmathFlags = #llvm.fastmath<contract>} : () -> !llvm.ptr
  %6 = llvm.mlir.addressof @_QQclX62c91d05f046c7a656e7978eb13f2821 : !llvm.ptr
  %7 = llvm.call @_FortranAioBeginExternalListOutput(%3, %6, %0) {fastmathFlags = #llvm.fastmath<contract>} : (i32, !llvm.ptr, i32) -> !llvm.ptr
  %8 = llvm.load %2 {tbaa = [#tbaa_tag]} : !llvm.ptr -> i32
  %9 = llvm.call @_FortranAioOutputInteger32(%7, %8) {fastmathFlags = #llvm.fastmath<contract>} : (!llvm.ptr, i32) -> i1
  %10 = llvm.call @_FortranAioEndIoStatement(%7) {fastmathFlags = #llvm.fastmath<contract>} : (!llvm.ptr) -> i32
  llvm.call @llvm.stackrestore.p0(%5) {fastmathFlags = #llvm.fastmath<contract>} : (!llvm.ptr) -> ()
  llvm.return
}
```

---------

Co-authored-by: Vijay Kandiah <vkandiah@sky6.pgi.net>
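In short, the new codegen path checks whether the computed alloca size is defined by an `llvm.mlir.constant`; if it is, that constant is moved to the block returned by `getBlockForAllocaInsert` and the `llvm.alloca` is emitted right after it. The following is a minimal sketch of that hoisting step, condensed from the `CodeGen.cpp` hunk below into a hypothetical standalone helper (the helper name and free-function form are illustrative only; in the patch this logic lives inside `AllocaOpConversion` and `getBlockForAllocaInsert` is a member of `fir::ConvertFIRToLLVMPattern`).

```cpp
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Transforms/DialectConversion.h"

// In the real patch this is a member of fir::ConvertFIRToLLVMPattern (see the
// FIROpPatterns.h/.cpp hunks below); declared here so the sketch stands alone.
mlir::Block *getBlockForAllocaInsert(mlir::Operation *op,
                                     mlir::Region *parentRegion);

// Hypothetical helper condensing the hoisting step added to
// AllocaOpConversion; illustrative, not the verbatim patch.
static void hoistConstantSizedAlloca(mlir::ConversionPatternRewriter &rewriter,
                                     mlir::Value size) {
  // Only constant sizes are hoisted; a dynamic size stays put so that the
  // operands computing it still dominate the alloca.
  mlir::Operation *sizeDef = size.getDefiningOp();
  if (!sizeDef || !mlir::isa<mlir::LLVM::ConstantOp>(sizeDef))
    return;
  mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
  mlir::Region *parentRegion = rewriter.getInsertionBlock()->getParent();
  // Walk up to the nearest outlineable OpenMP op, OpenACC/OpenMP recipe
  // region, or LLVMFuncOp entry block.
  mlir::Block *insertBlock = getBlockForAllocaInsert(parentOp, parentRegion);
  // Move the size constant to the top of that block and set the insertion
  // point right after it, outside any stacksave/stackrestore region.
  sizeDef->moveAfter(insertBlock, insertBlock->begin());
  rewriter.setInsertionPointAfter(sizeDef);
}
```

The `llvm.alloca` is then created at this adjusted insertion point, which is why the IR dump above shows `%2 = llvm.alloca` at the top of `@test_` rather than inside the stacksave/stackrestore pair.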
Diffstat (limited to 'flang')
 flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h |  13
 flang/lib/Optimizer/CodeGen/CodeGen.cpp               |  20
 flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp         |  48
 flang/test/Fir/alloc.fir                              |  12
 flang/test/Fir/boxproc.fir                            |  10
 flang/test/Fir/convert-to-llvm-openmp-and-fir.fir     | 117
 flang/test/Fir/convert-to-llvm.fir                    |  22
 flang/test/Integration/OpenMP/copyprivate.f90         |   2
 flang/test/Transforms/debug-local-var-2.f90           |  10
 9 files changed, 137 insertions(+), 117 deletions(-)
diff --git a/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h b/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h
index 211acdc..ac09566 100644
--- a/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h
+++ b/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h
@@ -51,7 +51,9 @@ protected:
/// appropriate reified structures.
mlir::Value integerCast(mlir::Location loc,
mlir::ConversionPatternRewriter &rewriter,
- mlir::Type ty, mlir::Value val) const;
+ mlir::Type ty, mlir::Value val,
+ bool fold = false) const;
+
struct TypePair {
mlir::Type fir;
mlir::Type llvm;
@@ -144,9 +146,12 @@ protected:
// Find the Block in which the alloca should be inserted.
// The order to recursively find the proper block:
// 1. An OpenMP Op that will be outlined.
- // 2. A LLVMFuncOp
- // 3. The first ancestor that is an OpenMP Op or a LLVMFuncOp
- mlir::Block *getBlockForAllocaInsert(mlir::Operation *op) const;
+ // 2. An OpenMP or OpenACC Op with one or more regions holding executable
+ // code.
+ // 3. A LLVMFuncOp
+ // 4. The first ancestor that is one of the above.
+ mlir::Block *getBlockForAllocaInsert(mlir::Operation *op,
+ mlir::Region *parentRegion) const;
// Generate an alloca of size 1 for an object of type \p llvmObjectTy in the
// allocation address space provided for the architecture in the DataLayout
diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index 9f21c6b..4448224 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -218,7 +218,7 @@ struct AllocaOpConversion : public fir::FIROpConversion<fir::AllocaOp> {
chrTy.getContext(), chrTy.getFKind());
llvmObjectType = convertType(rawCharTy);
assert(end == 1);
- size = integerCast(loc, rewriter, ity, lenParams[0]);
+ size = integerCast(loc, rewriter, ity, lenParams[0], /*fold=*/true);
} else if (auto recTy = mlir::dyn_cast<fir::RecordType>(scalarType)) {
mlir::LLVM::LLVMFuncOp memSizeFn =
getDependentTypeMemSizeFn(recTy, alloc, rewriter);
@@ -236,17 +236,29 @@ struct AllocaOpConversion : public fir::FIROpConversion<fir::AllocaOp> {
}
}
if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter))
- size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
+ size =
+ rewriter.createOrFold<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
if (alloc.hasShapeOperands()) {
unsigned end = operands.size();
for (; i < end; ++i)
- size = rewriter.create<mlir::LLVM::MulOp>(
- loc, ity, size, integerCast(loc, rewriter, ity, operands[i]));
+ size = rewriter.createOrFold<mlir::LLVM::MulOp>(
+ loc, ity, size,
+ integerCast(loc, rewriter, ity, operands[i], /*fold=*/true));
}
unsigned allocaAs = getAllocaAddressSpace(rewriter);
unsigned programAs = getProgramAddressSpace(rewriter);
+ if (mlir::isa<mlir::LLVM::ConstantOp>(size.getDefiningOp())) {
+ // Set the Block in which the llvm alloca should be inserted.
+ mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
+ mlir::Region *parentRegion = rewriter.getInsertionBlock()->getParent();
+ mlir::Block *insertBlock =
+ getBlockForAllocaInsert(parentOp, parentRegion);
+ size.getDefiningOp()->moveAfter(insertBlock, insertBlock->begin());
+ rewriter.setInsertionPointAfter(size.getDefiningOp());
+ }
+
// NOTE: we used to pass alloc->getAttrs() in the builder for non opaque
// pointers! Only propagate pinned and bindc_name to help debugging, but
// this should have no functional purpose (and passing the operand segment
diff --git a/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp b/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp
index 72e072d..b9a28b8 100644
--- a/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp
+++ b/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp
@@ -62,10 +62,9 @@ mlir::LLVM::ConstantOp ConvertFIRToLLVMPattern::genConstantOffset(
/// to the specific target may involve some sign-extending or truncation of
/// values, particularly to fit them from abstract box types to the
/// appropriate reified structures.
-mlir::Value
-ConvertFIRToLLVMPattern::integerCast(mlir::Location loc,
- mlir::ConversionPatternRewriter &rewriter,
- mlir::Type ty, mlir::Value val) const {
+mlir::Value ConvertFIRToLLVMPattern::integerCast(
+ mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
+ mlir::Type ty, mlir::Value val, bool fold) const {
auto valTy = val.getType();
// If the value was not yet lowered, lower its type so that it can
// be used in getPrimitiveTypeSizeInBits.
@@ -73,10 +72,17 @@ ConvertFIRToLLVMPattern::integerCast(mlir::Location loc,
valTy = convertType(valTy);
auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
- if (toSize < fromSize)
- return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
- if (toSize > fromSize)
- return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
+ if (fold) {
+ if (toSize < fromSize)
+ return rewriter.createOrFold<mlir::LLVM::TruncOp>(loc, ty, val);
+ if (toSize > fromSize)
+ return rewriter.createOrFold<mlir::LLVM::SExtOp>(loc, ty, val);
+ } else {
+ if (toSize < fromSize)
+ return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
+ if (toSize > fromSize)
+ return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
+ }
return val;
}
@@ -274,16 +280,19 @@ mlir::Value ConvertFIRToLLVMPattern::computeBoxSize(
// Find the Block in which the alloca should be inserted.
// The order to recursively find the proper block:
// 1. An OpenMP Op that will be outlined.
-// 2. A LLVMFuncOp
-// 3. The first ancestor that is an OpenMP Op or a LLVMFuncOp
-mlir::Block *
-ConvertFIRToLLVMPattern::getBlockForAllocaInsert(mlir::Operation *op) const {
+// 2. An OpenMP or OpenACC Op with one or more regions holding executable code.
+// 3. A LLVMFuncOp
+// 4. The first ancestor that is one of the above.
+mlir::Block *ConvertFIRToLLVMPattern::getBlockForAllocaInsert(
+ mlir::Operation *op, mlir::Region *parentRegion) const {
if (auto iface = mlir::dyn_cast<mlir::omp::OutlineableOpenMPOpInterface>(op))
return iface.getAllocaBlock();
+ if (auto recipeIface = mlir::dyn_cast<mlir::accomp::RecipeInterface>(op))
+ return recipeIface.getAllocaBlock(*parentRegion);
if (auto llvmFuncOp = mlir::dyn_cast<mlir::LLVM::LLVMFuncOp>(op))
return &llvmFuncOp.front();
- return getBlockForAllocaInsert(op->getParentOp());
+ return getBlockForAllocaInsert(op->getParentOp(), parentRegion);
}
// Generate an alloca of size 1 for an object of type \p llvmObjectTy in the
@@ -297,16 +306,9 @@ mlir::Value ConvertFIRToLLVMPattern::genAllocaAndAddrCastWithType(
mlir::ConversionPatternRewriter &rewriter) const {
auto thisPt = rewriter.saveInsertionPoint();
mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
- if (mlir::isa<mlir::omp::DeclareReductionOp>(parentOp) ||
- mlir::isa<mlir::omp::PrivateClauseOp>(parentOp)) {
- // DeclareReductionOp & PrivateClauseOp have multiple child regions. We want
- // to get the first block of whichever of those regions we are currently in
- mlir::Region *parentRegion = rewriter.getInsertionBlock()->getParent();
- rewriter.setInsertionPointToStart(&parentRegion->front());
- } else {
- mlir::Block *insertBlock = getBlockForAllocaInsert(parentOp);
- rewriter.setInsertionPointToStart(insertBlock);
- }
+ mlir::Region *parentRegion = rewriter.getInsertionBlock()->getParent();
+ mlir::Block *insertBlock = getBlockForAllocaInsert(parentOp, parentRegion);
+ rewriter.setInsertionPointToStart(insertBlock);
auto size = genI32Constant(loc, rewriter, 1);
unsigned allocaAs = getAllocaAddressSpace(rewriter);
unsigned programAs = getProgramAddressSpace(rewriter);
diff --git a/flang/test/Fir/alloc.fir b/flang/test/Fir/alloc.fir
index ca624c0..e00fc9d 100644
--- a/flang/test/Fir/alloc.fir
+++ b/flang/test/Fir/alloc.fir
@@ -156,7 +156,7 @@ func.func @allocmem_array_of_dynchar(%l: i32) -> !fir.heap<!fir.array<3x3x!fir.c
// CHECK-LABEL: define ptr @alloca_dynarray_of_nonchar(
// CHECK-SAME: i64 %[[extent:.*]])
-// CHECK: %[[prod1:.*]] = mul i64 1, %[[extent]]
+// CHECK: %[[prod1:.*]] = mul i64 %[[extent]], 1
// CHECK: alloca [3 x i32], i64 %[[prod1]]
func.func @alloca_dynarray_of_nonchar(%e: index) -> !fir.ref<!fir.array<3x?xi32>> {
%1 = fir.alloca !fir.array<3x?xi32>, %e
@@ -165,7 +165,7 @@ func.func @alloca_dynarray_of_nonchar(%e: index) -> !fir.ref<!fir.array<3x?xi32>
// CHECK-LABEL: define ptr @alloca_dynarray_of_nonchar2(
// CHECK-SAME: i64 %[[extent:.*]])
-// CHECK: %[[prod1:.*]] = mul i64 1, %[[extent]]
+// CHECK: %[[prod1:.*]] = mul i64 %[[extent]], 1
// CHECK: %[[prod2:.*]] = mul i64 %[[prod1]], %[[extent]]
// CHECK: alloca i32, i64 %[[prod2]]
func.func @alloca_dynarray_of_nonchar2(%e: index) -> !fir.ref<!fir.array<?x?xi32>> {
@@ -194,7 +194,7 @@ func.func @allocmem_dynarray_of_nonchar2(%e: index) -> !fir.heap<!fir.array<?x?x
// CHECK-LABEL: define ptr @alloca_dynarray_of_char(
// CHECK-SAME: i64 %[[extent:.*]])
-// CHECK: %[[prod1:.*]] = mul i64 1, %[[extent]]
+// CHECK: %[[prod1:.*]] = mul i64 %[[extent]], 1
// CHECK: alloca [3 x [10 x i16]], i64 %[[prod1]]
func.func @alloca_dynarray_of_char(%e : index) -> !fir.ref<!fir.array<3x?x!fir.char<2,10>>> {
%1 = fir.alloca !fir.array<3x?x!fir.char<2,10>>, %e
@@ -203,7 +203,7 @@ func.func @alloca_dynarray_of_char(%e : index) -> !fir.ref<!fir.array<3x?x!fir.c
// CHECK-LABEL: define ptr @alloca_dynarray_of_char2(
// CHECK-SAME: i64 %[[extent:.*]])
-// CHECK: %[[prod1:.*]] = mul i64 1, %[[extent]]
+// CHECK: %[[prod1:.*]] = mul i64 %[[extent]], 1
// CHECK: %[[prod2:.*]] = mul i64 %[[prod1]], %[[extent]]
// CHECK: alloca [10 x i16], i64 %[[prod2]]
func.func @alloca_dynarray_of_char2(%e : index) -> !fir.ref<!fir.array<?x?x!fir.char<2,10>>> {
@@ -334,10 +334,10 @@ func.func @allocmem_array_with_holes_dynchar(%arg0: index, %arg1: index) -> !fir
}
// CHECK-LABEL: define void @alloca_unlimited_polymorphic_box
-// CHECK: %[[VAL_0:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] }, i64 1
// CHECK: %[[VAL_1:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, i64 1
-// CHECK: %[[VAL_2:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] }, i64 1
+// CHECK: %[[VAL_0:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] }, i64 1
// CHECK: %[[VAL_3:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, i64 1
+// CHECK: %[[VAL_2:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] }, i64 1
func.func @alloca_unlimited_polymorphic_box() {
%0 = fir.alloca !fir.class<none>
diff --git a/flang/test/Fir/boxproc.fir b/flang/test/Fir/boxproc.fir
index 1fed16a..834017b 100644
--- a/flang/test/Fir/boxproc.fir
+++ b/flang/test/Fir/boxproc.fir
@@ -1,12 +1,12 @@
// RUN: tco %s | FileCheck %s
// CHECK-LABEL: define void @_QPtest_proc_dummy()
-// CHECK: %[[VAL_0:.*]] = alloca i32, i64 1, align 4
+// CHECK: %[[VAL_3:.*]] = alloca [32 x i8], i64 1, align 1
// CHECK: %[[VAL_1:.*]] = alloca { ptr }, i64 1, align 8
+// CHECK: %[[VAL_0:.*]] = alloca i32, i64 1, align 4
// CHECK: %[[VAL_2:.*]] = getelementptr { ptr }, ptr %[[VAL_1]], i32 0, i32 0
// CHECK: store ptr %[[VAL_0]], ptr %[[VAL_2]], align 8
// CHECK: store i32 1, ptr %[[VAL_0]], align 4
-// CHECK: %[[VAL_3:.*]] = alloca [32 x i8], i64 1, align 1
// CHECK: call void @llvm.init.trampoline(ptr %[[VAL_3]], ptr @_QFtest_proc_dummyPtest_proc_dummy_a, ptr %[[VAL_1]])
// CHECK: %[[VAL_6:.*]] = call ptr @llvm.adjust.trampoline(ptr %[[VAL_3]])
// CHECK: call void @_QPtest_proc_dummy_other(ptr %[[VAL_6]])
@@ -61,9 +61,10 @@ func.func @_QPtest_proc_dummy_other(%arg0: !fir.boxproc<() -> ()>) {
}
// CHECK-LABEL: define void @_QPtest_proc_dummy_char()
-// CHECK: %[[VAL_0:.*]] = alloca [40 x i8], i64 1, align 1
-// CHECK: %[[VAL_1:.*]] = alloca [10 x i8], i64 1, align 1
+// CHECK: %[[VAL_20:.*]] = alloca [32 x i8], i64 1, align 1
// CHECK: %[[VAL_2:.*]] = alloca { { ptr, i64 } }, i64 1, align 8
+// CHECK: %[[VAL_1:.*]] = alloca [10 x i8], i64 1, align 1
+// CHECK: %[[VAL_0:.*]] = alloca [40 x i8], i64 1, align 1
// CHECK: %[[VAL_3:.*]] = getelementptr { { ptr, i64 } }, ptr %[[VAL_2]], i32 0, i32 0
// CHECK: %[[VAL_5:.*]] = insertvalue { ptr, i64 } undef, ptr %[[VAL_1]], 0
// CHECK: %[[VAL_6:.*]] = insertvalue { ptr, i64 } %[[VAL_5]], i64 10, 1
@@ -75,7 +76,6 @@ func.func @_QPtest_proc_dummy_other(%arg0: !fir.boxproc<() -> ()>) {
// CHECK: %[[VAL_15:.*]] = icmp sgt i64 %[[VAL_13]], 0
// CHECK: %[[VAL_18:.*]] = getelementptr [10 x [1 x i8]], ptr %[[VAL_1]], i32 0, i64 %[[VAL_11]]
// CHECK: store [1 x i8] c" ", ptr %[[VAL_18]], align 1
-// CHECK: %[[VAL_20:.*]] = alloca [32 x i8], i64 1, align 1
// CHECK: call void @llvm.init.trampoline(ptr %[[VAL_20]], ptr @_QFtest_proc_dummy_charPgen_message, ptr %[[VAL_2]])
// CHECK: %[[VAL_23:.*]] = call ptr @llvm.adjust.trampoline(ptr %[[VAL_20]])
// CHECK: %[[VAL_25:.*]] = insertvalue { ptr, i64 } undef, ptr %[[VAL_23]], 0
diff --git a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
index 72cd0a7..45ff89b 100644
--- a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
+++ b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
@@ -280,58 +280,58 @@ func.func @_QPomp_target_data() {
return
}
- // CHECK-LABEL: llvm.func @_QPomp_target_data() {
- // CHECK: %0 = llvm.mlir.constant(1024 : index) : i64
- // CHECK: %[[VAL_0:.*]] = llvm.mlir.constant(1 : i64) : i64
- // CHECK: %[[VAL_1:.*]] = llvm.alloca %[[VAL_0]] x !llvm.array<1024 x i32> {bindc_name = "a"} : (i64) -> !llvm.ptr
- // CHECK: %3 = llvm.mlir.constant(1024 : index) : i64
- // CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i64) : i64
- // CHECK: %[[VAL_3:.*]] = llvm.alloca %[[VAL_2]] x !llvm.array<1024 x i32> {bindc_name = "b"} : (i64) -> !llvm.ptr
- // CHECK: %6 = llvm.mlir.constant(1024 : index) : i64
- // CHECK: %[[VAL_4:.*]] = llvm.mlir.constant(1 : i64) : i64
- // CHECK: %[[VAL_5:.*]] = llvm.alloca %[[VAL_4]] x !llvm.array<1024 x i32> {bindc_name = "c"} : (i64) -> !llvm.ptr
- // CHECK: %9 = llvm.mlir.constant(1024 : index) : i64
- // CHECK: %[[VAL_6:.*]] = llvm.mlir.constant(1 : i64) : i64
- // CHECK: %[[VAL_7:.*]] = llvm.alloca %[[VAL_6]] x !llvm.array<1024 x i32> {bindc_name = "d"} : (i64) -> !llvm.ptr
- // CHECK: %12 = llvm.mlir.constant(1 : index) : i64
- // CHECK: %13 = llvm.mlir.constant(0 : index) : i64
- // CHECK: %14 = llvm.mlir.constant(1023 : index) : i64
- // CHECK: %15 = omp.map.bounds lower_bound(%13 : i64) upper_bound(%14 : i64) extent(%0 : i64) stride(%12 : i64) start_idx(%12 : i64)
- // CHECK: %16 = omp.map.info var_ptr(%[[VAL_1]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(to) capture(ByRef) bounds(%15) -> !llvm.ptr {name = "a"}
- // CHECK: %17 = llvm.mlir.constant(1 : index) : i64
- // CHECK: %18 = llvm.mlir.constant(0 : index) : i64
- // CHECK: %19 = llvm.mlir.constant(1023 : index) : i64
- // CHECK: %20 = omp.map.bounds lower_bound(%18 : i64) upper_bound(%19 : i64) extent(%3 : i64) stride(%17 : i64) start_idx(%17 : i64)
- // CHECK: %21 = omp.map.info var_ptr(%[[VAL_3]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(to) capture(ByRef) bounds(%20) -> !llvm.ptr {name = "b"}
- // CHECK: %22 = llvm.mlir.constant(1 : index) : i64
- // CHECK: %23 = llvm.mlir.constant(0 : index) : i64
- // CHECK: %24 = llvm.mlir.constant(1023 : index) : i64
- // CHECK: %25 = omp.map.bounds lower_bound(%23 : i64) upper_bound(%24 : i64) extent(%6 : i64) stride(%22 : i64) start_idx(%22 : i64)
- // CHECK: %26 = omp.map.info var_ptr(%[[VAL_5]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(always, exit_release_or_enter_alloc) capture(ByRef) bounds(%25) -> !llvm.ptr {name = "c"}
- // CHECK: omp.target_enter_data map_entries(%16, %21, %26 : !llvm.ptr, !llvm.ptr, !llvm.ptr)
- // CHECK: %27 = llvm.mlir.constant(1 : index) : i64
- // CHECK: %28 = llvm.mlir.constant(0 : index) : i64
- // CHECK: %29 = llvm.mlir.constant(1023 : index) : i64
- // CHECK: %30 = omp.map.bounds lower_bound(%28 : i64) upper_bound(%29 : i64) extent(%0 : i64) stride(%27 : i64) start_idx(%27 : i64)
- // CHECK: %31 = omp.map.info var_ptr(%[[VAL_1]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(from) capture(ByRef) bounds(%30) -> !llvm.ptr {name = "a"}
- // CHECK: %32 = llvm.mlir.constant(1 : index) : i64
- // CHECK: %33 = llvm.mlir.constant(0 : index) : i64
- // CHECK: %34 = llvm.mlir.constant(1023 : index) : i64
- // CHECK: %35 = omp.map.bounds lower_bound(%33 : i64) upper_bound(%34 : i64) extent(%3 : i64) stride(%32 : i64) start_idx(%32 : i64)
- // CHECK: %36 = omp.map.info var_ptr(%[[VAL_3]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(from) capture(ByRef) bounds(%35) -> !llvm.ptr {name = "b"}
- // CHECK: %37 = llvm.mlir.constant(1 : index) : i64
- // CHECK: %38 = llvm.mlir.constant(0 : index) : i64
- // CHECK: %39 = llvm.mlir.constant(1023 : index) : i64
- // CHECK: %40 = omp.map.bounds lower_bound(%38 : i64) upper_bound(%39 : i64) extent(%6 : i64) stride(%37 : i64) start_idx(%37 : i64)
- // CHECK: %41 = omp.map.info var_ptr(%[[VAL_5]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(exit_release_or_enter_alloc) capture(ByRef) bounds(%40) -> !llvm.ptr {name = "c"}
- // CHECK: %42 = llvm.mlir.constant(1 : index) : i64
- // CHECK: %43 = llvm.mlir.constant(0 : index) : i64
- // CHECK: %44 = llvm.mlir.constant(1023 : index) : i64
- // CHECK: %45 = omp.map.bounds lower_bound(%43 : i64) upper_bound(%44 : i64) extent(%9 : i64) stride(%42 : i64) start_idx(%42 : i64)
- // CHECK: %46 = omp.map.info var_ptr(%[[VAL_7]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(always, delete) capture(ByRef) bounds(%45) -> !llvm.ptr {name = "d"}
- // CHECK: omp.target_exit_data map_entries(%31, %36, %41, %46 : !llvm.ptr, !llvm.ptr, !llvm.ptr, !llvm.ptr)
- // CHECK: llvm.return
- // CHECK: }
+// CHECK-LABEL: llvm.func @_QPomp_target_data() {
+// CHECK: %[[VAL_0:.*]] = llvm.mlir.constant(1024 : index) : i64
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = llvm.alloca %[[VAL_1]] x !llvm.array<1024 x i32> {bindc_name = "d"} : (i64) -> !llvm.ptr
+// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[VAL_4:.*]] = llvm.alloca %[[VAL_3]] x !llvm.array<1024 x i32> {bindc_name = "c"} : (i64) -> !llvm.ptr
+// CHECK: %[[VAL_5:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[VAL_6:.*]] = llvm.alloca %[[VAL_5]] x !llvm.array<1024 x i32> {bindc_name = "b"} : (i64) -> !llvm.ptr
+// CHECK: %[[VAL_7:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[VAL_8:.*]] = llvm.alloca %[[VAL_7]] x !llvm.array<1024 x i32> {bindc_name = "a"} : (i64) -> !llvm.ptr
+// CHECK: %[[VAL_9:.*]] = llvm.mlir.constant(1024 : index) : i64
+// CHECK: %[[VAL_10:.*]] = llvm.mlir.constant(1024 : index) : i64
+// CHECK: %[[VAL_11:.*]] = llvm.mlir.constant(1024 : index) : i64
+// CHECK: %[[VAL_12:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[VAL_13:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[VAL_14:.*]] = llvm.mlir.constant(1023 : index) : i64
+// CHECK: %[[VAL_15:.*]] = omp.map.bounds lower_bound(%[[VAL_13]] : i64) upper_bound(%[[VAL_14]] : i64) extent(%[[VAL_0]] : i64) stride(%[[VAL_12]] : i64) start_idx(%[[VAL_12]] : i64)
+// CHECK: %[[VAL_16:.*]] = omp.map.info var_ptr(%[[VAL_8]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(to) capture(ByRef) bounds(%[[VAL_15]]) -> !llvm.ptr {name = "a"}
+// CHECK: %[[VAL_17:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[VAL_18:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[VAL_19:.*]] = llvm.mlir.constant(1023 : index) : i64
+// CHECK: %[[VAL_20:.*]] = omp.map.bounds lower_bound(%[[VAL_18]] : i64) upper_bound(%[[VAL_19]] : i64) extent(%[[VAL_9]] : i64) stride(%[[VAL_17]] : i64) start_idx(%[[VAL_17]] : i64)
+// CHECK: %[[VAL_21:.*]] = omp.map.info var_ptr(%[[VAL_6]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(to) capture(ByRef) bounds(%[[VAL_20]]) -> !llvm.ptr {name = "b"}
+// CHECK: %[[VAL_22:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[VAL_23:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[VAL_24:.*]] = llvm.mlir.constant(1023 : index) : i64
+// CHECK: %[[VAL_25:.*]] = omp.map.bounds lower_bound(%[[VAL_23]] : i64) upper_bound(%[[VAL_24]] : i64) extent(%[[VAL_10]] : i64) stride(%[[VAL_22]] : i64) start_idx(%[[VAL_22]] : i64)
+// CHECK: %[[VAL_26:.*]] = omp.map.info var_ptr(%[[VAL_4]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(always, exit_release_or_enter_alloc) capture(ByRef) bounds(%[[VAL_25]]) -> !llvm.ptr {name = "c"}
+// CHECK: omp.target_enter_data map_entries(%[[VAL_16]], %[[VAL_21]], %[[VAL_26]] : !llvm.ptr, !llvm.ptr, !llvm.ptr)
+// CHECK: %[[VAL_27:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[VAL_28:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[VAL_29:.*]] = llvm.mlir.constant(1023 : index) : i64
+// CHECK: %[[VAL_30:.*]] = omp.map.bounds lower_bound(%[[VAL_28]] : i64) upper_bound(%[[VAL_29]] : i64) extent(%[[VAL_0]] : i64) stride(%[[VAL_27]] : i64) start_idx(%[[VAL_27]] : i64)
+// CHECK: %[[VAL_31:.*]] = omp.map.info var_ptr(%[[VAL_8]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(from) capture(ByRef) bounds(%[[VAL_30]]) -> !llvm.ptr {name = "a"}
+// CHECK: %[[VAL_32:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[VAL_33:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[VAL_34:.*]] = llvm.mlir.constant(1023 : index) : i64
+// CHECK: %[[VAL_35:.*]] = omp.map.bounds lower_bound(%[[VAL_33]] : i64) upper_bound(%[[VAL_34]] : i64) extent(%[[VAL_9]] : i64) stride(%[[VAL_32]] : i64) start_idx(%[[VAL_32]] : i64)
+// CHECK: %[[VAL_36:.*]] = omp.map.info var_ptr(%[[VAL_6]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(from) capture(ByRef) bounds(%[[VAL_35]]) -> !llvm.ptr {name = "b"}
+// CHECK: %[[VAL_37:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[VAL_38:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[VAL_39:.*]] = llvm.mlir.constant(1023 : index) : i64
+// CHECK: %[[VAL_40:.*]] = omp.map.bounds lower_bound(%[[VAL_38]] : i64) upper_bound(%[[VAL_39]] : i64) extent(%[[VAL_10]] : i64) stride(%[[VAL_37]] : i64) start_idx(%[[VAL_37]] : i64)
+// CHECK: %[[VAL_41:.*]] = omp.map.info var_ptr(%[[VAL_4]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(exit_release_or_enter_alloc) capture(ByRef) bounds(%[[VAL_40]]) -> !llvm.ptr {name = "c"}
+// CHECK: %[[VAL_42:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK: %[[VAL_43:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK: %[[VAL_44:.*]] = llvm.mlir.constant(1023 : index) : i64
+// CHECK: %[[VAL_45:.*]] = omp.map.bounds lower_bound(%[[VAL_43]] : i64) upper_bound(%[[VAL_44]] : i64) extent(%[[VAL_11]] : i64) stride(%[[VAL_42]] : i64) start_idx(%[[VAL_42]] : i64)
+// CHECK: %[[VAL_46:.*]] = omp.map.info var_ptr(%[[VAL_2]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(always, delete) capture(ByRef) bounds(%[[VAL_45]]) -> !llvm.ptr {name = "d"}
+// CHECK: omp.target_exit_data map_entries(%[[VAL_31]], %[[VAL_36]], %[[VAL_41]], %[[VAL_46]] : !llvm.ptr, !llvm.ptr, !llvm.ptr, !llvm.ptr)
+// CHECK: llvm.return
+// CHECK: }
// -----
@@ -374,9 +374,9 @@ func.func @_QPopenmp_target_data_region() {
// CHECK-LABEL: llvm.func @_QPopenmp_target_data_region() {
// CHECK: %[[VAL_0:.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: %[[VAL_1:.*]] = llvm.alloca %[[VAL_0]] x !llvm.array<1024 x i32> {bindc_name = "a"} : (i64) -> !llvm.ptr
// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[VAL_3:.*]] = llvm.alloca %[[VAL_2]] x i32 {bindc_name = "i"} : (i64) -> !llvm.ptr
+// CHECK: %[[VAL_1:.*]] = llvm.alloca %[[VAL_0]] x !llvm.array<1024 x i32> {bindc_name = "a"} : (i64) -> !llvm.ptr
// CHECK: %[[VAL_MAX:.*]] = llvm.mlir.constant(1024 : index) : i64
// CHECK: %[[VAL_ONE:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[VAL_ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
@@ -675,9 +675,9 @@ func.func @_QPsb() {
}
// CHECK: llvm.func @_QPsb() {
-// CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: %[[LI_REF:.*]] = llvm.alloca %6 x i32 {bindc_name = "li"} : (i64) -> !llvm.ptr
+// CHECK: %[[LI_REF:.*]] = llvm.alloca %[[SIZE]] x i32 {bindc_name = "li"} : (i64) -> !llvm.ptr
+// CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: omp.sections {
// CHECK: omp.section {
// CHECK: llvm.br ^[[BB_ENTRY:.*]]({{.*}})
@@ -715,7 +715,7 @@ func.func @_QPsb() {
// CHECK: }
// CHECK-LABEL: @_QPsimple_reduction
// CHECK-SAME: %[[ARRAY_REF:.*]]: !llvm.ptr
-// CHECK: %[[RED_ACCUMULATOR:.*]] = llvm.alloca %2 x i32 {bindc_name = "x"} : (i64) -> !llvm.ptr
+// CHECK: %[[RED_ACCUMULATOR:.*]] = llvm.alloca %1 x i32 {bindc_name = "x"} : (i64) -> !llvm.ptr
// CHECK: omp.parallel {
// CHECK: omp.wsloop reduction(@[[EQV_REDUCTION]] %[[RED_ACCUMULATOR]] -> %[[PRV:.+]] : !llvm.ptr) {
// CHECK-NEXT: omp.loop_nest
@@ -797,6 +797,7 @@ func.func @_QPs(%arg0: !fir.ref<!fir.complex<4>> {fir.bindc_name = "x"}) {
// Test if llvm.alloca is properly inserted in the omp section
+//CHECK: %[[CONST0:.*]] = llvm.mlir.constant(1 : i64) : i64
//CHECK: %[[CONST:.*]] = llvm.mlir.constant(1 : i64) : i64
//CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[CONST]] x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> {bindc_name = "iattr"} : (i64) -> !llvm.ptr
//CHECK: omp.parallel {
@@ -907,9 +908,9 @@ omp.critical.declare @help hint(contended)
// CHECK: llvm.func @omp_critical_() {
func.func @omp_critical_() {
-// CHECK: %[[X_REF:.*]] = llvm.alloca %{{.*}} x i32 {bindc_name = "x"} : (i64) -> !llvm.ptr
- %0 = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFomp_criticalEx"}
// CHECK: %[[Y_REF:.*]] = llvm.alloca %{{.*}} x i32 {bindc_name = "y"} : (i64) -> !llvm.ptr
+ %0 = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFomp_criticalEx"}
+// CHECK: %[[X_REF:.*]] = llvm.alloca %{{.*}} x i32 {bindc_name = "x"} : (i64) -> !llvm.ptr
%1 = fir.alloca i32 {bindc_name = "y", uniq_name = "_QFomp_criticalEy"}
// CHECK: omp.critical(@help)
omp.critical(@help) {
diff --git a/flang/test/Fir/convert-to-llvm.fir b/flang/test/Fir/convert-to-llvm.fir
index ee116e9..d705967 100644
--- a/flang/test/Fir/convert-to-llvm.fir
+++ b/flang/test/Fir/convert-to-llvm.fir
@@ -1178,7 +1178,7 @@ func.func @alloca_fixed_char_array(%e : index) -> !fir.ref<!fir.array<?x?x!fir.c
// CHECK-LABEL: llvm.func @alloca_fixed_char_array
// CHECK-SAME: ([[E:%.*]]: i64) -> !llvm.ptr
// CHECK-DAG: [[ONE:%.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: [[PROD1:%.*]] = llvm.mul [[ONE]], [[E]] : i64
+// CHECK: [[PROD1:%.*]] = llvm.mul [[E]], [[ONE]] : i64
// CHECK: [[PROD2:%.*]] = llvm.mul [[PROD1]], [[E]] : i64
// GENERIC: [[A:%.*]] = llvm.alloca [[PROD2]] x !llvm.array<8 x i8>
// AMDGPU: [[AA:%.*]] = llvm.alloca [[PROD2]] x !llvm.array<8 x i8> : (i64) -> !llvm.ptr<5>
@@ -1225,7 +1225,7 @@ func.func @alloca_multidim_array(%0 : index) -> !fir.ref<!fir.array<8x16x32xf32>
// CHECK-SAME: ([[OP1:%.*]]: i64) -> !llvm.ptr
// CHECK: [[OP2:%.*]] = llvm.mlir.constant(24 : index) : i64
// CHECK: [[ONE:%.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: [[MUL1:%.*]] = llvm.mul [[ONE]], [[OP1]] : i64
+// CHECK: [[MUL1:%.*]] = llvm.mul [[OP1]], [[ONE]] : i64
// CHECK: [[TOTAL:%.*]] = llvm.mul [[MUL1]], [[OP2]] : i64
// GENERIC: [[A:%.*]] = llvm.alloca [[TOTAL]] x !llvm.array<32 x array<16 x array<8 x f32>>>
// AMDGPU: [[AA:%.*]] = llvm.alloca [[TOTAL]] x !llvm.array<32 x array<16 x array<8 x f32>>> : (i64) -> !llvm.ptr<5>
@@ -1246,7 +1246,7 @@ func.func @alloca_const_interior_array(%0 : index) -> !fir.ref<!fir.array<8x9x?x
// CHECK-SAME: ([[OP1:%.*]]: i64) -> !llvm.ptr
// CHECK: [[OP2:%.*]] = llvm.mlir.constant(64 : index) : i64
// CHECK: [[ONE:%.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: [[MUL1:%.*]] = llvm.mul [[ONE]], [[OP1]] : i64
+// CHECK: [[MUL1:%.*]] = llvm.mul [[OP1]], [[ONE]] : i64
// CHECK: [[TOTAL:%.*]] = llvm.mul [[MUL1]], [[OP2]] : i64
// GENERIC: [[A:%.*]] = llvm.alloca [[TOTAL]] x !llvm.array<9 x array<8 x f32>>
// AMDGPU: [[AA:%.*]] = llvm.alloca [[TOTAL]] x !llvm.array<9 x array<8 x f32>> : (i64) -> !llvm.ptr<5>
@@ -1937,7 +1937,7 @@ func.func private @_QPxb(!fir.box<!fir.array<?x?xf64>>)
// CHECK: %[[N2_TMP:.*]] = llvm.sub %[[N]], %[[SH2]] : i64
// CHECK: %[[N2:.*]] = llvm.add %[[N2_TMP]], %[[C1]] : i64
// CHECK: %[[C1_0:.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: %[[ARR_SIZE_TMP1:.*]] = llvm.mul %[[C1_0]], %[[N1]] : i64
+// CHECK: %[[ARR_SIZE_TMP1:.*]] = llvm.mul %[[N1]], %[[C1_0]] : i64
// CHECK: %[[ARR_SIZE:.*]] = llvm.mul %[[ARR_SIZE_TMP1]], %[[N2]] : i64
// GENERIC: %[[ARR:.*]] = llvm.alloca %[[ARR_SIZE]] x f64 {bindc_name = "arr"} : (i64) -> !llvm.ptr
// AMDGPU: %[[AR:.*]] = llvm.alloca %[[ARR_SIZE]] x f64 {bindc_name = "arr"} : (i64) -> !llvm.ptr<5>
@@ -2015,17 +2015,17 @@ func.func private @_QPtest_dt_callee(%arg0: !fir.box<!fir.array<?xi32>>)
// AMDGPU: %[[AA:.*]] = llvm.alloca %[[ALLOCA_SIZE]] x !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr<5>
// AMDGPU: %[[ALLOCA:.*]] = llvm.addrspacecast %[[AA]] : !llvm.ptr<5> to !llvm.ptr
// CHECK: %[[C20:.*]] = llvm.mlir.constant(20 : index) : i64
-// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: %[[C10:.*]] = llvm.mlir.constant(10 : i64) : i64
-// CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : i64) : i64
-// CHECK: %[[ALLOCA_SIZE_V:.*]] = llvm.mlir.constant(1 : i64) : i64
-// GENERIC: %[[V:.*]] = llvm.alloca %[[ALLOCA_SIZE_V]] x i32 {bindc_name = "v"} : (i64) -> !llvm.ptr
-// AMDGPU: %[[AB:.*]] = llvm.alloca %[[ALLOCA_SIZE_V]] x i32 {bindc_name = "v"} : (i64) -> !llvm.ptr<5>
-// AMDGPU: %[[V:.*]] = llvm.addrspacecast %[[AB]] : !llvm.ptr<5> to !llvm.ptr
// CHECK: %[[ALLOCA_SIZE_X:.*]] = llvm.mlir.constant(1 : i64) : i64
// GENERIC: %[[X:.*]] = llvm.alloca %[[ALLOCA_SIZE_X]] x !llvm.array<20 x struct<"_QFtest_dt_sliceTt", (i32, i32)>> {bindc_name = "x"} : (i64) -> !llvm.ptr
// AMDGPU: %[[AC:.*]] = llvm.alloca %[[ALLOCA_SIZE_X]] x !llvm.array<20 x struct<"_QFtest_dt_sliceTt", (i32, i32)>> {bindc_name = "x"} : (i64) -> !llvm.ptr<5>
// AMDGPU: %[[X:.*]] = llvm.addrspacecast %[[AC]] : !llvm.ptr<5> to !llvm.ptr
+// CHECK: %[[ALLOCA_SIZE_V:.*]] = llvm.mlir.constant(1 : i64) : i64
+// GENERIC: %[[V:.*]] = llvm.alloca %[[ALLOCA_SIZE_V]] x i32 {bindc_name = "v"} : (i64) -> !llvm.ptr
+// AMDGPU: %[[AB:.*]] = llvm.alloca %[[ALLOCA_SIZE_V]] x i32 {bindc_name = "v"} : (i64) -> !llvm.ptr<5>
+// AMDGPU: %[[V:.*]] = llvm.addrspacecast %[[AB]] : !llvm.ptr<5> to !llvm.ptr
+// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[C10:.*]] = llvm.mlir.constant(10 : i64) : i64
+// CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : i64) : i64
// CHECK: %[[TYPE_CODE:.*]] = llvm.mlir.constant(9 : i32) : i32
// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
diff --git a/flang/test/Integration/OpenMP/copyprivate.f90 b/flang/test/Integration/OpenMP/copyprivate.f90
index d32319a..dd69ebd 100644
--- a/flang/test/Integration/OpenMP/copyprivate.f90
+++ b/flang/test/Integration/OpenMP/copyprivate.f90
@@ -33,8 +33,8 @@
!CHECK-NEXT: }
!CHECK-LABEL: define internal void @test_scalar_..omp_par({{.*}})
-!CHECK: %[[I:.*]] = alloca i32, i64 1
!CHECK: %[[J:.*]] = alloca i32, i64 1
+!CHECK: %[[I:.*]] = alloca i32, i64 1
!CHECK: %[[DID_IT:.*]] = alloca i32
!CHECK: store i32 0, ptr %[[DID_IT]]
!CHECK: %[[THREAD_NUM1:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[LOC:.*]])
diff --git a/flang/test/Transforms/debug-local-var-2.f90 b/flang/test/Transforms/debug-local-var-2.f90
index ce78bfd..79fe1ba 100644
--- a/flang/test/Transforms/debug-local-var-2.f90
+++ b/flang/test/Transforms/debug-local-var-2.f90
@@ -6,12 +6,12 @@
! This tests checks the debug information for local variables in llvm IR.
! BOTH-LABEL: define void @_QQmain
-! BOTH-DAG: %[[AL11:.*]] = alloca i32
-! BOTH-DAG: %[[AL12:.*]] = alloca i64
-! BOTH-DAG: %[[AL13:.*]] = alloca i8
-! BOTH-DAG: %[[AL14:.*]] = alloca i32
-! BOTH-DAG: %[[AL15:.*]] = alloca float
! BOTH-DAG: %[[AL16:.*]] = alloca double
+! BOTH-DAG: %[[AL15:.*]] = alloca float
+! BOTH-DAG: %[[AL14:.*]] = alloca i32
+! BOTH-DAG: %[[AL13:.*]] = alloca i8
+! BOTH-DAG: %[[AL12:.*]] = alloca i64
+! BOTH-DAG: %[[AL11:.*]] = alloca i32
! INTRINSICS-DAG: call void @llvm.dbg.declare(metadata ptr %[[AL11]], metadata ![[I4:.*]], metadata !DIExpression())
! INTRINSICS-DAG: call void @llvm.dbg.declare(metadata ptr %[[AL12]], metadata ![[I8:.*]], metadata !DIExpression())
! INTRINSICS-DAG: call void @llvm.dbg.declare(metadata ptr %[[AL13]], metadata ![[L1:.*]], metadata !DIExpression())