// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ --version 5
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=52 -triple x86_64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=52 -triple x86_64-unknown-linux-gnu -x c++ -std=c++11 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=52 -triple x86_64-unknown-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s

// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=52 -fnoopenmp-use-tls -triple x86_64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck --check-prefix CHECK-TLS %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=52 -fnoopenmp-use-tls -triple x86_64-unknown-linux-gnu -x c++ -std=c++11 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=52 -fnoopenmp-use-tls -triple x86_64-unknown-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix CHECK-TLS %s

// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=52 -triple x86_64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=52 -triple x86_64-unknown-linux-gnu -x c++ -std=c++11 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=52 -triple x86_64-unknown-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s

// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=52 -fnoopenmp-use-tls -triple x86_64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=52 -fnoopenmp-use-tls -triple x86_64-unknown-linux-gnu -x c++ -std=c++11 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=52 -fnoopenmp-use-tls -triple x86_64-unknown-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s

// expected-no-diagnostics

#ifndef HEADER
#define HEADER

enum omp_allocator_handle_t {
  omp_null_allocator = 0,
  omp_default_mem_alloc = 1,
  omp_large_cap_mem_alloc = 2,
  omp_const_mem_alloc = 3,
  omp_high_bw_mem_alloc = 4,
  omp_low_lat_mem_alloc = 5,
  omp_cgroup_mem_alloc = 6,
  omp_pteam_mem_alloc = 7,
  omp_thread_mem_alloc = 8,
  KMP_ALLOCATOR_MAX_HANDLE = __UINTPTR_MAX__
};

template <typename T>
struct ST {
  static T m;
};

template <typename T, omp_allocator_handle_t TY, unsigned al>
T foo() {
  T v;
  #pragma omp scope private(v) allocate(allocator(TY):v)
  v = ST<T>::m;
  #pragma omp scope private(v) allocate(align(al), allocator(TY):v)
  ++v;
  return v;
}

namespace ns {
int a;
}

omp_allocator_handle_t foo();

int main() {
  static int a;
  static int temp;
  int v;
  #pragma omp scope private(ns::a) allocate(allocator(omp_pteam_mem_alloc):ns::a)
  ns::a++;
  #pragma omp scope private(a) allocate(align(8),allocator(omp_thread_mem_alloc):a)
  a = 2;
  #pragma omp scope private(v) allocate(align(1) : v)
  ++v;
  #pragma omp scope private(v) allocate(allocator(omp_default_mem_alloc) : v)
  ++v;
  #pragma omp scope private(v) allocate(allocator(omp_large_cap_mem_alloc), align(8) : v)
  ++v;
  #pragma omp scope private(v) allocate(align(4) : v)
  ++v;
  #pragma omp scope private(v) allocate(align(2), allocator(omp_default_mem_alloc) : v)
  ++v;
  #pragma omp scope private(v) allocate(align(8), allocator(foo()) : v)
  ++v;
  double b = 3;
  #pragma omp scope private(temp) allocate(temp)
  temp += foo<int, omp_cgroup_mem_alloc, 8>();
  return temp+ns::a;
}

extern template int ST<int>::m;

const int b = 8;

void bar(int a, float &z) {
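  // 'align(b)' below uses the constant 'const int b = 8', so the generated
  // checks expect __kmpc_aligned_alloc with an 8-byte alignment argument and
  // the omp_default_mem_alloc handle (1) for both 'a' and 'z'.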
  #pragma omp scope private(a,z) allocate(align(b), allocator(omp_default_mem_alloc) : a,z)
  a += b + z;
}
#endif
// CHECK-LABEL: define dso_local noundef i32 @main(
// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[B:%.*]] = alloca double, align 8
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
// CHECK-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK-NEXT: [[DOTA__VOID_ADDR:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP0]], i64 4, ptr inttoptr (i64 7 to ptr))
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTA__VOID_ADDR]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK-NEXT: store i32 [[INC]], ptr [[DOTA__VOID_ADDR]], align 4
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTA__VOID_ADDR]], ptr inttoptr (i64 7 to ptr))
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP0]])
// CHECK-NEXT: [[DOTA__VOID_ADDR1:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 4, ptr inttoptr (i64 8 to ptr))
// CHECK-NEXT: store i32 2, ptr [[DOTA__VOID_ADDR1]], align 4
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTA__VOID_ADDR1]], ptr inttoptr (i64 8 to ptr))
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-NEXT: [[DOTV__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 4, ptr null)
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTV__VOID_ADDR]], align 4
// CHECK-NEXT: [[INC2:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK-NEXT: store i32 [[INC2]], ptr [[DOTV__VOID_ADDR]], align 4
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR]], ptr null)
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-NEXT: [[DOTV__VOID_ADDR3:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP0]], i64 4, ptr inttoptr (i64 1 to ptr))
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTV__VOID_ADDR3]], align 4
// CHECK-NEXT: [[INC4:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK-NEXT: store i32 [[INC4]], ptr [[DOTV__VOID_ADDR3]], align 4
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR3]], ptr inttoptr (i64 1 to ptr))
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-NEXT: [[DOTV__VOID_ADDR5:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 4, ptr inttoptr (i64 2 to ptr))
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTV__VOID_ADDR5]], align 4
// CHECK-NEXT: [[INC6:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK-NEXT: store i32 [[INC6]], ptr [[DOTV__VOID_ADDR5]], align 4
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR5]], ptr inttoptr (i64 2 to ptr))
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-NEXT: [[DOTV__VOID_ADDR7:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 4, ptr null)
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTV__VOID_ADDR7]], align 4
// CHECK-NEXT: [[INC8:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK-NEXT: store i32 [[INC8]], ptr [[DOTV__VOID_ADDR7]], align 4
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR7]], ptr null)
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-NEXT: [[DOTV__VOID_ADDR9:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 4, ptr inttoptr (i64 1 to ptr))
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTV__VOID_ADDR9]], align 4
// CHECK-NEXT: [[INC10:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-NEXT: store i32 [[INC10]], ptr [[DOTV__VOID_ADDR9]], align 4
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR9]], ptr inttoptr (i64 1 to ptr))
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-NEXT: [[CALL:%.*]] = call noundef i64 @_Z3foov()
// CHECK-NEXT: [[CONV:%.*]] = inttoptr i64 [[CALL]] to ptr
// CHECK-NEXT: [[DOTV__VOID_ADDR11:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 4, ptr [[CONV]])
// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTV__VOID_ADDR11]], align 4
// CHECK-NEXT: [[INC12:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK-NEXT: store i32 [[INC12]], ptr [[DOTV__VOID_ADDR11]], align 4
// CHECK-NEXT: [[CALL13:%.*]] = call noundef i64 @_Z3foov()
// CHECK-NEXT: [[CONV14:%.*]] = inttoptr i64 [[CALL13]] to ptr
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR11]], ptr [[CONV14]])
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-NEXT: store double 3.000000e+00, ptr [[B]], align 8
// CHECK-NEXT: [[DOTTEMP__VOID_ADDR:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP0]], i64 4, ptr null)
// CHECK-NEXT: [[CALL15:%.*]] = call noundef i32 @_Z3fooIiL22omp_allocator_handle_t6ELj8EET_v()
// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTTEMP__VOID_ADDR]], align 4
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[CALL15]]
// CHECK-NEXT: store i32 [[ADD]], ptr [[DOTTEMP__VOID_ADDR]], align 4
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTTEMP__VOID_ADDR]], ptr null)
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZZ4mainE4temp, align 4
// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr @_ZN2ns1aE, align 4
// CHECK-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP9]], [[TMP10]]
// CHECK-NEXT: ret i32 [[ADD16]]
//
//
// CHECK-LABEL: define linkonce_odr noundef i32 @_Z3fooIiL22omp_allocator_handle_t6ELj8EET_v(
// CHECK-SAME: ) #[[ATTR4:[0-9]+]] comdat {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-NEXT: [[DOTV__VOID_ADDR:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP0]], i64 4, ptr inttoptr (i64 6 to ptr))
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @_ZN2STIiE1mE, align 4
// CHECK-NEXT: store i32 [[TMP1]], ptr [[DOTV__VOID_ADDR]], align 4
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR]], ptr inttoptr (i64 6 to ptr))
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-NEXT: [[DOTV__VOID_ADDR1:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 4, ptr inttoptr (i64 6 to ptr))
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTV__VOID_ADDR1]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK-NEXT: store i32 [[INC]], ptr [[DOTV__VOID_ADDR1]], align 4
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR1]], ptr inttoptr (i64 6 to ptr))
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[V]], align 4
// CHECK-NEXT: ret i32 [[TMP3]]
//
//
// CHECK-LABEL: define dso_local void @_Z3bariRf(
// CHECK-SAME: i32 noundef [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[Z:%.*]]) #[[ATTR4]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[Z_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[TMP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-NEXT: store ptr [[Z]], ptr [[Z_ADDR]], align 8
// CHECK-NEXT: [[DOTA__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 4, ptr inttoptr (i64 1 to ptr))
// CHECK-NEXT: [[DOTZ__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 4, ptr inttoptr (i64 1 to ptr))
// CHECK-NEXT: store ptr [[DOTZ__VOID_ADDR]], ptr [[TMP]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TMP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[TMP1]], align 4
// CHECK-NEXT: [[ADD:%.*]] = fadd float 8.000000e+00, [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTA__VOID_ADDR]], align 4
// CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP3]] to float
// CHECK-NEXT: [[ADD1:%.*]] = fadd float [[CONV]], [[ADD]]
// CHECK-NEXT: [[CONV2:%.*]] = fptosi float [[ADD1]] to i32
// CHECK-NEXT: store i32 [[CONV2]], ptr [[DOTA__VOID_ADDR]], align 4
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTZ__VOID_ADDR]], ptr inttoptr (i64 1 to ptr))
// CHECK-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTA__VOID_ADDR]], ptr inttoptr (i64 1 to ptr))
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-NEXT: ret void
//
//
// CHECK-TLS-LABEL: define dso_local noundef i32 @main(
// CHECK-TLS-SAME: ) #[[ATTR0:[0-9]+]] {
// CHECK-TLS-NEXT: [[ENTRY:.*:]]
// CHECK-TLS-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK-TLS-NEXT: [[V:%.*]] = alloca i32, align 4
// CHECK-TLS-NEXT: [[B:%.*]] = alloca double, align 8
// CHECK-TLS-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
// CHECK-TLS-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK-TLS-NEXT: [[DOTA__VOID_ADDR:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP0]], i64 4, ptr inttoptr (i64 7 to ptr))
// CHECK-TLS-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTA__VOID_ADDR]], align 4
// CHECK-TLS-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK-TLS-NEXT: store i32 [[INC]], ptr [[DOTA__VOID_ADDR]], align 4
// CHECK-TLS-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTA__VOID_ADDR]], ptr inttoptr (i64 7 to ptr))
// CHECK-TLS-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP0]])
// CHECK-TLS-NEXT: [[DOTA__VOID_ADDR1:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 4, ptr inttoptr (i64 8 to ptr))
// CHECK-TLS-NEXT: store i32 2, ptr [[DOTA__VOID_ADDR1]], align 4
// CHECK-TLS-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTA__VOID_ADDR1]], ptr inttoptr (i64 8 to ptr))
// CHECK-TLS-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-TLS-NEXT: [[DOTV__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 4, ptr null)
// CHECK-TLS-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTV__VOID_ADDR]], align 4
// CHECK-TLS-NEXT: [[INC2:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK-TLS-NEXT: store i32 [[INC2]], ptr [[DOTV__VOID_ADDR]], align 4
// CHECK-TLS-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR]], ptr null)
// CHECK-TLS-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-TLS-NEXT: [[DOTV__VOID_ADDR3:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP0]], i64 4, ptr inttoptr (i64 1 to ptr))
// CHECK-TLS-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTV__VOID_ADDR3]], align 4
// CHECK-TLS-NEXT: [[INC4:%.*]] = add nsw i32 [[TMP3]], 1
// CHECK-TLS-NEXT: store i32 [[INC4]], ptr [[DOTV__VOID_ADDR3]], align 4
// CHECK-TLS-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR3]], ptr inttoptr (i64 1 to ptr))
// CHECK-TLS-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-TLS-NEXT: [[DOTV__VOID_ADDR5:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 4, ptr inttoptr (i64 2 to ptr))
// CHECK-TLS-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTV__VOID_ADDR5]], align 4
// CHECK-TLS-NEXT: [[INC6:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK-TLS-NEXT: store i32 [[INC6]], ptr [[DOTV__VOID_ADDR5]], align 4
// CHECK-TLS-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR5]], ptr inttoptr (i64 2 to ptr))
// CHECK-TLS-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-TLS-NEXT: [[DOTV__VOID_ADDR7:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 4, ptr null)
// CHECK-TLS-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTV__VOID_ADDR7]], align 4
// CHECK-TLS-NEXT: [[INC8:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK-TLS-NEXT: store i32 [[INC8]], ptr [[DOTV__VOID_ADDR7]], align 4
// CHECK-TLS-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR7]], ptr null)
// CHECK-TLS-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-TLS-NEXT: [[DOTV__VOID_ADDR9:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 4, ptr inttoptr (i64 1 to ptr))
// CHECK-TLS-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTV__VOID_ADDR9]], align 4
// CHECK-TLS-NEXT: [[INC10:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK-TLS-NEXT: store i32 [[INC10]], ptr [[DOTV__VOID_ADDR9]], align 4
// CHECK-TLS-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR9]], ptr inttoptr (i64 1 to ptr))
// CHECK-TLS-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-TLS-NEXT: [[CALL:%.*]] = call noundef i64 @_Z3foov()
// CHECK-TLS-NEXT: [[CONV:%.*]] = inttoptr i64 [[CALL]] to ptr
// CHECK-TLS-NEXT: [[DOTV__VOID_ADDR11:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 4, ptr [[CONV]])
// CHECK-TLS-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTV__VOID_ADDR11]], align 4
// CHECK-TLS-NEXT: [[INC12:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK-TLS-NEXT: store i32 [[INC12]], ptr [[DOTV__VOID_ADDR11]], align 4
// CHECK-TLS-NEXT: [[CALL13:%.*]] = call noundef i64 @_Z3foov()
// CHECK-TLS-NEXT: [[CONV14:%.*]] = inttoptr i64 [[CALL13]] to ptr
// CHECK-TLS-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR11]], ptr [[CONV14]])
// CHECK-TLS-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-TLS-NEXT: store double 3.000000e+00, ptr [[B]], align 8
// CHECK-TLS-NEXT: [[DOTTEMP__VOID_ADDR:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP0]], i64 4, ptr null)
// CHECK-TLS-NEXT: [[CALL15:%.*]] = call noundef i32 @_Z3fooIiL22omp_allocator_handle_t6ELj8EET_v()
// CHECK-TLS-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTTEMP__VOID_ADDR]], align 4
// CHECK-TLS-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[CALL15]]
// CHECK-TLS-NEXT: store i32 [[ADD]], ptr [[DOTTEMP__VOID_ADDR]], align 4
// CHECK-TLS-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTTEMP__VOID_ADDR]], ptr null)
// CHECK-TLS-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-TLS-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZZ4mainE4temp, align 4
// CHECK-TLS-NEXT: [[TMP10:%.*]] = load i32, ptr @_ZN2ns1aE, align 4
// CHECK-TLS-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP9]], [[TMP10]]
// CHECK-TLS-NEXT: ret i32 [[ADD16]]
//
//
// CHECK-TLS-LABEL: define linkonce_odr noundef i32 @_Z3fooIiL22omp_allocator_handle_t6ELj8EET_v(
// CHECK-TLS-SAME: ) #[[ATTR4:[0-9]+]] comdat {
// CHECK-TLS-NEXT: [[ENTRY:.*:]]
// CHECK-TLS-NEXT: [[V:%.*]] = alloca i32, align 4
// CHECK-TLS-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-TLS-NEXT: [[DOTV__VOID_ADDR:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP0]], i64 4, ptr inttoptr (i64 6 to ptr))
// CHECK-TLS-NEXT: [[TMP1:%.*]] = load i32, ptr @_ZN2STIiE1mE, align 4
// CHECK-TLS-NEXT: store i32 [[TMP1]], ptr [[DOTV__VOID_ADDR]], align 4
// CHECK-TLS-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR]], ptr inttoptr (i64 6 to ptr))
// CHECK-TLS-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-TLS-NEXT: [[DOTV__VOID_ADDR1:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 4, ptr inttoptr (i64 6 to ptr))
// CHECK-TLS-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTV__VOID_ADDR1]], align 4
// CHECK-TLS-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1
// CHECK-TLS-NEXT: store i32 [[INC]], ptr [[DOTV__VOID_ADDR1]], align 4
// CHECK-TLS-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTV__VOID_ADDR1]], ptr inttoptr (i64 6 to ptr))
// CHECK-TLS-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-TLS-NEXT: [[TMP3:%.*]] = load i32, ptr [[V]], align 4
// CHECK-TLS-NEXT: ret i32 [[TMP3]]
//
//
// CHECK-TLS-LABEL: define dso_local void @_Z3bariRf(
// CHECK-TLS-SAME: i32 noundef [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[Z:%.*]]) #[[ATTR4]] {
// CHECK-TLS-NEXT: [[ENTRY:.*:]]
// CHECK-TLS-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS-NEXT: [[Z_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS-NEXT: [[TMP:%.*]] = alloca ptr, align 8
// CHECK-TLS-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-TLS-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-TLS-NEXT: store ptr [[Z]], ptr [[Z_ADDR]], align 8
// CHECK-TLS-NEXT: [[DOTA__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 4, ptr inttoptr (i64 1 to ptr))
// CHECK-TLS-NEXT: [[DOTZ__VOID_ADDR:%.*]] = call ptr @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 4, ptr inttoptr (i64 1 to ptr))
// CHECK-TLS-NEXT: store ptr [[DOTZ__VOID_ADDR]], ptr [[TMP]], align 8
// CHECK-TLS-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TMP]], align 8
// CHECK-TLS-NEXT: [[TMP2:%.*]] = load float, ptr [[TMP1]], align 4
// CHECK-TLS-NEXT: [[ADD:%.*]] = fadd float 8.000000e+00, [[TMP2]]
// CHECK-TLS-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTA__VOID_ADDR]], align 4
// CHECK-TLS-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP3]] to float
// CHECK-TLS-NEXT: [[ADD1:%.*]] = fadd float [[CONV]], [[ADD]]
// CHECK-TLS-NEXT: [[CONV2:%.*]] = fptosi float [[ADD1]] to i32
// CHECK-TLS-NEXT: store i32 [[CONV2]], ptr [[DOTA__VOID_ADDR]], align 4
// CHECK-TLS-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTZ__VOID_ADDR]], ptr inttoptr (i64 1 to ptr))
// CHECK-TLS-NEXT: call void @__kmpc_free(i32 [[TMP0]], ptr [[DOTA__VOID_ADDR]], ptr inttoptr (i64 1 to ptr))
// CHECK-TLS-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP0]])
// CHECK-TLS-NEXT: ret void
//
//
// SIMD-ONLY0-LABEL: define dso_local noundef i32 @main(
// SIMD-ONLY0-SAME: ) #[[ATTR0:[0-9]+]] {
// SIMD-ONLY0-NEXT: [[ENTRY:.*:]]
// SIMD-ONLY0-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[V:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[A:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[A1:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[V2:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[V4:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[V6:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[V8:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[V10:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[V12:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[B:%.*]] = alloca double, align 8
// SIMD-ONLY0-NEXT: [[TEMP:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: store i32 0, ptr [[RETVAL]], align 4
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4
// SIMD-ONLY0-NEXT: [[INC:%.*]] = add nsw i32 [[TMP0]], 1
// SIMD-ONLY0-NEXT: store i32 [[INC]], ptr [[A]], align 4
// SIMD-ONLY0-NEXT: store i32 2, ptr [[A1]], align 4
// SIMD-ONLY0-NEXT: [[TMP1:%.*]] = load i32, ptr [[V2]], align 4
// SIMD-ONLY0-NEXT: [[INC3:%.*]] = add nsw i32 [[TMP1]], 1
// SIMD-ONLY0-NEXT: store i32 [[INC3]], ptr [[V2]], align 4
// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = load i32, ptr [[V4]], align 4
// SIMD-ONLY0-NEXT: [[INC5:%.*]] = add nsw i32 [[TMP2]], 1
// SIMD-ONLY0-NEXT: store i32 [[INC5]], ptr [[V4]], align 4
// SIMD-ONLY0-NEXT: [[TMP3:%.*]] = load i32, ptr [[V6]], align 4
// SIMD-ONLY0-NEXT: [[INC7:%.*]] = add nsw i32 [[TMP3]], 1
// SIMD-ONLY0-NEXT: store i32 [[INC7]], ptr [[V6]], align 4
// SIMD-ONLY0-NEXT: [[TMP4:%.*]] = load i32, ptr [[V8]], align 4
// SIMD-ONLY0-NEXT: [[INC9:%.*]] = add nsw i32 [[TMP4]], 1
// SIMD-ONLY0-NEXT: store i32 [[INC9]], ptr [[V8]], align 4
// SIMD-ONLY0-NEXT: [[TMP5:%.*]] = load i32, ptr [[V10]], align 4
// SIMD-ONLY0-NEXT: [[INC11:%.*]] = add nsw i32 [[TMP5]], 1
// SIMD-ONLY0-NEXT: store i32 [[INC11]], ptr [[V10]], align 4
// SIMD-ONLY0-NEXT: [[TMP6:%.*]] = load i32, ptr [[V12]], align 4
// SIMD-ONLY0-NEXT: [[INC13:%.*]] = add nsw i32 [[TMP6]], 1
// SIMD-ONLY0-NEXT: store i32 [[INC13]], ptr [[V12]], align 4
// SIMD-ONLY0-NEXT: store double 3.000000e+00, ptr [[B]], align 8
// SIMD-ONLY0-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooIiL22omp_allocator_handle_t6ELj8EET_v()
// SIMD-ONLY0-NEXT: [[TMP7:%.*]] = load i32, ptr [[TEMP]], align 4
// SIMD-ONLY0-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], [[CALL]]
// SIMD-ONLY0-NEXT: store i32 [[ADD]], ptr [[TEMP]], align 4
// SIMD-ONLY0-NEXT: [[TMP8:%.*]] = load i32, ptr @_ZZ4mainE4temp, align 4
// SIMD-ONLY0-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZN2ns1aE, align 4
// SIMD-ONLY0-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// SIMD-ONLY0-NEXT: ret i32 [[ADD14]]
//
//
// SIMD-ONLY0-LABEL: define linkonce_odr noundef i32 @_Z3fooIiL22omp_allocator_handle_t6ELj8EET_v(
// SIMD-ONLY0-SAME: ) #[[ATTR1:[0-9]+]] comdat {
// SIMD-ONLY0-NEXT: [[ENTRY:.*:]]
// SIMD-ONLY0-NEXT: [[V:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[V1:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[V2:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZN2STIiE1mE, align 4
// SIMD-ONLY0-NEXT: store i32 [[TMP0]], ptr [[V1]], align 4
// SIMD-ONLY0-NEXT: [[TMP1:%.*]] = load i32, ptr [[V2]], align 4
// SIMD-ONLY0-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// SIMD-ONLY0-NEXT: store i32 [[INC]], ptr [[V2]], align 4
// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = load i32, ptr [[V]], align 4
// SIMD-ONLY0-NEXT: ret i32 [[TMP2]]
//
//
// SIMD-ONLY0-LABEL: define dso_local void @_Z3bariRf(
// SIMD-ONLY0-SAME: i32 noundef [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[Z:%.*]]) #[[ATTR1]] {
// SIMD-ONLY0-NEXT: [[ENTRY:.*:]]
// SIMD-ONLY0-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[Z_ADDR:%.*]] = alloca ptr, align 8
// SIMD-ONLY0-NEXT: [[A1:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[Z2:%.*]] = alloca float, align 4
// SIMD-ONLY0-NEXT: [[TMP:%.*]] = alloca ptr, align 8
// SIMD-ONLY0-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// SIMD-ONLY0-NEXT: store ptr [[Z]], ptr [[Z_ADDR]], align 8
// SIMD-ONLY0-NEXT: store ptr [[Z2]], ptr [[TMP]], align 8
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = load ptr, ptr [[TMP]], align 8
// SIMD-ONLY0-NEXT: [[TMP1:%.*]] = load float, ptr [[TMP0]], align 4
// SIMD-ONLY0-NEXT: [[ADD:%.*]] = fadd float 8.000000e+00, [[TMP1]]
// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = load i32, ptr [[A1]], align 4
// SIMD-ONLY0-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to float
// SIMD-ONLY0-NEXT: [[ADD3:%.*]] = fadd float [[CONV]], [[ADD]]
// SIMD-ONLY0-NEXT: [[CONV4:%.*]] = fptosi float [[ADD3]] to i32
// SIMD-ONLY0-NEXT: store i32 [[CONV4]], ptr [[A1]], align 4
// SIMD-ONLY0-NEXT: ret void
//