// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple x86_64-pc-linux -emit-llvm -o - %s | FileCheck %s

__INT32_TYPE__ *m1(__INT32_TYPE__ i) __attribute__((alloc_align(1)));

// Condition where parameter to m1 is not size_t.
// CHECK-LABEL: @test1(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[CALL:%.*]] = call ptr @m1(i32 noundef [[TMP0]])
// CHECK-NEXT:    [[CASTED_ALIGN:%.*]] = zext i32 [[TMP0]] to i64
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[CALL]], i64 [[CASTED_ALIGN]]) ]
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test1(__INT32_TYPE__ a) { return *m1(a); }

// Condition where test2 param needs casting.
// CHECK-LABEL: @test2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
// CHECK-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT:    [[CALL:%.*]] = call ptr @m1(i32 noundef [[CONV]])
// CHECK-NEXT:    [[CASTED_ALIGN:%.*]] = zext i32 [[CONV]] to i64
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[CALL]], i64 [[CASTED_ALIGN]]) ]
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test2(__SIZE_TYPE__ a) { return *m1(a); }

__INT32_TYPE__ *m2(__SIZE_TYPE__ i) __attribute__((alloc_align(1)));

// test3 param needs casting, but 'm2' is correct.
// CHECK-LABEL: @test3(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// CHECK-NEXT:    [[CALL:%.*]] = call ptr @m2(i64 noundef [[CONV]])
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[CALL]], i64 [[CONV]]) ]
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test3(__INT32_TYPE__ a) { return *m2(a); }

// Every type matches, canonical example.
// CHECK-LABEL: @test4(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
// CHECK-NEXT:    [[CALL:%.*]] = call ptr @m2(i64 noundef [[TMP0]])
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[CALL]], i64 [[TMP0]]) ]
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test4(__SIZE_TYPE__ a) { return *m2(a); }
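// In each of the four tests above, the alignment operand handed to the
// assume bundle must be i64: it is zext'ed when the alignment argument is
// i32 (test1/test2) and used directly when it is already i64 (test3/test4).
// A sketch of the checked pattern, assuming an i32 alignment argument as in
// test1:
//
//   %call  = call ptr @m1(i32 noundef %v)
//   %align = zext i32 %v to i64
//   call void @llvm.assume(i1 true) [ "align"(ptr %call, i64 %align) ]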
struct Empty {};
struct MultiArgs { __INT64_TYPE__ a, b; };

// Struct parameter doesn't take up an IR parameter, 'i' takes up 1.
// Truncation to i64 is permissible, since alignments greater than 2^64 are insane.
__INT32_TYPE__ *m3(struct Empty s, __int128_t i) __attribute__((alloc_align(2)));
// CHECK-LABEL: @test5(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[E:%.*]] = alloca [[STRUCT_EMPTY:%.*]], align 1
// CHECK-NEXT:    store i128 [[A:%.*]], ptr [[A_ADDR]], align 16
// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr [[A_ADDR]], align 16
// CHECK-NEXT:    [[CALL:%.*]] = call ptr @m3(i128 noundef [[TMP0]])
// CHECK-NEXT:    [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP0]] to i64
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[CALL]], i64 [[CASTED_ALIGN]]) ]
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test5(__int128_t a) {
  struct Empty e;
  return *m3(e, a);
}

// Struct parameter takes up 2 parameters, 'i' takes up 1.
__INT32_TYPE__ *m4(struct MultiArgs s, __int128_t i) __attribute__((alloc_align(2)));
// CHECK-LABEL: @test6(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[E:%.*]] = alloca [[STRUCT_MULTIARGS:%.*]], align 8
// CHECK-NEXT:    store i128 [[A:%.*]], ptr [[A_ADDR]], align 16
// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr [[A_ADDR]], align 16
// CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { i64, i64 }, ptr [[E]], i32 0, i32 0
// CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { i64, i64 }, ptr [[E]], i32 0, i32 1
// CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8
// CHECK-NEXT:    [[CALL:%.*]] = call ptr @m4(i64 [[TMP2]], i64 [[TMP4]], i128 noundef [[TMP0]])
// CHECK-NEXT:    [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP0]] to i64
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[CALL]], i64 [[CASTED_ALIGN]]) ]
// CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP5]]
//
__INT32_TYPE__ test6(__int128_t a) {
  struct MultiArgs e;
  return *m4(e, a);
}
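// For context, a minimal sketch of how a caller typically relies on
// alloc_align (illustrative only, not part of the checked output;
// 'aligned_new' is a hypothetical allocator, not a real library function):
//
//   void *aligned_new(unsigned align) __attribute__((alloc_align(1)));
//
//   int g(void) {
//     int *p = aligned_new(64); // compiler may assume p is 64-byte aligned
//     return *p;
//   }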