; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -O0 -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64 %s
; RUN: llc -O0 -mtriple=riscv64-unknown-linux-gnu -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64-LINUX %s

declare void @llvm.init.trampoline(ptr, ptr, ptr)
declare ptr @llvm.adjust.trampoline(ptr)
declare i64 @f(ptr nest, i64)
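
; test0 materializes a 32-byte trampoline on the stack for @f with %p as the
; static chain, flushes the instruction cache over it (__clear_cache for the
; generic riscv64 target, __riscv_flush_icache on Linux), and then calls
; through the pointer returned by llvm.adjust.trampoline.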
define i64 @test0(i64 %n, ptr %p) nounwind {
; RV64-LABEL: test0:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -64
; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: lui a0, %hi(f)
; RV64-NEXT: addi a0, a0, %lo(f)
; RV64-NEXT: sd a0, 48(sp)
; RV64-NEXT: sd a1, 40(sp)
; RV64-NEXT: li a0, 919
; RV64-NEXT: sw a0, 24(sp)
; RV64-NEXT: lui a0, 40
; RV64-NEXT: addi a0, a0, 103
; RV64-NEXT: sw a0, 36(sp)
; RV64-NEXT: lui a0, 4155
; RV64-NEXT: addi a0, a0, 899
; RV64-NEXT: sw a0, 32(sp)
; RV64-NEXT: lui a0, 6203
; RV64-NEXT: addi a0, a0, 643
; RV64-NEXT: sw a0, 28(sp)
; RV64-NEXT: addi a1, sp, 40
; RV64-NEXT: addi a0, sp, 24
; RV64-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64-NEXT: call __clear_cache
; RV64-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a1, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: jalr a1
; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
;
; RV64-LINUX-LABEL: test0:
; RV64-LINUX: # %bb.0:
; RV64-LINUX-NEXT: addi sp, sp, -64
; RV64-LINUX-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-LINUX-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64-LINUX-NEXT: lui a0, %hi(f)
; RV64-LINUX-NEXT: addi a0, a0, %lo(f)
; RV64-LINUX-NEXT: sd a0, 48(sp)
; RV64-LINUX-NEXT: sd a1, 40(sp)
; RV64-LINUX-NEXT: li a0, 919
; RV64-LINUX-NEXT: sw a0, 24(sp)
; RV64-LINUX-NEXT: lui a0, 40
; RV64-LINUX-NEXT: addi a0, a0, 103
; RV64-LINUX-NEXT: sw a0, 36(sp)
; RV64-LINUX-NEXT: lui a0, 4155
; RV64-LINUX-NEXT: addi a0, a0, 899
; RV64-LINUX-NEXT: sw a0, 32(sp)
; RV64-LINUX-NEXT: lui a0, 6203
; RV64-LINUX-NEXT: addi a0, a0, 643
; RV64-LINUX-NEXT: sw a0, 28(sp)
; RV64-LINUX-NEXT: addi a1, sp, 40
; RV64-LINUX-NEXT: addi a0, sp, 24
; RV64-LINUX-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64-LINUX-NEXT: li a2, 0
; RV64-LINUX-NEXT: call __riscv_flush_icache
; RV64-LINUX-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64-LINUX-NEXT: ld a1, 16(sp) # 8-byte Folded Reload
; RV64-LINUX-NEXT: jalr a1
; RV64-LINUX-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64-LINUX-NEXT: addi sp, sp, 64
; RV64-LINUX-NEXT: ret
%alloca = alloca [32 x i8], align 8
call void @llvm.init.trampoline(ptr %alloca, ptr @f, ptr %p)
%tramp = call ptr @llvm.adjust.trampoline(ptr %alloca)
%ret = call i64 %tramp(i64 %n)
ret i64 %ret
}