; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_35 -verify-machineinstrs | FileCheck %s
; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_35 | %ptxas-verify %}
; Verify that we correctly emit code for extending ldg/ldu. We do not expose
; extending variants in the backend, but the ldg/ldu selection code may pick
; extending loads as candidates. We do want to support this, so make sure we
; emit the necessary cvt.* instructions to implement the extension and let ptxas
; emit the real extending loads.
target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
target triple = "nvptx64-nvidia-cuda"
; Kernel under test: *arg1 += ((i64) sext arg[arg3 + 8]) ^ 2, where arg is a
; read-only i16 buffer in global memory (addrspace 1). The i16 load feeding a
; sext is the "extending load" case: the CHECK lines (autogenerated by
; update_llc_test_checks.py -- regenerate, do not hand-edit) show it selected
; as ld.global.nc.s16 with the widening folded into mul.wide.s32.
define ptx_kernel void @spam(ptr addrspace(1) noalias nocapture readonly %arg, ptr addrspace(1) noalias nocapture %arg1, i64 %arg2, i64 %arg3) #0 {
; CHECK-LABEL: spam(
; CHECK: .maxntid 1, 1, 1
; CHECK-NEXT: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<9>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %bb
; CHECK-NEXT: ld.param.b64 %rd1, [spam_param_0];
; CHECK-NEXT: ld.param.b64 %rd2, [spam_param_3];
; CHECK-NEXT: shl.b64 %rd3, %rd2, 1;
; CHECK-NEXT: add.s64 %rd4, %rd1, %rd3;
; CHECK-NEXT: ld.param.b64 %rd5, [spam_param_1];
; CHECK-NEXT: ld.global.nc.s16 %r1, [%rd4+16];
; CHECK-NEXT: mul.wide.s32 %rd6, %r1, %r1;
; CHECK-NEXT: ld.global.b64 %rd7, [%rd5];
; CHECK-NEXT: add.s64 %rd8, %rd6, %rd7;
; CHECK-NEXT: st.global.b64 [%rd5], %rd8;
; CHECK-NEXT: ret;
bb:
; %arg is noalias + readonly, so the load through it is an ldg (ld.global.nc)
; candidate; index arg3+8 shows up above as the +16 byte displacement (i16).
%tmp5 = add nsw i64 %arg3, 8
%tmp6 = getelementptr i16, ptr addrspace(1) %arg, i64 %tmp5
%tmp7 = load i16, ptr addrspace(1) %tmp6, align 2
; The sext of the loaded i16 is what makes this an extending-load candidate.
%tmp8 = sext i16 %tmp7 to i64
%tmp9 = mul nsw i64 %tmp8, %tmp8
; Read-modify-write of *arg1: accumulate the squared value.
%tmp10 = load i64, ptr addrspace(1) %arg1, align 8
%tmp11 = add nsw i64 %tmp9, %tmp10
store i64 %tmp11, ptr addrspace(1) %arg1, align 8
ret void
}
; "nvvm.maxntid"="1,1,1" drives the .maxntid 1, 1, 1 directive checked above.
attributes #0 = { norecurse nounwind "polly.skip.fn" "nvvm.maxntid"="1,1,1" }