=============================
User Guide for NVPTX Back-end
=============================
.. contents::
:local:
:depth: 3
Introduction
============
To support GPU programming, the NVPTX back-end supports a subset of LLVM IR
along with a defined set of conventions used to represent GPU programming
concepts. This document provides an overview of the general usage of the
back-end, including a description of the conventions used and the set of
accepted LLVM IR.
.. note::
This document assumes a basic familiarity with CUDA and the PTX
assembly language. Information about the CUDA Driver API and the PTX assembly
language can be found in the `CUDA documentation
<http://docs.nvidia.com/cuda/index.html>`_.
Conventions
===========
Marking Functions as Kernels
----------------------------
In PTX, there are two types of functions: *device functions*, which are only
callable by device code, and *kernel functions*, which are callable by host
code. By default, the back-end will emit device functions. The ``ptx_kernel``
calling convention is used to declare a function as a kernel function.
The following example shows a kernel function calling a device function in LLVM
IR. The function ``@my_kernel`` is callable from host code, but ``@my_fmad`` is
not.
.. code-block:: llvm
define float @my_fmad(float %x, float %y, float %z) {
%mul = fmul float %x, %y
%add = fadd float %mul, %z
ret float %add
}
define ptx_kernel void @my_kernel(ptr %ptr) {
%val = load float, ptr %ptr
%ret = call float @my_fmad(float %val, float %val, float %val)
store float %ret, ptr %ptr
ret void
}
When compiled, the PTX kernel functions are callable by host-side code.
.. _nvptx_fnattrs:
Function Attributes
-------------------
``"nvvm.maxclusterrank"="<n>"``
This attribute specifies the maximum number of blocks per cluster. Must be
non-zero. Only supported for Hopper+.
``"nvvm.minctasm"="<n>"``
This indicates a hint/directive to the compiler/driver, asking it to put at
least these many CTAs on an SM.
``"nvvm.maxnreg"="<n>"``
This attribute indicates the maximum number of registers to be used for the
kernel function.
``"nvvm.maxntid"="<x>[,<y>[,<z>]]"``
This attribute declares the maximum number of threads in the thread block
(CTA). The maximum number of threads is the product of the maximum extent in
each dimension. Exceeding the maximum number of threads results in a runtime
error or kernel launch failure.
``"nvvm.reqntid"="<x>[,<y>[,<z>]]"``
This attribute declares the exact number of threads in the thread block
(CTA). The number of threads is the product of the value in each dimension.
Specifying a different CTA dimension at launch will result in a runtime
error or kernel launch failure.
``"nvvm.cluster_dim"="<x>[,<y>[,<z>]]"``
This attribute declares the number of thread blocks (CTAs) in the cluster.
The total number of CTAs is the product of the number of CTAs in each
dimension. Specifying a different cluster dimension at launch will result in
a runtime error or kernel launch failure. Only supported for Hopper+.
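As an example, here is a minimal sketch (the kernel body is hypothetical) of a
kernel that is limited to at most 256 threads per CTA and asks for at least 4
CTAs per SM, using the string-attribute syntax described above:

.. code-block:: llvm

  define ptx_kernel void @bounded_kernel(ptr %out) "nvvm.maxntid"="256" "nvvm.minctasm"="4" {
    store i32 0, ptr %out
    ret void
  }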
.. _address_spaces:
Address Spaces
--------------
The NVPTX back-end uses the following address space mapping:
============= ======================
Address Space Memory Space
============= ======================
0 Generic
1 Global
2 Internal Use
3 Shared
4 Constant
5 Local
7 Shared Cluster
============= ======================
Every global variable and pointer type is assigned to one of these address
spaces, with 0 being the default address space. Intrinsics are provided which
can be used to convert pointers between the generic and non-generic address
spaces.
As an example, the following IR will define an array ``@g`` that resides in
global device memory.
.. code-block:: llvm
@g = internal addrspace(1) global [4 x i32] [ i32 0, i32 1, i32 2, i32 3 ]
LLVM IR functions can read and write to this array, and host-side code can
copy data to it by name with the CUDA Driver API.
Note that since address space 0 is the generic space, it is illegal to have
global variables in address space 0. Address space 0 is the default address
space in LLVM, so the ``addrspace(N)`` annotation is *required* for global
variables.
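For illustration, a sketch of a device function that reads the ``@g`` array
above through a generic pointer obtained with an ``addrspacecast``:

.. code-block:: llvm

  define i32 @read_g(i32 %i) {
    ; cast the global-space pointer into the generic address space
    %gen = addrspacecast ptr addrspace(1) @g to ptr
    %elt = getelementptr inbounds [4 x i32], ptr %gen, i32 0, i32 %i
    %val = load i32, ptr %elt
    ret i32 %val
  }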
Triples
-------
The NVPTX target uses the module triple to select between 32/64-bit code
generation and the driver-compiler interface to use. The triple architecture
can be one of ``nvptx`` (32-bit PTX) or ``nvptx64`` (64-bit PTX). The
operating system should be one of ``cuda`` or ``nvcl``, which determines the
interface used by the generated code to communicate with the driver. Most
users will want to use ``cuda`` as the operating system, which makes the
generated PTX compatible with the CUDA Driver API.
Example: 32-bit PTX for CUDA Driver API: ``nvptx-nvidia-cuda``
Example: 64-bit PTX for CUDA Driver API: ``nvptx64-nvidia-cuda``
.. _nvptx_arch_hierarchy:
NVPTX Architecture Hierarchy and Ordering
=========================================
GPU architectures: sm_2Y/sm_3Y/sm_5Y/sm_6Y/sm_7Y/sm_8Y/sm_9Y/sm_10Y/sm_12Y
('Y' represents version within the architecture)
Architecture names have the form ``sm_XYz``, where ``X`` represents the
generation number, ``Y`` represents the version within the generation, and
``z`` is an optional feature suffix.
If ``X1Y1 <= X2Y2``, then GPU capabilities of ``sm_X1Y1`` are included in ``sm_X2Y2``.
For example, take ``sm_90`` (9 represents ``X``, 0 represents ``Y``, and no feature
suffix) and ``sm_103`` architectures (10 represents ``X``, 3 represents ``Y``, and no
feature suffix). Since 90 <= 103, ``sm_90`` is compatible with ``sm_103``.
The family-specific variants carry the ``f`` feature suffix and follow this
ordering:
``sm_X{Y2}f > sm_X{Y1}f`` iff ``Y2 > Y1``
``sm_XY{f} > sm_{XY}{}``
For example, take ``sm_100f`` (10 represents ``X``, 0 represents ``Y``, and ``f``
represents ``z``) and ``sm_103f`` (10 represents ``X``, 3 represents ``Y``, and ``f``
represents ``z``) architecture variants. Since ``Y1 < Y2``, ``sm_100f`` is
compatible with ``sm_103f``. Similarly, by the second rule, ``sm_90`` is
compatible with ``sm_103f``.
As a counter-example, take ``sm_100f`` and ``sm_120f`` (12 represents ``X``, 0
represents ``Y``, and ``f`` represents ``z``). Since the two belong to
different families, i.e. ``X1 != X2``, ``sm_100f`` is not compatible with
``sm_120f``.
The architecture-specific variants carry the ``a`` feature suffix and follow
this ordering:
``sm_XY{a} > sm_XY{f} > sm_{XY}{}``
For example, take ``sm_103a`` (10 represents ``X``, 3 represents ``Y``, and ``a``
represents ``z``), ``sm_103f``, and ``sm_103`` architecture variants. ``sm_103`` is
compatible with ``sm_103a`` and ``sm_103f``, and ``sm_103f`` is compatible with ``sm_103a``.
Encoding := Arch * 10 + 2 (for 'f') + 1 (for 'a')
Arch := X * 10 + Y
For example, ``sm_103f`` is encoded as 1032 (103 * 10 + 2) and ``sm_103a`` is
encoded as 1033 (103 * 10 + 2 + 1).
This encoding allows simple partial ordering of the architectures.
* Compare Family and Arch by dividing FullSMVersion by 100 and 10
respectively before the comparison.
* Compare within a family by comparing FullSMVersion directly, given that both
architectures belong to the same family.
* Detect ``a`` variants by checking FullSMVersion & 1.
.. _nvptx_intrinsics:
NVPTX Intrinsics
================
Reading PTX Special Registers
-----------------------------
'``llvm.nvvm.read.ptx.sreg.*``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x()
declare i32 @llvm.nvvm.read.ptx.sreg.tid.y()
declare i32 @llvm.nvvm.read.ptx.sreg.tid.z()
declare i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
declare i32 @llvm.nvvm.read.ptx.sreg.ntid.y()
declare i32 @llvm.nvvm.read.ptx.sreg.ntid.z()
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.y()
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.z()
declare i32 @llvm.nvvm.read.ptx.sreg.nctaid.x()
declare i32 @llvm.nvvm.read.ptx.sreg.nctaid.y()
declare i32 @llvm.nvvm.read.ptx.sreg.nctaid.z()
declare i32 @llvm.nvvm.read.ptx.sreg.warpsize()
Overview:
"""""""""
The '``@llvm.nvvm.read.ptx.sreg.*``' intrinsics provide access to the PTX
special registers, in particular the kernel launch bounds. These registers
map in the following way to CUDA builtins:
=============  =====================================
CUDA Builtin   PTX Special Register Intrinsic
=============  =====================================
``threadIdx``  ``@llvm.nvvm.read.ptx.sreg.tid.*``
``blockIdx``   ``@llvm.nvvm.read.ptx.sreg.ctaid.*``
``blockDim``   ``@llvm.nvvm.read.ptx.sreg.ntid.*``
``gridDim``    ``@llvm.nvvm.read.ptx.sreg.nctaid.*``
=============  =====================================
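For example, the canonical global thread index in the x dimension (the CUDA
expression ``blockIdx.x * blockDim.x + threadIdx.x``) can be computed with the
declarations above as:

.. code-block:: llvm

  define i32 @global_tid_x() {
    %tid   = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
    %ntid  = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
    %ctaid = call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
    %base  = mul i32 %ctaid, %ntid
    %gtid  = add i32 %base, %tid
    ret i32 %gtid
  }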
Barriers
--------
'``llvm.nvvm.barrier.cta.*``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.barrier.cta.sync.count(i32 %id, i32 %n)
declare void @llvm.nvvm.barrier.cta.sync.all(i32 %id)
declare void @llvm.nvvm.barrier.cta.arrive.count(i32 %id, i32 %n)
declare void @llvm.nvvm.barrier.cta.sync.aligned.count(i32 %id, i32 %n)
declare void @llvm.nvvm.barrier.cta.sync.aligned.all(i32 %id)
declare void @llvm.nvvm.barrier.cta.arrive.aligned.count(i32 %id, i32 %n)
Overview:
"""""""""
The '``@llvm.nvvm.barrier.cta.*``' family of intrinsics performs barrier
synchronization and communication among the threads within a CTA.
Semantics:
""""""""""
Operand %id specifies a logical barrier resource and must fall within the range
0 through 15. When present, operand %n specifies the number of threads
participating in the barrier. When specifying a thread count, the value must be
a multiple of the warp size. With the '``@llvm.nvvm.barrier.cta.sync.*``'
variants, the '``.all``' suffix indicates that all threads in the CTA should
participate in the barrier while the '``.count``' suffix indicates that only
the threads specified by the %n operand should participate in the barrier.
All forms of the '``@llvm.nvvm.barrier.cta.*``' intrinsics cause the executing
thread to wait for all non-exited threads of its warp and then mark the
warp's arrival at the barrier. In addition to signaling its arrival at the
barrier, the '``@llvm.nvvm.barrier.cta.sync.*``' intrinsics cause the executing
thread to wait for non-exited threads of all other warps participating in the
barrier to arrive. On the other hand, the '``@llvm.nvvm.barrier.cta.arrive.*``'
intrinsic does not cause the executing thread to wait for threads of other
participating warps.
When a barrier completes, the waiting threads are restarted without delay,
and the barrier is reinitialized so that it can be immediately reused.
The '``@llvm.nvvm.barrier.cta.*``' intrinsic has an optional '``.aligned``'
modifier to indicate textual alignment of the barrier. When specified, it
indicates that all threads in the CTA will execute the same
'``@llvm.nvvm.barrier.cta.*``' instruction. In conditionally executed code, an
aligned '``@llvm.nvvm.barrier.cta.*``' instruction should only be used if it is
known that all threads in the CTA evaluate the condition identically, otherwise
behavior is undefined.
Electing a thread
-----------------
'``llvm.nvvm.elect.sync``'
^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare {i32, i1} @llvm.nvvm.elect.sync(i32 %membermask)
Overview:
"""""""""
The '``@llvm.nvvm.elect.sync``' intrinsic generates the ``elect.sync``
PTX instruction, which elects one predicated active leader thread from
a set of threads specified by ``membermask``. The behavior is undefined
if the executing thread is not in ``membermask``. The laneid of the
elected thread is captured in the i32 return value. The i1 return
value is set to ``True`` for the leader thread and ``False`` for all
the other threads. Election of a leader thread happens deterministically,
i.e. the same leader thread is elected for the same ``membermask``
every time. For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#parallel-synchronization-and-communication-instructions-elect-sync>`_.
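A typical use (sketched here with a full warp mask; the branch targets are
hypothetical) elects one leader and branches on the returned predicate:

.. code-block:: llvm

  ; membermask -1 selects all 32 lanes of the warp
  %r      = call { i32, i1 } @llvm.nvvm.elect.sync(i32 -1)
  %leader = extractvalue { i32, i1 } %r, 1
  br i1 %leader, label %leader_work, label %done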
Membar/Fences
-------------
'``llvm.nvvm.fence.proxy.tensormap_generic.*``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.fence.proxy.tensormap_generic.release.cta()
declare void @llvm.nvvm.fence.proxy.tensormap_generic.release.cluster()
declare void @llvm.nvvm.fence.proxy.tensormap_generic.release.gpu()
declare void @llvm.nvvm.fence.proxy.tensormap_generic.release.sys()
declare void @llvm.nvvm.fence.proxy.tensormap_generic.acquire.cta(ptr %addr, i32 %size)
declare void @llvm.nvvm.fence.proxy.tensormap_generic.acquire.cluster(ptr %addr, i32 %size)
declare void @llvm.nvvm.fence.proxy.tensormap_generic.acquire.gpu(ptr %addr, i32 %size)
declare void @llvm.nvvm.fence.proxy.tensormap_generic.acquire.sys(ptr %addr, i32 %size)
Overview:
"""""""""
The ``@llvm.nvvm.fence.proxy.tensormap_generic.*`` intrinsics are uni-directional fences used to establish ordering between a prior memory access performed via the generic `proxy <https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#proxies>`_ and a subsequent memory access performed via the tensormap proxy. ``nvvm.fence.proxy.tensormap_generic.release`` can form a release sequence that synchronizes with an acquire sequence containing the ``nvvm.fence.proxy.tensormap_generic.acquire`` proxy fence. The following table describes the mapping between the LLVM intrinsics and the PTX instructions:
====================================================== =========================================================
NVVM Intrinsic PTX Instruction
====================================================== =========================================================
``@llvm.nvvm.fence.proxy.tensormap_generic.release.*`` ``fence.proxy.tensormap::generic.release.*``
``@llvm.nvvm.fence.proxy.tensormap_generic.acquire.*`` ``fence.proxy.tensormap::generic.acquire.* [addr], size``
====================================================== =========================================================
The address operand ``addr`` and the operand ``size`` together specify the memory range ``[addr, addr+size)`` over which the ordering guarantees on memory accesses across the proxies are provided. The only supported value for the ``size`` operand is ``128``, and it must be an immediate. Generic addressing is used unconditionally, and the address specified by the operand ``addr`` must fall within the ``.global`` state space; otherwise, the behavior is undefined. For more information, see the `PTX ISA <https://docs.nvidia.com/cuda/parallel-thread-execution/#parallel-synchronization-and-communication-instructions-membar>`_.
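A sketch of the intended pairing, assuming a hypothetical 128-byte tensormap
object at a global address ``%tmap``: the producing thread releases its
generic-proxy writes, and a consuming thread acquires the range before using
it via the tensormap proxy.

.. code-block:: llvm

  ; producer thread: ordinary (generic proxy) stores to %tmap, then release
  call void @llvm.nvvm.fence.proxy.tensormap_generic.release.gpu()

  ; consumer thread: acquire the 128-byte range before tensormap-proxy reads
  call void @llvm.nvvm.fence.proxy.tensormap_generic.acquire.gpu(ptr %tmap, i32 128)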
Address Space Intrinsics
------------------------
'``llvm.nvvm.isspacep.*``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i1 @llvm.nvvm.isspacep.const(ptr %p)
declare i1 @llvm.nvvm.isspacep.global(ptr %p)
declare i1 @llvm.nvvm.isspacep.local(ptr %p)
declare i1 @llvm.nvvm.isspacep.shared(ptr %p)
declare i1 @llvm.nvvm.isspacep.shared.cluster(ptr %p)
Overview:
"""""""""
The '``llvm.nvvm.isspacep.*``' intrinsics determine whether the provided generic
pointer references memory which falls within a particular address space.
Semantics:
""""""""""
If the given pointer in the generic address space refers to memory which falls
within the state space of the intrinsic (and therefore could safely be cast
into that address space), 1 is returned; otherwise 0 is returned.
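For example, a sketch that takes a specialized path when a generic pointer
``%p`` is known to reference global memory, making the address space cast
provably safe on that path:

.. code-block:: llvm

    %isg = call i1 @llvm.nvvm.isspacep.global(ptr %p)
    br i1 %isg, label %global_path, label %generic_path

  global_path:
    ; the cast is known to be valid here
    %gp = addrspacecast ptr %p to ptr addrspace(1)
    br label %generic_path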
'``llvm.nvvm.mapa.*``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare ptr @llvm.nvvm.mapa(ptr %p, i32 %rank)
declare ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p, i32 %rank)
Overview:
"""""""""
The '``llvm.nvvm.mapa.*``' intrinsics map a shared memory address ``%p`` to the
corresponding location in the shared memory of the CTA whose rank in the
cluster is ``%rank``.
The ``llvm.nvvm.mapa`` form expects a generic pointer to shared memory and returns a generic pointer to shared cluster memory.
The ``llvm.nvvm.mapa.shared.cluster`` form expects a pointer to shared memory and returns a pointer to shared cluster memory.
They correspond directly to the ``mapa`` and ``mapa.shared.cluster`` PTX instructions.
Semantics:
""""""""""
The returned pointer refers to the same shared memory location as ``%p``, but
within the shared memory of the CTA whose rank in the cluster is ``%rank``,
allowing a CTA to access the shared memory of its peers in the cluster.
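For example (with a hypothetical shared-memory pointer ``%p``), mapping an
address to the matching location in the CTA with rank 2:

.. code-block:: llvm

  %peer = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p, i32 2)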
Arithmetic Intrinsics
---------------------
'``llvm.nvvm.fabs.*``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare float @llvm.nvvm.fabs.f32(float %a)
declare double @llvm.nvvm.fabs.f64(double %a)
declare half @llvm.nvvm.fabs.f16(half %a)
declare <2 x half> @llvm.nvvm.fabs.v2f16(<2 x half> %a)
declare bfloat @llvm.nvvm.fabs.bf16(bfloat %a)
declare <2 x bfloat> @llvm.nvvm.fabs.v2bf16(<2 x bfloat> %a)
Overview:
"""""""""
The '``llvm.nvvm.fabs.*``' intrinsics return the absolute value of the operand.
Semantics:
""""""""""
Unlike '``llvm.fabs.*``', these intrinsics do not perfectly preserve NaN
values. Instead, a NaN input yields an unspecified NaN output.
'``llvm.nvvm.fabs.ftz.*``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare float @llvm.nvvm.fabs.ftz.f32(float %a)
declare half @llvm.nvvm.fabs.ftz.f16(half %a)
declare <2 x half> @llvm.nvvm.fabs.ftz.v2f16(<2 x half> %a)
Overview:
"""""""""
The '``llvm.nvvm.fabs.ftz.*``' intrinsics return the absolute value of the
operand, flushing subnormals to sign preserving zero.
Semantics:
""""""""""
Before the absolute value is taken, the input is flushed to sign preserving
zero if it is a subnormal. In addition, unlike '``llvm.fabs.*``', a NaN input
yields an unspecified NaN output.
'``llvm.nvvm.idp2a.[us].[us]``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i32 @llvm.nvvm.idp2a.s.s(i32 %a, i32 %b, i1 immarg %is.hi, i32 %c)
declare i32 @llvm.nvvm.idp2a.s.u(i32 %a, i32 %b, i1 immarg %is.hi, i32 %c)
declare i32 @llvm.nvvm.idp2a.u.s(i32 %a, i32 %b, i1 immarg %is.hi, i32 %c)
declare i32 @llvm.nvvm.idp2a.u.u(i32 %a, i32 %b, i1 immarg %is.hi, i32 %c)
Overview:
"""""""""
The '``llvm.nvvm.idp2a.[us].[us]``' intrinsics perform a 2-element vector dot
product followed by addition. They correspond directly to the ``dp2a`` PTX
instruction.
Semantics:
""""""""""
The 32-bit value in ``%a`` is broken into 2 16-bit values which are extended to
32 bits. For the '``llvm.nvvm.idp2a.u.[us]``' variants zero-extension is used,
while for the '``llvm.nvvm.idp2a.s.[us]``' sign-extension is used. Two bytes are
selected from ``%b``, if ``%is.hi`` is true, the most significant bytes are
selected, otherwise the least significant bytes are selected. These bytes are
then extended to 32-bits. For the '``llvm.nvvm.idp2a.[us].u``' variants
zero-extension is used, while for the '``llvm.nvvm.idp2a.[us].s``'
sign-extension is used. The dot product of these 2-element vectors is added to
``%c`` to produce the return.
'``llvm.nvvm.idp4a.[us].[us]``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i32 @llvm.nvvm.idp4a.s.s(i32 %a, i32 %b, i32 %c)
declare i32 @llvm.nvvm.idp4a.s.u(i32 %a, i32 %b, i32 %c)
declare i32 @llvm.nvvm.idp4a.u.s(i32 %a, i32 %b, i32 %c)
declare i32 @llvm.nvvm.idp4a.u.u(i32 %a, i32 %b, i32 %c)
Overview:
"""""""""
The '``llvm.nvvm.idp4a.[us].[us]``' intrinsics perform a 4-element vector dot
product followed by addition. They correspond directly to the ``dp4a`` PTX
instruction.
Semantics:
""""""""""
Each of the 4 bytes in both ``%a`` and ``%b`` are extended to 32-bit integers
forming 2 ``<4 x i32>``. For ``%a``, zero-extension is used in the
'``llvm.nvvm.idp4a.u.[us]``' variants, while sign-extension is used with
'``llvm.nvvm.idp4a.s.[us]``' variants. Similarly, for ``%b``, zero-extension is
used in the '``llvm.nvvm.idp4a.[us].u``' variants, while sign-extension is used
with '``llvm.nvvm.idp4a.[us].s``' variants. The dot product of these 4-element
vectors is added to ``%c`` to produce the return.
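For example, one step of an unsigned 8-bit dot-product reduction, where ``%a``
and ``%b`` each pack four unsigned byte lanes and ``%acc`` is the running sum
(a sketch):

.. code-block:: llvm

  ; acc.next = acc + dot(<4 x u8> of %a, <4 x u8> of %b)
  %acc.next = call i32 @llvm.nvvm.idp4a.u.u(i32 %a, i32 %b, i32 %acc)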
Bit Manipulation Intrinsics
---------------------------
'``llvm.nvvm.fshl.clamp.*``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i32 @llvm.nvvm.fshl.clamp.i32(i32 %hi, i32 %lo, i32 %n)
Overview:
"""""""""
The '``llvm.nvvm.fshl.clamp``' family of intrinsics performs a clamped funnel
shift left. These intrinsics are very similar to '``llvm.fshl``', except that
the shift amount is clamped at the integer width rather than taken modulo it.
Currently, only ``i32`` is supported.
Semantics:
""""""""""
The '``llvm.nvvm.fshl.clamp``' family of intrinsic functions performs a clamped
funnel shift left: the first two values are concatenated as { %hi : %lo } (%hi
is the most significant bits of the wide value), the combined value is shifted
left, and the most significant bits are extracted to produce a result that is
the same size as the original arguments. The shift amount is the minimum of the
value of %n and the bit width of the integer type.
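For instance, with a shift amount of 40 the clamped variant shifts by
``min(40, 32) = 32`` and therefore returns ``%lo`` exactly, whereas
'``llvm.fshl``' would shift by ``40 mod 32 = 8``:

.. code-block:: llvm

  ; the shift of 40 is clamped to 32, so the result is %lo
  %r = call i32 @llvm.nvvm.fshl.clamp.i32(i32 %hi, i32 %lo, i32 40)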
'``llvm.nvvm.fshr.clamp.*``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i32 @llvm.nvvm.fshr.clamp.i32(i32 %hi, i32 %lo, i32 %n)
Overview:
"""""""""
The '``llvm.nvvm.fshr.clamp``' family of intrinsics perform a clamped funnel
shift right. These intrinsics are very similar to '``llvm.fshr``', except that
the shift amount is clamped at the integer width rather than taken modulo it.
Currently, only ``i32`` is supported.
Semantics:
""""""""""
The '``llvm.nvvm.fshr.clamp``' family of intrinsic functions performs a clamped
funnel shift right: the first two values are concatenated as { %hi : %lo } (%hi
is the most significant bits of the wide value), the combined value is shifted
right, and the least significant bits are extracted to produce a result that is
the same size as the original arguments. The shift amount is the minimum of the
value of %n and the bit width of the integer type.
'``llvm.nvvm.flo.u.*``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i32 @llvm.nvvm.flo.u.i32(i32 %a, i1 %shiftamt)
declare i32 @llvm.nvvm.flo.u.i64(i64 %a, i1 %shiftamt)
Overview:
"""""""""
The '``llvm.nvvm.flo.u``' family of intrinsics identifies the bit position of the
leading one, returning its offset from either the most or the least significant bit.
Semantics:
""""""""""
The '``llvm.nvvm.flo.u``' family of intrinsics returns the bit position of the
most significant 1. If %shiftamt is true, the result is the shift amount needed
to left-shift the found bit into the most-significant bit position, otherwise
the result is the shift amount needed to right-shift the found bit into the
least-significant bit position. 0xffffffff is returned if no 1 bit is found.
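For example, with ``%shiftamt`` false the result is simply the bit index of the
leading one, i.e. ``floor(log2(%a))`` for nonzero ``%a``:

.. code-block:: llvm

  ; bit index of the most significant set bit; 0xffffffff when %a is 0
  %log2 = call i32 @llvm.nvvm.flo.u.i32(i32 %a, i1 false)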
'``llvm.nvvm.flo.s.*``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i32 @llvm.nvvm.flo.s.i32(i32 %a, i1 %shiftamt)
declare i32 @llvm.nvvm.flo.s.i64(i64 %a, i1 %shiftamt)
Overview:
"""""""""
The '``llvm.nvvm.flo.s``' family of intrinsics identifies the bit position of the
leading non-sign bit, returning its offset from either the most or the least
significant bit.
Semantics:
""""""""""
The '``llvm.nvvm.flo.s``' family of intrinsics returns the bit position of the
most significant 0 for negative inputs and the most significant 1 for
non-negative inputs. If %shiftamt is true, the result is the shift amount needed
to left-shift the found bit into the most-significant bit position, otherwise
the result is the shift amount needed to right-shift the found bit into the
least-significant bit position. 0xffffffff is returned if no 1 bit is found.
'``llvm.nvvm.{zext,sext}.{wrap,clamp}``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i32 @llvm.nvvm.zext.wrap(i32 %a, i32 %b)
declare i32 @llvm.nvvm.zext.clamp(i32 %a, i32 %b)
declare i32 @llvm.nvvm.sext.wrap(i32 %a, i32 %b)
declare i32 @llvm.nvvm.sext.clamp(i32 %a, i32 %b)
Overview:
"""""""""
The '``llvm.nvvm.{zext,sext}.{wrap,clamp}``' family of intrinsics extracts the
low bits of the input value, and zero- or sign-extends them back to the original
width.
Semantics:
""""""""""
The '``llvm.nvvm.{zext,sext}.{wrap,clamp}``' family of intrinsics returns
extension of N lowest bits of operand %a. For the '``wrap``' variants, N is the
value of operand %b modulo 32. For the '``clamp``' variants, N is the value of
operand %b clamped to the range [0, 32]. The N lowest bits are then
zero-extended in the case of the '``zext``' variants, or sign-extended in the
case of the '``sext``' variants. If N is 0, the result is 0.
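For example, sign-extending the low byte of ``%a`` back to 32 bits (the
equivalent of truncating to ``i8`` and applying ``sext``):

.. code-block:: llvm

  %b = call i32 @llvm.nvvm.sext.clamp(i32 %a, i32 8) ; extend bits [7:0] of %a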
'``llvm.nvvm.bmsk.{wrap,clamp}``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i32 @llvm.nvvm.bmsk.wrap(i32 %a, i32 %b)
declare i32 @llvm.nvvm.bmsk.clamp(i32 %a, i32 %b)
Overview:
"""""""""
The '``llvm.nvvm.bmsk.{wrap,clamp}``' family of intrinsics creates a bit mask
given a starting bit position and a bit width.
Semantics:
""""""""""
The '``llvm.nvvm.bmsk.{wrap,clamp}``' family of intrinsics returns a value with
all bits set to 0 except for %b bits starting at bit position %a. For the
'``wrap``' variants, the values of %a and %b modulo 32 are used. For the
'``clamp``' variants, the values of %a and %b are clamped to the range [0, 32],
which in practice is equivalent to using them as is.
'``llvm.nvvm.prmt``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i32 @llvm.nvvm.prmt(i32 %lo, i32 %hi, i32 %selector)
Overview:
"""""""""
The '``llvm.nvvm.prmt``' intrinsic constructs a permutation of the bytes of the
first two operands, selecting bytes based on the third operand.
Semantics:
""""""""""
The bytes in the first two source operands are numbered from 0 to 7:
{%hi, %lo} = {{b7, b6, b5, b4}, {b3, b2, b1, b0}}. For each byte in the target
register, a 4-bit selection value is defined.
The 3 lsbs of the selection value specify which of the 8 source bytes should be
moved into the target position. The msb defines if the byte value should be
copied, or if the sign (msb of the byte) should be replicated over all 8 bits
of the target position (sign extend of the byte value); msb=0 means copy the
literal value; msb=1 means replicate the sign.
These 4-bit selection values are pulled from the lower 16-bits of the %selector
operand, with the least significant selection value corresponding to the least
significant byte of the destination.
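For example, a selector of ``0x0123`` reverses the bytes of ``%lo``:
destination byte 0 takes source byte 3, byte 1 takes byte 2, and so on:

.. code-block:: llvm

  ; selector 0x0123 (291): dest bytes 0..3 take source bytes 3, 2, 1, 0
  %rev = call i32 @llvm.nvvm.prmt(i32 %lo, i32 0, i32 291)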
'``llvm.nvvm.prmt.*``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i32 @llvm.nvvm.prmt.f4e(i32 %lo, i32 %hi, i32 %selector)
declare i32 @llvm.nvvm.prmt.b4e(i32 %lo, i32 %hi, i32 %selector)
declare i32 @llvm.nvvm.prmt.rc8(i32 %lo, i32 %selector)
declare i32 @llvm.nvvm.prmt.ecl(i32 %lo, i32 %selector)
declare i32 @llvm.nvvm.prmt.ecr(i32 %lo, i32 %selector)
declare i32 @llvm.nvvm.prmt.rc16(i32 %lo, i32 %selector)
Overview:
"""""""""
The '``llvm.nvvm.prmt.*``' family of intrinsics constructs a permutation of the
bytes of the first one or two operands, selecting based on the 2 least
significant bits of the final operand.
Semantics:
""""""""""
As with the generic '``llvm.nvvm.prmt``' intrinsic, the bytes in the first one
or two source operands are numbered. The first source operand (%lo) is numbered
{b3, b2, b1, b0}, in the case of the '``f4e``' and '``b4e``' variants, the
second source operand (%hi) is numbered {b7, b6, b5, b4}.
Depending on the 2 least significant bits of the %selector operand, the result
of the permutation is defined as follows:
+------------+----------------+--------------+
| Mode | %selector[1:0] | Output |
+------------+----------------+--------------+
| '``f4e``' | 0 | {3, 2, 1, 0} |
| +----------------+--------------+
| | 1 | {4, 3, 2, 1} |
| +----------------+--------------+
| | 2 | {5, 4, 3, 2} |
| +----------------+--------------+
| | 3 | {6, 5, 4, 3} |
+------------+----------------+--------------+
| '``b4e``' | 0 | {5, 6, 7, 0} |
| +----------------+--------------+
| | 1 | {6, 7, 0, 1} |
| +----------------+--------------+
| | 2 | {7, 0, 1, 2} |
| +----------------+--------------+
| | 3 | {0, 1, 2, 3} |
+------------+----------------+--------------+
| '``rc8``' | 0 | {0, 0, 0, 0} |
| +----------------+--------------+
| | 1 | {1, 1, 1, 1} |
| +----------------+--------------+
| | 2 | {2, 2, 2, 2} |
| +----------------+--------------+
| | 3 | {3, 3, 3, 3} |
+------------+----------------+--------------+
| '``ecl``' | 0 | {3, 2, 1, 0} |
| +----------------+--------------+
| | 1 | {3, 2, 1, 1} |
| +----------------+--------------+
| | 2 | {3, 2, 2, 2} |
| +----------------+--------------+
| | 3 | {3, 3, 3, 3} |
+------------+----------------+--------------+
| '``ecr``' | 0 | {0, 0, 0, 0} |
| +----------------+--------------+
| | 1 | {1, 1, 1, 0} |
| +----------------+--------------+
| | 2 | {2, 2, 1, 0} |
| +----------------+--------------+
| | 3 | {3, 2, 1, 0} |
+------------+----------------+--------------+
| '``rc16``' | 0 | {1, 0, 1, 0} |
| +----------------+--------------+
| | 1 | {3, 2, 3, 2} |
| +----------------+--------------+
| | 2 | {1, 0, 1, 0} |
| +----------------+--------------+
| | 3 | {3, 2, 3, 2} |
+------------+----------------+--------------+
TMA family of Intrinsics
------------------------
'``llvm.nvvm.cp.async.bulk.global.to.shared.cluster``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %mbar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch)
Overview:
"""""""""
The '``@llvm.nvvm.cp.async.bulk.global.to.shared.cluster``' intrinsic
corresponds to the ``cp.async.bulk.shared::cluster.global.*`` family
of PTX instructions. These instructions initiate an asynchronous
copy of bulk data from global memory to shared::cluster memory.
The 32-bit operand ``%size`` specifies the amount of memory to be
copied, in bytes, and it must be a multiple of 16.
* The last two arguments to these intrinsics are boolean flags
indicating support for cache_hint and/or multicast modifiers.
These flag arguments must be compile-time constants. The backend
looks through these flags and lowers the intrinsics appropriately.
* The Nth argument (denoted by ``i1 %flag_ch``), when set, indicates
  a valid cache_hint (``i64 %ch``) and generates the ``.L2::cache_hint``
  variant of the PTX instruction.
* The [N-1]th argument (denoted by ``i1 %flag_mc``), when set, indicates
  the presence of a multicast mask (``i16 %mc``) and generates the PTX
  instruction with the ``.multicast::cluster`` modifier.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk>`_.
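For example, a minimal sketch of a 256-byte copy with neither multicast nor a
cache hint, so both flags are false and the ``%mc``/``%ch`` operands are
ignored (``%dst``, ``%mbar``, and ``%src`` are hypothetical):

.. code-block:: llvm

  call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(
      ptr addrspace(7) %dst, ptr addrspace(3) %mbar, ptr addrspace(1) %src,
      i32 256, i16 0, i64 0, i1 false, i1 false)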
'``llvm.nvvm.cp.async.bulk.shared.cta.to.global``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.global(ptr addrspace(1) %dst, ptr addrspace(3) %src, i32 %size, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.global.bytemask(..., i32 %size, i64 %ch, i1 %flag_ch, i16 %mask)
Overview:
"""""""""
The '``@llvm.nvvm.cp.async.bulk.shared.cta.to.global``' intrinsic
corresponds to the ``cp.async.bulk.global.shared::cta.*`` set of PTX
instructions. These instructions initiate an asynchronous copy from
shared::cta to global memory. The 32-bit operand ``%size`` specifies
the amount of memory to be copied (in bytes) and it must be a multiple
of 16. For the ``.bytemask`` variant, the 16-bit wide mask operand
specifies whether the i-th byte of each 16-byte wide chunk of source
data is copied to the destination.
* The ``i1 %flag_ch`` argument to these intrinsics is a boolean
flag indicating support for cache_hint. This flag argument must
be a compile-time constant. When set, it indicates a valid
cache_hint (``i64 %ch``) and generates the ``.L2::cache_hint``
variant of the PTX instruction.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk>`_.
'``llvm.nvvm.cp.async.bulk.shared.cta.to.cluster``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %mbar, ptr addrspace(3) %src, i32 %size)
Overview:
"""""""""
The '``@llvm.nvvm.cp.async.bulk.shared.cta.to.cluster``' intrinsic
corresponds to the ``cp.async.bulk.shared::cluster.shared::cta.*``
PTX instruction. This instruction initiates an asynchronous copy from
shared::cta to shared::cluster memory. The destination has to be in
the shared memory of a different CTA within the cluster. The 32-bit
operand ``%size`` specifies the amount of memory to be copied and
it must be a multiple of 16.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk>`_.
'``llvm.nvvm.cp.async.bulk.prefetch.L2``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.cp.async.bulk.prefetch.L2(ptr addrspace(1) %src, i32 %size, i64 %ch, i1 %flag_ch)
Overview:
"""""""""
The '``@llvm.nvvm.cp.async.bulk.prefetch.L2``' intrinsic
corresponds to the ``cp.async.bulk.prefetch.L2.*`` family
of PTX instructions. These instructions initiate an asynchronous
prefetch of bulk data from global memory to the L2 cache.
The 32-bit operand ``%size`` specifies the amount of memory to be
prefetched, in bytes, and it must be a multiple of 16.
* The last argument to these intrinsics is a boolean flag indicating support
  for cache_hint. This flag argument must be a compile-time constant. When
  set, it indicates a valid cache_hint (``i64 %ch``) and generates the
  ``.L2::cache_hint`` variant of the PTX instruction.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-cp-async-bulk-prefetch>`_.
'``llvm.nvvm.prefetch.*``'
^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.prefetch.global.L1(ptr addrspace(1) %global_ptr)
declare void @llvm.nvvm.prefetch.global.L2(ptr addrspace(1) %global_ptr)
declare void @llvm.nvvm.prefetch.local.L1(ptr addrspace(5) %local_ptr)
declare void @llvm.nvvm.prefetch.local.L2(ptr addrspace(5) %local_ptr)
declare void @llvm.nvvm.prefetch.L1(ptr %ptr)
declare void @llvm.nvvm.prefetch.L2(ptr %ptr)
declare void @llvm.nvvm.prefetch.global.L2.evict.normal(ptr addrspace(1) %global_ptr)
declare void @llvm.nvvm.prefetch.global.L2.evict.last(ptr addrspace(1) %global_ptr)
declare void @llvm.nvvm.prefetchu.L1(ptr %ptr)
Overview:
"""""""""
The '``@llvm.nvvm.prefetch.*``' and '``@llvm.nvvm.prefetchu.*``' intrinsics
correspond to the '``prefetch.*``' and '``prefetchu.*``' families of PTX instructions.
The '``prefetch.*``' instructions bring the cache line containing the
specified address in the global or local memory address space into the
specified cache level (L1 or L2). The '``prefetchu.*``' instruction brings the cache line
containing the specified generic address into the specified uniform cache level.
If no address space is specified, the address is assumed to be generic. The intrinsics
support an eviction priority, which is expressed by the '``.level::eviction_priority``' modifier.
* A prefetch to a shared memory location performs no operation.
* A prefetch into the uniform cache requires a generic address,
and no operation occurs if the address maps to a const, local, or shared memory location.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-prefetch-prefetchu>`_.
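For example, a sketch prefetching the line containing a hypothetical global
address ``%gp`` into the L1 cache:

.. code-block:: llvm

  call void @llvm.nvvm.prefetch.global.L1(ptr addrspace(1) %gp)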
'``llvm.nvvm.applypriority.*``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.applypriority.global.L2.evict.normal(ptr addrspace(1) %global_ptr, i64 %size)
declare void @llvm.nvvm.applypriority.L2.evict.normal(ptr %ptr, i64 %size)
Overview:
"""""""""
The '``@llvm.nvvm.applypriority.*``' intrinsics apply the cache eviction priority specified by the
``.level::eviction_priority`` qualifier to the address range ``[a..a+size)`` in the specified cache
level. If no state space is specified, generic addressing is used. If the specified address
does not fall within the address window of the ``.global`` state space, the behavior is undefined.
The operand size is an integer constant that specifies the amount of data, in bytes, in the specified cache
level on which the priority is to be applied. The only supported value for the size operand is 128.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-applypriority>`_.
'``llvm.nvvm.discard.*``'
^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.discard.global.L2(ptr addrspace(1) %global_ptr, i64 immarg)
declare void @llvm.nvvm.discard.L2(ptr %ptr, i64 immarg)
Overview:
"""""""""
The *effects* of the ``@llvm.nvvm.discard.L2*`` intrinsics are those of a non-atomic
non-volatile ``llvm.memset`` that writes ``undef`` to the destination
address range ``[%ptr, %ptr + immarg)``. The ``%ptr`` must be aligned by 128 bytes.
Subsequent reads from the address range may read ``undef`` until the memory is overwritten
with a different value.
These operations *hint* to the implementation that data in the L2 cache can be
destructively discarded without writing it back to memory.
The operand ``immarg`` is an integer constant that specifies the length in bytes of the
address range ``[%ptr, %ptr + immarg)`` to write ``undef`` into.
The only supported value for the ``immarg`` operand is ``128``.
If generic addressing is used and the specified address does not fall within the
address window of global memory (``addrspace(1)``) the behavior is undefined.
.. code-block:: llvm
call void @llvm.nvvm.discard.L2(ptr %p, i64 128) ;; writes `undef` to [p, p+128)
%a = load i64, ptr %p ;; loads 8 bytes containing undef
%b = load i64, ptr %p ;; loads 8 bytes containing undef
;; comparing %a and %b compares `undef` values!
%fa = freeze i64 %a ;; freezes undef to stable bit-pattern
%fb = freeze i64 %b ;; freezes undef to stable bit-pattern
;; %fa may compare different to %fb!
For more information, refer to the `CUDA C++ discard documentation <https://nvidia.github.io/cccl/libcudacxx/extended_api/memory_access_properties/discard_memory.html>`__ and to the `PTX ISA discard documentation <https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-discard>`__ .
'``llvm.nvvm.cp.async.bulk.tensor.g2s.tile.[1-5]d``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %d0, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch, i32 %flag_cta_group)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(..., i32 %d0, i32 %d1, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(..., i32 %d0, i32 %d1, i32 %d2, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.gather4.2d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %x0, i32 %y0, i32 %y1, i32 %y2, i32 %y3, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch, i32 %flag_cta_group)
Overview:
"""""""""
The '``@llvm.nvvm.cp.async.bulk.tensor.g2s.tile.[1-5]d``' intrinsics
correspond to the ``cp.async.bulk.tensor.[1-5]d.*`` set of PTX instructions.
These instructions initiate an asynchronous copy of tensor data from
global memory to shared::cluster memory (indicated by the ``g2s`` prefix)
in ``tile`` mode. In tile mode, the multi-dimensional layout of the
source tensor is preserved at the destination. The dimension of the
tensor data ranges from 1d to 5d with the coordinates specified
by the ``i32 %d0 ... i32 %d4`` arguments. In ``tile.gather4`` mode,
four rows in a 2D tensor are combined to form a single 2D destination
tensor. The first coordinate ``i32 %x0`` denotes the column index
followed by four coordinates indicating the four row-indices.
So, this mode takes a total of 5 coordinates as input arguments.
For more information on ``gather4`` mode, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tensor-tiled-scatter4-gather4-modes>`_.
* The last three arguments to these intrinsics are flags
indicating support for multicast, cache_hint and cta_group::1/2
modifiers. These flag arguments must be compile-time constants.
The backend looks through these flags and lowers the intrinsics
appropriately.
* The argument denoted by ``i1 %flag_ch``, when set, indicates
  a valid cache_hint (``i64 %ch``) and generates the ``.L2::cache_hint``
  variant of the PTX instruction.
* The argument denoted by ``i1 %flag_mc``, when set, indicates
  the presence of a multicast mask (``i16 %mc``) and generates
  the PTX instruction with the ``.multicast::cluster`` modifier.
* The argument denoted by ``i32 %flag_cta_group`` takes values within
  the range [0, 3), i.e. {0, 1, 2}. Values outside this range raise an
  error from the Verifier. The default value '0' emits the instruction
  with no cta_group modifier, while the values '1' and '2' lower to the
  ``cta_group::1`` and ``cta_group::2`` variants of the PTX instruction,
  respectively.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor>`_.
'``llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.[3-5]d``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch, i32 %flag_cta_group)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.3d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %d0, i32 %d1, i32 %d2, i16 %wHalo, i16 %wOffset, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch, i32 %flag_cta_group)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.3d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %d0, i32 %d1, i32 %d2, i16 %wHalo, i16 %wOffset, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch, i32 %flag_cta_group)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, ...)
Overview:
"""""""""
The '``@llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.[3-5]d``' intrinsics
correspond to the ``cp.async.bulk.tensor.[1-5]d.*`` set of PTX instructions.
These instructions initiate an asynchronous copy of tensor data from
global memory to shared::cluster memory (indicated by the ``g2s`` prefix)
in ``im2col`` mode. In im2col mode, some dimensions of the source tensor
are unrolled into a single dimensional column at the destination. In this
mode, the tensor has to be at least three-dimensional. Along with the tensor
coordinates, im2col offsets are also specified (denoted by
``i16 %im2col0 ... i16 %im2col2``). For the ``im2col`` mode, the number of offsets
is two less than the number of dimensions of the tensor operation. For the
``im2col.w`` and ``im2col.w.128`` modes, the number of offsets is always 2,
denoted by the ``i16 %wHalo`` and ``i16 %wOffset`` arguments. For more information
on the ``im2col.w`` and ``im2col.w.128`` modes, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tensor-im2col-w-w128-modes>`_.
The last three arguments to these intrinsics are flags, with the same functionality
as described in the ``tile`` mode intrinsics above.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor>`_.
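As a sketch, a 3D ``im2col`` copy takes one im2col offset (two less than the
three tensor dimensions); the operand names here are placeholders:

.. code-block:: llvm

  ; Cache-hint enabled; multicast disabled; default cta_group.
  call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(
      ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr %tmap,
      i32 %d0, i32 %d1, i32 %d2, i16 %im2col0,
      i16 0, i64 %ch, i1 false, i1 true, i32 0)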
'``llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.[1-5]d``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.1d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %d0, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.2d(..., i32 %d0, i32 %d1, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.3d(..., i32 %d0, i32 %d1, i32 %d2, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.gather4.2d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %x0, i32 %y0, i32 %y1, i32 %y2, i32 %y3, i64 %ch, i1 %flag_ch)
Overview:
"""""""""
The '``@llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.[1-5]d``' intrinsics
correspond to the ``cp.async.bulk.tensor.[1-5]d.shared::cta.global.*``
set of PTX instructions. These instructions initiate an asynchronous
copy of tensor data from global memory to shared::cta memory in
``tile`` mode. In tile mode, the multi-dimensional layout of the
source tensor is preserved at the destination. The dimension of the
tensor data ranges from 1d to 5d with the coordinates specified
by the ``i32 %d0 ... i32 %d4`` arguments. In ``tile.gather4`` mode,
four rows in a 2D tensor are combined to form a single 2D destination
tensor. The first coordinate ``i32 %x0`` denotes the column index
followed by four coordinates indicating the four row-indices.
So, this mode takes a total of 5 coordinates as input arguments.
For more information on ``gather4`` mode, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tensor-tiled-scatter4-gather4-modes>`_.
* The last argument to these intrinsics is a boolean flag
indicating support for cache_hint. This flag argument must
be a compile-time constant. When set, it indicates a valid
cache_hint (``i64 %ch``) and generates the ``.L2::cache_hint``
variant of the PTX instruction.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor>`_.
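A minimal sketch of a 1D copy into shared::cta memory, with the cache-hint
flag disabled (so the ``%ch`` operand is ignored); operand names are
placeholders:

.. code-block:: llvm

  call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.1d(
      ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr %tmap,
      i32 %d0, i64 0, i1 false)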
'``llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.[3-5]d``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.3d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.3d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %d0, i32 %d1, i32 %d2, i16 %wHalo, i16 %wOffset, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.128.3d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %d0, i32 %d1, i32 %d2, i16 %wHalo, i16 %wOffset, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.128.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.128.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, ...)
Overview:
"""""""""
The '``@llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.[3-5]d``' intrinsics
correspond to the ``cp.async.bulk.tensor.[3-5]d.shared::cta.global.*``
set of PTX instructions. These instructions initiate an asynchronous copy
of tensor data from global memory to shared::cta memory in ``im2col`` mode.
In im2col mode, some dimensions of the source tensor are unrolled into a
single dimensional column at the destination. In this mode, the tensor has
to be at least three-dimensional. Along with the tensor coordinates, im2col
offsets are also specified (denoted by ``i16 %im2col0 ... i16 %im2col2``).
For the ``im2col`` mode, the number of offsets is two less than the number
of dimensions of the tensor operation. For the ``im2col.w`` and ``im2col.w.128``
modes, the number of offsets is always 2, denoted by the ``i16 %wHalo`` and
``i16 %wOffset`` arguments. For more information on the ``im2col.w`` and
``im2col.w.128`` modes, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tensor-im2col-w-w128-modes>`_.
* The last argument to these intrinsics is a boolean flag
indicating support for cache_hint. This flag argument must
be a compile-time constant. When set, it indicates a valid
cache_hint (``i64 %ch``) and generates the ``.L2::cache_hint``
variant of the PTX instruction.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor>`_.
'``llvm.nvvm.cp.async.bulk.tensor.s2g.tile.[1-5]d``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.1d(ptr addrspace(3) %src, ptr %tensor_map, i32 %d0, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.2d(..., i32 %d0, i32 %d1, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.3d(..., i32 %d0, i32 %d1, i32 %d2, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.scatter4.2d(ptr addrspace(3) %src, ptr %tensor_map, i32 %x0, i32 %y0, i32 %y1, i32 %y2, i32 %y3, i64 %ch, i1 %flag_ch)
Overview:
"""""""""
The '``@llvm.nvvm.cp.async.bulk.tensor.s2g.tile.[1-5]d``' intrinsics
correspond to the ``cp.async.bulk.tensor.[1-5]d.*`` set of PTX instructions.
These instructions initiate an asynchronous copy of tensor data from
shared::cta to global memory (indicated by the ``s2g`` prefix)
in ``tile`` mode. The dimension of the tensor data ranges from 1d to 5d
with the coordinates specified by the ``i32 %d0 ... i32 %d4`` arguments.
In ``tile.scatter4`` mode, a single 2D source tensor is scattered across
four rows of the 2D destination tensor. The first coordinate ``i32 %x0``
denotes the column index, followed by four coordinates indicating the
four row-indices. So, this mode takes a total of 5 coordinates as input arguments.
For more information on ``scatter4`` mode, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tensor-tiled-scatter4-gather4-modes>`_.
* The last argument to these intrinsics is a boolean flag
indicating support for cache_hint. This flag argument must
be a compile-time constant. When set, it indicates a valid
cache_hint (``i64 %ch``) and generates the ``.L2::cache_hint``
variant of the PTX instruction.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor>`_.
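A minimal sketch of a 2D copy from shared::cta back to global memory with a
cache hint; operand names are placeholders:

.. code-block:: llvm

  call void @llvm.nvvm.cp.async.bulk.tensor.s2g.tile.2d(
      ptr addrspace(3) %src, ptr %tmap,
      i32 %d0, i32 %d1, i64 %ch, i1 true)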
'``llvm.nvvm.cp.async.bulk.tensor.s2g.im2col.[3-5]d``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.cp.async.bulk.tensor.s2g.im2col.3d(ptr addrspace(3) %src, ptr %tensor_map, i32 %d0, i32 %d1, i32 %d2, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.s2g.im2col.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.s2g.im2col.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, ...)
Overview:
"""""""""
The '``@llvm.nvvm.cp.async.bulk.tensor.s2g.im2col.[3-5]d``' intrinsics
correspond to the ``cp.async.bulk.tensor.[3-5]d.*`` set of PTX instructions.
These instructions initiate an asynchronous copy of tensor data from
shared::cta to global memory (indicated by the ``s2g`` prefix)
in ``im2col`` mode. In this mode, the tensor has to be at least
three-dimensional. Unlike the ``g2s`` variants, there are no
im2col_offsets for these intrinsics. The last argument to these
intrinsics is a boolean flag, with the same functionality as
described in the ``s2g.tile`` mode intrinsics above.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor>`_.
'``llvm.nvvm.cp.async.bulk.tensor.prefetch.tile.[1-5]d``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.tile.1d(ptr %tensor_map, i32 %d0, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.tile.2d(..., i32 %d0, i32 %d1, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.tile.3d(..., i32 %d0, i32 %d1, i32 %d2, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.tile.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.tile.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.tile.gather4.2d(ptr %tensor_map, i32 %x0, i32 %y0, i32 %y1, i32 %y2, i32 %y3, i64 %ch, i1 %flag_ch)
Overview:
"""""""""
The '``@llvm.nvvm.cp.async.bulk.tensor.prefetch.tile.[1-5]d``' intrinsics
correspond to the ``cp.async.bulk.prefetch.tensor.[1-5]d.L2.global*`` set
of PTX instructions. These instructions initiate an asynchronous prefetch
of tensor data from global memory to the L2 cache. In tile mode, the
multi-dimensional layout of the source tensor is preserved at the destination.
The dimension of the tensor data ranges from 1d to 5d with the coordinates
specified by the ``i32 %d0 ... i32 %d4`` arguments.
In ``tile.gather4`` mode, four rows in the 2-dimensional source tensor are
fetched to the L2 cache. The first coordinate ``i32 %x0`` denotes the column index
followed by four coordinates indicating the four row-indices. So, this mode takes
a total of 5 coordinates as input arguments.
For more information on ``gather4`` mode, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tensor-tiled-scatter4-gather4-modes>`_.
* The last argument to these intrinsics is a boolean flag
indicating support for cache_hint. This flag argument must
be a compile-time constant. When set, it indicates a valid
cache_hint (``i64 %ch``) and generates the ``.L2::cache_hint``
variant of the PTX instruction.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-cp-async-bulk-prefetch-tensor>`_.
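A minimal sketch of a 2D tile prefetch into the L2 cache, with no cache
hint; operand names are placeholders:

.. code-block:: llvm

  call void @llvm.nvvm.cp.async.bulk.tensor.prefetch.tile.2d(
      ptr %tmap, i32 %d0, i32 %d1, i64 0, i1 false)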
'``llvm.nvvm.cp.async.bulk.tensor.prefetch.im2col.[3-5]d``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.im2col.3d(ptr %tensor_map, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.im2col.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.im2col.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.im2col.w.3d(ptr %tensor_map, i32 %d0, i32 %d1, i32 %d2, i16 %wHalo, i16 %wOffset, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.im2col.w.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.im2col.w.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.im2col.w.128.3d(ptr %tensor_map, i32 %d0, i32 %d1, i32 %d2, i16 %wHalo, i16 %wOffset, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.im2col.w.128.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.prefetch.im2col.w.128.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, ...)
Overview:
"""""""""
The '``@llvm.nvvm.cp.async.bulk.tensor.prefetch.im2col.[3-5]d``' intrinsics
correspond to the ``cp.async.bulk.prefetch.tensor.[3-5]d.L2.global*`` set
of PTX instructions. These instructions initiate an asynchronous prefetch
of tensor data from global memory to the L2 cache. In im2col mode, some
dimensions of the source tensor are unrolled into a single dimensional
column at the destination. In this mode, the tensor has to be at least
three-dimensional. Along with the tensor coordinates, im2col offsets are
also specified (denoted by ``i16 %im2col0 ... i16 %im2col2``). For ``im2col``
mode, the number of offsets is two less than the number of dimensions of
the tensor operation. For the ``im2col.w`` and ``im2col.w.128`` modes,
the number of offsets is always 2, denoted by ``i16 %wHalo`` and
``i16 %wOffset`` arguments. For more information on ``im2col.w`` and
``im2col.w.128`` modes, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tensor-im2col-w-w128-modes>`_.
The last argument to these intrinsics is a boolean flag, with
the same functionality as described in the ``tile`` mode intrinsics above.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-cp-async-bulk-prefetch-tensor>`_.
'``llvm.nvvm.cp.async.bulk.tensor.reduce.[red_op].tile.[1-5]d``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.tile.1d(ptr addrspace(3) %src, ptr %tensor_map, i32 %d0, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.tile.1d(ptr addrspace(3) %src, ptr %tensor_map, i32 %d0, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.tile.1d(ptr addrspace(3) %src, ptr %tensor_map, i32 %d0, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.tile.1d(ptr addrspace(3) %src, ptr %tensor_map, i32 %d0, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.tile.1d(ptr addrspace(3) %src, ptr %tensor_map, i32 %d0, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.tile.1d(ptr addrspace(3) %src, ptr %tensor_map, i32 %d0, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.tile.1d(ptr addrspace(3) %src, ptr %tensor_map, i32 %d0, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.tile.1d(ptr addrspace(3) %src, ptr %tensor_map, i32 %d0, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.<red_op>.tile.2d(..., i32 %d0, i32 %d1, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.<red_op>.tile.3d(..., i32 %d0, i32 %d1, i32 %d2, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.<red_op>.tile.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.<red_op>.tile.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, ...)
Overview:
"""""""""
The '``@llvm.nvvm.cp.async.bulk.tensor.reduce.<red_op>.tile.[1-5]d``' intrinsics
correspond to the ``cp.reduce.async.bulk.tensor.[1-5]d.*`` set of PTX instructions.
These instructions initiate an asynchronous reduction operation of tensor data
in global memory with the tensor data in shared{::cta} memory, using ``tile`` mode.
The dimension of the tensor data ranges from 1d to 5d with the coordinates
specified by the ``i32 %d0 ... i32 %d4`` arguments. The supported reduction
operations are {add, min, max, inc, dec, and, or, xor} as described in the
``tile.1d`` intrinsics.
* The last argument to these intrinsics is a boolean flag
indicating support for cache_hint. This flag argument must
be a compile-time constant. When set, it indicates a valid
cache_hint (``i64 %ch``) and generates the ``.L2::cache_hint``
variant of the PTX instruction.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-reduce-async-bulk-tensor>`_.
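A minimal sketch of an ``add`` reduction of a 2D tile in global memory with
tensor data in shared::cta memory, cache hint enabled; operand names are
placeholders:

.. code-block:: llvm

  call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.tile.2d(
      ptr addrspace(3) %src, ptr %tmap,
      i32 %d0, i32 %d1, i64 %ch, i1 true)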
'``llvm.nvvm.cp.async.bulk.tensor.reduce.[red_op].im2col.[3-5]d``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.<red_op>.im2col.3d(ptr addrspace(3) %src, ptr %tensor_map, i32 %d0, i32 %d1, i32 %d2, i64 %ch, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.<red_op>.im2col.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.reduce.<red_op>.im2col.5d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, ...)
Overview:
"""""""""
The '``@llvm.nvvm.cp.async.bulk.tensor.reduce.<red_op>.im2col.[3-5]d``' intrinsics
correspond to the ``cp.reduce.async.bulk.tensor.[3-5]d.*`` set of PTX instructions.
These instructions initiate an asynchronous reduction operation of tensor data
in global memory with the tensor data in shared{::cta} memory, using ``im2col`` mode.
In this mode, the tensor has to be at least three-dimensional. The supported
reduction operations are the same as the ones in the ``tile`` mode. The last
argument to these intrinsics is a boolean flag, with the same functionality
as described in the ``tile`` mode intrinsics above.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-reduce-async-bulk-tensor>`_.
Warp Group Intrinsics
---------------------
'``llvm.nvvm.wgmma.fence.sync.aligned``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.wgmma.fence.sync.aligned()
Overview:
"""""""""
The '``@llvm.nvvm.wgmma.fence.sync.aligned``' intrinsic generates the
``wgmma.fence.sync.aligned`` PTX instruction, which establishes an ordering
between prior accesses to any warpgroup registers and subsequent accesses to
the same registers by a ``wgmma.mma_async`` instruction.
The ``wgmma.fence`` instruction must be issued by all warps of the warpgroup in
the following locations:
* Before the first ``wgmma.mma_async`` operation in a warpgroup.
* Between a register access by a thread in the warpgroup and any
``wgmma.mma_async`` instruction that accesses the same registers, except when
these are accumulator register accesses across multiple ``wgmma.mma_async``
instructions of the same shape in which case an ordering guarantee is
provided by default.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#asynchronous-warpgroup-level-matrix-instructions-wgmma-fence>`_.
'``llvm.nvvm.wgmma.commit_group.sync.aligned``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.wgmma.commit_group.sync.aligned()
Overview:
"""""""""
The '``@llvm.nvvm.wgmma.commit_group.sync.aligned``' intrinsic generates the
``wgmma.commit_group.sync.aligned`` PTX instruction, which creates a new
wgmma-group per warpgroup and batches all prior ``wgmma.mma_async``
instructions initiated by the executing warp but not committed to any
wgmma-group into the new wgmma-group. If there are no uncommitted
``wgmma.mma_async`` instructions, then ``wgmma.commit_group`` results in an
empty wgmma-group.
An executing thread can wait for the completion of all ``wgmma.mma_async``
operations in a wgmma-group by using ``wgmma.wait_group``.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#asynchronous-warpgroup-level-matrix-instructions-wgmma-commit-group>`_.
'``llvm.nvvm.wgmma.wait_group.sync.aligned``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.wgmma.wait_group.sync.aligned(i64 immarg N)
Overview:
"""""""""
The '``@llvm.nvvm.wgmma.wait_group.sync.aligned``' intrinsic generates the
``wgmma.wait_group.sync.aligned N`` PTX instruction, which will cause the
executing thread to wait until only ``N`` or fewer of the most recent
wgmma-groups are pending and all the prior wgmma-groups committed by the
executing threads are complete. For example, when ``N`` is 0, the executing
thread waits on all the prior wgmma-groups to complete. Operand ``N`` is an
integer constant.
Accessing the accumulator register or the input register containing the
fragments of matrix A of a ``wgmma.mma_async`` instruction without first
performing a ``wgmma.wait_group`` instruction that waits on a wgmma-group
including that ``wgmma.mma_async`` instruction is undefined behavior.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#asynchronous-warpgroup-level-matrix-instructions-wgmma-wait-group>`_.
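Taken together, the three warpgroup intrinsics are typically issued in the
following order around the asynchronous MMA operations (a sketch; the
``wgmma.mma_async`` operations themselves are elided):

.. code-block:: llvm

  ; Order prior register accesses before the MMA operations.
  call void @llvm.nvvm.wgmma.fence.sync.aligned()
  ; ... wgmma.mma_async operations are issued here ...
  ; Batch the prior MMA operations into a new wgmma-group.
  call void @llvm.nvvm.wgmma.commit_group.sync.aligned()
  ; Wait until no wgmma-groups are pending (N = 0).
  call void @llvm.nvvm.wgmma.wait_group.sync.aligned(i64 0)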
'``llvm.nvvm.griddepcontrol.*``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.griddepcontrol.launch_dependents()
declare void @llvm.nvvm.griddepcontrol.wait()
Overview:
"""""""""
The ``griddepcontrol`` intrinsics allow dependent grids and prerequisite grids, as defined by the runtime, to control execution in the following way:
The ``griddepcontrol.launch_dependents`` intrinsic signals that the dependents can be scheduled before the current grid completes. The intrinsic can be invoked by multiple threads in the current CTA, and repeated invocations have no additional side effects beyond those of the first invocation.
The ``griddepcontrol.wait`` intrinsic causes the executing thread to wait until all prerequisite grids in flight have completed and all the memory operations from the prerequisite grids are performed and made visible to the current grid.
For more information, refer to the
`PTX ISA <https://docs.nvidia.com/cuda/parallel-thread-execution/#parallel-synchronization-and-communication-instructions-griddepcontrol>`__.
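A sketch of the typical usage in a dependent grid, which waits for its
prerequisites before reading their results and signals its own dependents as
early as possible:

.. code-block:: llvm

  ; Block until all prerequisite grids have completed and their
  ; memory operations are visible to this grid.
  call void @llvm.nvvm.griddepcontrol.wait()
  ; ... consume data produced by the prerequisite grids ...
  ; Allow dependent grids to be scheduled before this grid completes.
  call void @llvm.nvvm.griddepcontrol.launch_dependents()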
TCGEN05 family of Intrinsics
----------------------------
The ``llvm.nvvm.tcgen05.*`` intrinsics model the TCGEN05 family of instructions
exposed by PTX. These intrinsics use 'Tensor Memory' (henceforth ``tmem``).
NVPTX represents this memory using ``addrspace(6)``; pointers into it are
always 32 bits wide.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tensor-memory>`_.
Tensor-memory pointers may only be used with the tcgen05 intrinsics.
Specialized load/store instructions (``tcgen05.ld``/``tcgen05.st``) are
provided to work with tensor memory.
See the PTX ISA for more information on tensor-memory load/store instructions
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tensor-memory-and-register-load-store-instructions>`_.
'``llvm.nvvm.tcgen05.alloc``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.tcgen05.alloc.cg1(ptr %dst, i32 %ncols)
declare void @llvm.nvvm.tcgen05.alloc.cg2(ptr %dst, i32 %ncols)
declare void @llvm.nvvm.tcgen05.alloc.shared.cg1(ptr addrspace(3) %dst, i32 %ncols)
declare void @llvm.nvvm.tcgen05.alloc.shared.cg2(ptr addrspace(3) %dst, i32 %ncols)
Overview:
"""""""""
The '``@llvm.nvvm.tcgen05.alloc.*``' intrinsics correspond to the
``tcgen05.alloc.cta_group*.sync.aligned.b32`` family of PTX instructions.
``tcgen05.alloc`` is a potentially blocking instruction that dynamically
allocates the specified number of columns in the Tensor Memory and writes
the address of the allocated Tensor Memory into shared memory at the
location specified by ``%dst``. The 32-bit operand ``%ncols`` specifies
the number of columns to be allocated and must be a power of two.
The ``.shared`` variant explicitly uses shared memory address space for
the ``%dst`` operand. The ``.cg1`` and ``.cg2`` variants generate
``cta_group::1`` and ``cta_group::2`` variants of the instruction respectively.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tensor-memory-allocation-and-management-instructions>`_.
'``llvm.nvvm.tcgen05.dealloc``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.tcgen05.dealloc.cg1(ptr addrspace(6) %tmem_addr, i32 %ncols)
declare void @llvm.nvvm.tcgen05.dealloc.cg2(ptr addrspace(6) %tmem_addr, i32 %ncols)
Overview:
"""""""""
The '``@llvm.nvvm.tcgen05.dealloc.*``' intrinsics correspond to the
``tcgen05.dealloc.*`` set of PTX instructions. The ``tcgen05.dealloc``
instruction deallocates the Tensor Memory specified by the Tensor Memory
address ``%tmem_addr``. The operand ``%tmem_addr`` must point to a previous
Tensor Memory allocation. The 32-bit operand ``%ncols`` specifies the number
of columns to be de-allocated. The ``.cg1`` and ``.cg2`` variants generate
``cta_group::1`` and ``cta_group::2`` variants of the instruction respectively.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tensor-memory-allocation-and-management-instructions>`_.
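A sketch of a matching allocate/deallocate pair, assuming the tmem address
written to shared memory can be read back as a 32-bit ``addrspace(6)``
pointer:

.. code-block:: llvm

  ; Allocate 32 tensor-memory columns (must be a power of two); the
  ; tmem address is written to %dst in shared memory.
  call void @llvm.nvvm.tcgen05.alloc.shared.cg1(ptr addrspace(3) %dst, i32 32)
  %tmem = load ptr addrspace(6), ptr addrspace(3) %dst
  ; ... use %tmem with other tcgen05 intrinsics ...
  call void @llvm.nvvm.tcgen05.dealloc.cg1(ptr addrspace(6) %tmem, i32 32)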
'``llvm.nvvm.tcgen05.relinq.alloc.permit``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.tcgen05.relinq.alloc.permit.cg1()
declare void @llvm.nvvm.tcgen05.relinq.alloc.permit.cg2()
Overview:
"""""""""
The '``@llvm.nvvm.tcgen05.relinq.alloc.permit.*``' intrinsics correspond
to the ``tcgen05.relinquish_alloc_permit.*`` set of PTX instructions.
This instruction specifies that the CTA of the executing thread is
relinquishing the right to allocate Tensor Memory. So, it is illegal
for a CTA to perform ``tcgen05.alloc`` after any of its constituent
threads execute ``tcgen05.relinquish_alloc_permit``. The ``.cg1``
and ``.cg2`` variants generate ``cta_group::1`` and ``cta_group::2``
flavors of the instruction respectively.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tensor-memory-allocation-and-management-instructions>`_.
'``llvm.nvvm.tcgen05.commit``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.tcgen05.commit.{cg1,cg2}(ptr %mbar)
declare void @llvm.nvvm.tcgen05.commit.shared.{cg1,cg2}(ptr addrspace(3) %mbar)
declare void @llvm.nvvm.tcgen05.commit.mc.{cg1,cg2}(ptr %mbar, i16 %mc)
declare void @llvm.nvvm.tcgen05.commit.mc.shared.{cg1,cg2}(ptr addrspace(3) %mbar, i16 %mc)
Overview:
"""""""""
The '``@llvm.nvvm.tcgen05.commit.*``' intrinsics correspond to the
``tcgen05.commit.{cg1/cg2}.mbarrier::arrive::one.*`` set of PTX instructions.
The ``tcgen05.commit`` is an asynchronous instruction which makes the mbarrier
object (``%mbar``) track the completion of all prior asynchronous tcgen05 operations.
The ``.mc`` variants allow signaling on the mbarrier objects of multiple CTAs
(specified by ``%mc``) in the cluster. The ``.cg1`` and ``.cg2`` variants generate
``cta_group::1`` and ``cta_group::2`` flavors of the instruction respectively.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen-async-sync-operations-commit>`_.
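For example, completion of prior asynchronous tcgen05 operations can be
tracked through an mbarrier object in shared memory (a sketch; ``%mbar`` is
a placeholder):

.. code-block:: llvm

  call void @llvm.nvvm.tcgen05.commit.shared.cg1(ptr addrspace(3) %mbar)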
'``llvm.nvvm.tcgen05.wait``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.tcgen05.wait.ld()
declare void @llvm.nvvm.tcgen05.wait.st()
Overview:
"""""""""
The '``@llvm.nvvm.tcgen05.wait.ld/st``' intrinsics correspond to
the ``tcgen05.wait::{ld/st}.sync.aligned`` pair of PTX instructions.
The ``tcgen05.wait::ld`` causes the executing thread to block until
all prior ``tcgen05.ld`` operations issued by the executing thread
have completed. The ``tcgen05.wait::st`` causes the executing thread
to block until all prior ``tcgen05.st`` operations issued by the
executing thread have completed.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-instructions-tcgen05-wait>`_.
'``llvm.nvvm.tcgen05.fence``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.tcgen05.fence.before.thread.sync()
declare void @llvm.nvvm.tcgen05.fence.after.thread.sync()
Overview:
"""""""""
The '``@llvm.nvvm.tcgen05.fence.*``' intrinsics correspond to
the ``tcgen05.fence::{before/after}_thread_sync`` pair of PTX instructions.
These instructions act as code motion fences for asynchronous tcgen05
operations.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tensorcore-5th-generation-instructions-tcgen05-fence>`_.
'``llvm.nvvm.tcgen05.shift``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.tcgen05.shift.down.cg1(ptr addrspace(6) %tmem_addr)
declare void @llvm.nvvm.tcgen05.shift.down.cg2(ptr addrspace(6) %tmem_addr)
Overview:
"""""""""
The '``@llvm.nvvm.tcgen05.shift.{cg1/cg2}``' intrinsics correspond to
the ``tcgen05.shift.{cg1/cg2}`` PTX instructions. The ``tcgen05.shift``
is an asynchronous instruction which initiates the shifting of 32-byte
elements downwards across all the rows, except the last, by one row.
The address operand ``%tmem_addr`` specifies the base address of the
matrix in the Tensor Memory whose rows must be down shifted.
For more information, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-instructions-tcgen05-shift>`_.
'``llvm.nvvm.tcgen05.cp``'
^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.tcgen05.cp.4x256b.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.128x256b.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.128x128b.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.32x128b_warpx4.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_02_13.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_01_23.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.4x256b.b6x16_p32.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.128x256b.b6x16_p32.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.128x128b.b6x16_p32.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.32x128b_warpx4.b6x16_p32.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_02_13.b6x16_p32.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_01_23.b6x16_p32.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.4x256b.b4x16_p64.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.128x256b.b4x16_p64.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.128x128b.b4x16_p64.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.32x128b_warpx4.b4x16_p64.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_02_13.b4x16_p64.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
declare void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_01_23.b4x16_p64.{cg1,cg2}(ptr addrspace(6) %tmem_addr, i64 %sdesc)
Overview:
"""""""""
The '``@llvm.nvvm.tcgen05.cp.{shape}.{src_fmt}.{cg1/cg2}``' intrinsics
correspond to the ``tcgen05.cp.*`` family of PTX instructions.
The ``tcgen05.cp`` instruction initiates an asynchronous copy operation from
shared memory to the location specified by ``%tmem_addr`` in Tensor Memory.
The 64-bit register operand ``%sdesc`` is the matrix descriptor representing
the source matrix in shared memory that needs to be copied.
The valid shapes for the copy operation are:
{128x256b, 4x256b, 128x128b, 64x128b_warpx2_02_13, 64x128b_warpx2_01_23, 32x128b_warpx4}.
Shapes ``64x128b`` and ``32x128b`` require dedicated multicast qualifiers,
which are appended to the corresponding intrinsic names.
Optionally, the data can be decompressed from the source format in the shared memory
to the destination format in Tensor Memory during the copy operation. Currently,
only ``.b8x16`` is supported as destination format. The valid source formats are
``.b6x16_p32`` and ``.b4x16_p64``.
When the source format is ``.b6x16_p32``, a contiguous set of 16 elements of 6-bits
each followed by four bytes of padding (``_p32``) in shared memory is decompressed
into 16 elements of 8-bits (``.b8x16``) each in the Tensor Memory.
When the source format is ``.b4x16_p64``, a contiguous set of 16 elements of 4-bits
each followed by eight bytes of padding (``_p64``) in shared memory is decompressed
into 16 elements of 8-bits (``.b8x16``) each in the Tensor Memory.
For more information on the decompression schemes, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#optional-decompression>`_.
For more information on the tcgen05.cp instruction, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-instructions-tcgen05-cp>`_.
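A sketch of a copy of a ``128x256b`` matrix from shared memory into tensor
memory, where ``%sdesc`` is the shared-memory matrix descriptor:

.. code-block:: llvm

  call void @llvm.nvvm.tcgen05.cp.128x256b.cg1(ptr addrspace(6) %tmem_addr, i64 %sdesc)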
'``llvm.nvvm.tcgen05.ld.*``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare <n x i32> @llvm.nvvm.tcgen05.ld.<shape>.<num>(ptr addrspace(6) %tmem_addr, i1 %pack)
declare <n x i32> @llvm.nvvm.tcgen05.ld.16x32bx2.<num>(ptr addrspace(6) %tmem_addr, i64 %offset, i1 %pack)
Overview:
"""""""""
This group of intrinsics asynchronously loads data from the Tensor Memory at the location specified
by the 32-bit address operand ``tmem_addr`` into the destination registers, collectively across all threads
of the warp.
All the threads in the warp must specify the same value of ``tmem_addr``, which must be the base address
of the collective load operation. Otherwise, the behavior is undefined.
The ``shape`` and ``num`` qualifiers together determine the total dimension of the data ('n') which
is loaded from the Tensor Memory. The ``shape`` qualifier indicates the base dimension of the data and the
``num`` qualifier indicates the repeat factor on the base dimension, resulting in the total dimension of the
data that is accessed.
Allowed values for ``num`` are ``x1, x2, x4, x8, x16, x32, x64, x128``.
Allowed values for ``shape`` in the first intrinsic are ``16x64b, 16x128b, 16x256b, 32x32b``.
The allowed value for ``shape`` in the second intrinsic is ``16x32bx2``.
The result of the intrinsic is a vector consisting of one or more 32-bit registers, derived from ``shape``
and ``num`` as shown below.
=========== ========================= ========== ==========
num/shape 16x32bx2/16x64b/32x32b 16x128b 16x256b
=========== ========================= ========== ==========
x1 1 2 4
x2 2 4 8
x4 4 8 16
x8 8 16 32
x16 16 32 64
x32 32 64 128
x64 64 128 NA
x128 128 NA NA
=========== ========================= ========== ==========
The last argument ``i1 %pack`` is a compile-time constant which, when set, indicates that adjacent columns are packed into a single 32-bit element during the load.
For more information, refer to the
`PTX ISA <https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-instructions-tcgen05-ld>`__.
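For example, per the table above, the ``16x64b`` shape with an ``x2`` repeat
factor yields two 32-bit registers (a sketch, with packing disabled):

.. code-block:: llvm

  %vals = call <2 x i32> @llvm.nvvm.tcgen05.ld.16x64b.x2(
      ptr addrspace(6) %tmem_addr, i1 false)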
'``llvm.nvvm.tcgen05.st.*``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.tcgen05.st.<shape>.<num>(ptr addrspace(6) %tmem_addr, <n x i32> %args, i1 %unpack)
declare void @llvm.nvvm.tcgen05.st.16x32bx2.<num>(ptr addrspace(6) %tmem_addr, <n x i32> %args, i64 %offset, i1 %unpack)
Overview:
"""""""""
This group of intrinsics asynchronously stores data from the source vector into the Tensor Memory at the
location specified by the 32-bit address operand ``tmem_addr``, collectively across all threads of the warp.
All the threads in the warp must specify the same value of ``tmem_addr``, which must be the base address of
the collective store operation. Otherwise, the behavior is undefined.
The ``shape`` and ``num`` qualifiers together determine the total dimension of the data ('n') which
is stored to the Tensor Memory. The ``shape`` qualifier indicates the base dimension of the data and the
``num`` qualifier indicates the repeat factor on the base dimension, resulting in the total dimension of the
data that is accessed.
Allowed values for ``num`` are ``x1, x2, x4, x8, x16, x32, x64, x128``.
Allowed values for ``shape`` in the first intrinsic are ``16x64b, 16x128b, 16x256b, 32x32b``.
The allowed value for ``shape`` in the second intrinsic is ``16x32bx2``.
The ``args`` argument is a vector consisting of one or more 32-bit registers, derived from ``shape`` and
``num`` as listed in the table in the ``tcgen05.ld`` section.
The last argument ``i1 %unpack`` is a compile-time constant which, when set, enables ``unpack`` mode: each
32-bit element in the register is unpacked into two 16-bit elements, which are stored in adjacent columns.
For more information, refer to the
`PTX ISA <https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-instructions-tcgen05-st>`__.
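For example, the ``16x64b`` shape with an ``x2`` repeat factor takes a
``<2 x i32>`` source vector (a sketch, with unpack mode disabled):

.. code-block:: llvm

  call void @llvm.nvvm.tcgen05.st.16x64b.x2(
      ptr addrspace(6) %tmem_addr, <2 x i32> %vals, i1 false)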
Store Intrinsics
----------------
'``llvm.nvvm.st.bulk.*``'
^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.st.bulk(ptr %dst, i64 %size, i64 immarg %initval)
declare void @llvm.nvvm.st.bulk.shared.cta(ptr addrspace(3) %dst, i64 %size, i64 immarg %initval)
Overview:
"""""""""
The '``@llvm.nvvm.st.bulk.*``' intrinsics initialize a region of shared memory
starting from the location specified by the destination address operand ``%dst``.
The integer operand ``%size`` specifies the amount of memory to be initialized
in terms of number of bytes and must be a multiple of 8. Otherwise, the behavior
is undefined.
The integer immediate operand ``%initval`` specifies the initialization value
for the memory locations. The only numeric value allowed is 0.
The ``@llvm.nvvm.st.bulk.shared.cta`` and ``@llvm.nvvm.st.bulk`` intrinsics are
similar but the latter uses generic addressing (see `Generic Addressing <https://docs.nvidia.com/cuda/parallel-thread-execution/#generic-addressing>`__).
For more information, refer to the `PTX ISA <https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-st-bulk>`__.
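For example, a sketch that zero-fills 256 bytes of shared::cta memory
(``%size`` must be a multiple of 8, and 0 is the only allowed value for
``%initval``):

.. code-block:: llvm

  call void @llvm.nvvm.st.bulk.shared.cta(ptr addrspace(3) %dst, i64 256, i64 0)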
clusterlaunchcontrol Intrinsics
-------------------------------
'``llvm.nvvm.clusterlaunchcontrol.try_cancel*``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.clusterlaunchcontrol.try_cancel.async.shared(ptr addrspace(3) %addr, ptr addrspace(3) %mbar)
declare void @llvm.nvvm.clusterlaunchcontrol.try_cancel.async.multicast.shared(ptr addrspace(3) %addr, ptr addrspace(3) %mbar)
Overview:
"""""""""
The ``clusterlaunchcontrol.try_cancel`` intrinsics request to atomically cancel
the launch of a cluster that has not started running yet. They asynchronously and
non-atomically write a 16-byte opaque response to shared memory, pointed to by the
16-byte-aligned ``addr``, indicating whether the operation succeeded or failed.
``addr`` and the 8-byte-aligned ``mbar`` must refer to ``shared::cta``;
otherwise, the behavior is undefined. The completion of the asynchronous operation
is tracked using the mbarrier completion mechanism at ``.cluster`` scope, referenced
by the shared memory pointer ``mbar``. On success, the opaque response contains
the CTA id of the first CTA of the canceled cluster; no other successful response
from other ``clusterlaunchcontrol.try_cancel`` operations from the same grid will
contain that id.
The ``multicast`` variant specifies that the response is asynchronously non-atomically written to
the corresponding shared memory location of each CTA in the requesting cluster.
The completion of the write of each local response is tracked by independent
mbarriers at the corresponding shared memory location of each CTA in the
cluster.
For more information, refer to the `PTX ISA <https://docs.nvidia.com/cuda/parallel-thread-execution/?a#parallel-synchronization-and-communication-instructions-clusterlaunchcontrol-try-cancel>`__.
'``llvm.nvvm.clusterlaunchcontrol.query_cancel.is_canceled``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i1 @llvm.nvvm.clusterlaunchcontrol.query_cancel.is_canceled(i128 %try_cancel_response)
Overview:
"""""""""
The ``llvm.nvvm.clusterlaunchcontrol.query_cancel.is_canceled`` intrinsic decodes the opaque response written by the
``llvm.nvvm.clusterlaunchcontrol.try_cancel`` operation.
The intrinsic returns ``0`` (false) if the request failed. If the request succeeded,
it returns ``1`` (true). A true result indicates that:
- the thread block cluster whose first CTA id matches that of the response
handle will not run, and
- no other successful response of another ``try_cancel`` request in the grid will contain
  the first CTA id of that cluster.
For more information, refer to the `PTX ISA <https://docs.nvidia.com/cuda/parallel-thread-execution/?a#parallel-synchronization-and-communication-instructions-clusterlaunchcontrol-query-cancel>`__.
'``llvm.nvvm.clusterlaunchcontrol.query_cancel.get_first_ctaid.*``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i32 @llvm.nvvm.clusterlaunchcontrol.query_cancel.get_first_ctaid.x(i128 %try_cancel_response)
declare i32 @llvm.nvvm.clusterlaunchcontrol.query_cancel.get_first_ctaid.y(i128 %try_cancel_response)
declare i32 @llvm.nvvm.clusterlaunchcontrol.query_cancel.get_first_ctaid.z(i128 %try_cancel_response)
Overview:
"""""""""
The ``clusterlaunchcontrol.query_cancel.get_first_ctaid.*`` intrinsic can be
used to decode the successful opaque response written by the
``llvm.nvvm.clusterlaunchcontrol.try_cancel`` operation.
If the request succeeded:
- ``llvm.nvvm.clusterlaunchcontrol.query_cancel.get_first_ctaid.{x,y,z}`` returns
the coordinate of the first CTA in the canceled cluster, either x, y, or z.
If the request failed, the behavior of these intrinsics is undefined.
For more information, refer to the `PTX ISA <https://docs.nvidia.com/cuda/parallel-thread-execution/?a#parallel-synchronization-and-communication-instructions-clusterlaunchcontrol-query-cancel>`__.
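A sketch of decoding a response handle (``%resp`` is a placeholder for the
16-byte response read from shared memory): test for success first, since the
coordinate accessors are undefined on a failed request:

.. code-block:: llvm

  %ok = call i1 @llvm.nvvm.clusterlaunchcontrol.query_cancel.is_canceled(i128 %resp)
  br i1 %ok, label %canceled, label %done

  canceled:
  %x = call i32 @llvm.nvvm.clusterlaunchcontrol.query_cancel.get_first_ctaid.x(i128 %resp)
  ; ... process the canceled cluster using %x (and .y/.z as needed) ...
  br label %done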
Perf Monitor Event Intrinsics
-----------------------------
'``llvm.nvvm.pm.event.mask``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.pm.event.mask(i16 immarg %mask_val)
Overview:
"""""""""
The '``llvm.nvvm.pm.event.mask``' intrinsic triggers one or more
performance monitor events. Each bit in the 16-bit immediate operand
``%mask_val`` controls an event.
For more information on the pmevent instructions, refer to the PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#miscellaneous-instructions-pmevent>`_.
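For example, setting bits 0 and 3 of the mask triggers events 0 and 3 (the
mask operand must be an immediate):

.. code-block:: llvm

  call void @llvm.nvvm.pm.event.mask(i16 9)  ; 0b1001 -> events 0 and 3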
Other Intrinsics
----------------
For the full set of NVPTX intrinsics, please see the
``include/llvm/IR/IntrinsicsNVVM.td`` file in the LLVM source tree.
.. _libdevice:
Linking with Libdevice
======================
The CUDA Toolkit comes with an LLVM bitcode library called ``libdevice`` that
implements many common mathematical functions. This library can be used as a
high-performance math library for any compilers using the LLVM NVPTX target.
The library can be found under ``nvvm/libdevice/`` in the CUDA Toolkit and
there is a separate version for each compute architecture.
For a list of all math functions implemented in libdevice, see
`libdevice Users Guide <http://docs.nvidia.com/cuda/libdevice-users-guide/index.html>`_.
To accommodate various math-related compiler flags that can affect code
generation of libdevice code, the library code depends on a special LLVM IR
pass (``NVVMReflect``) to handle conditional compilation within LLVM IR. This
pass looks for calls to the ``@__nvvm_reflect`` function and replaces them
with constants based on the defined reflection parameters. Such conditional
code often follows a pattern:
.. code-block:: c++
float my_function(float a) {
if (__nvvm_reflect("FASTMATH"))
return my_function_fast(a);
else
return my_function_precise(a);
}
The default value for all unspecified reflection parameters is zero.
The ``NVVMReflect`` pass should be executed early in the optimization
pipeline, immediately after the link stage. The ``internalize`` pass is also
recommended to remove unused math functions from the resulting PTX. For an
input IR module ``module.bc``, the following compilation flow is recommended:
The ``NVVMReflect`` pass will attempt to remove dead code even without
optimizations. This allows potentially incompatible instructions to be avoided
at all optimization levels by using the ``__CUDA_ARCH`` argument.
1. Save list of external functions in ``module.bc``
2. Link ``module.bc`` with ``libdevice.compute_XX.YY.bc``
3. Internalize all functions not in list from (1)
4. Eliminate all unused internal functions
5. Run ``NVVMReflect`` pass
6. Run standard optimization pipeline
.. note::
``linkonce`` and ``linkonce_odr`` linkage types are not suitable for the
libdevice functions, because it is possible to link two IR modules that have
been linked against libdevice using different reflection variables.
Since the ``NVVMReflect`` pass replaces conditionals with constants, it will
often leave behind dead code of the form:
.. code-block:: llvm
entry:
..
br i1 true, label %foo, label %bar
foo:
..
bar:
; Dead code
..
Therefore, it is recommended that ``NVVMReflect`` is executed early in the
optimization pipeline before dead-code elimination.
The NVPTX TargetMachine knows how to schedule ``NVVMReflect`` at the beginning
of your pass manager; just use the following code when setting up your pass
manager, and ``NVPTXTargetMachine::registerPassBuilderCallbacks`` will add the
pass to the pass manager:
.. code-block:: c++
std::unique_ptr<TargetMachine> TM = ...;
PassBuilder PB(TM);
ModulePassManager MPM;
PB.parsePassPipeline(MPM, ...);
Reflection Parameters
---------------------
The libdevice library currently uses the following reflection parameters to
control code generation:
==================== ======================================================
Flag Description
==================== ======================================================
``__CUDA_FTZ=[0,1]`` Use optimized code paths that flush subnormals to zero
==================== ======================================================
The value of this flag is determined by the "nvvm-reflect-ftz" module flag.
The following sets the ftz flag to 1.
.. code-block:: llvm
!llvm.module.flags = !{!0}
!0 = !{i32 4, !"nvvm-reflect-ftz", i32 1}
(``i32 4`` indicates that the value set here overrides the value in another
module we link with. See the `LangRef <LangRef.html#module-flags-metadata>`_
for details.)
Executing PTX
=============
The most common way to execute PTX assembly on a GPU device is to use the CUDA
Driver API. This API is a low-level interface to the GPU driver and allows for
JIT compilation of PTX code to native GPU machine code.
Initializing the Driver API:
.. code-block:: c++
CUdevice device;
CUcontext context;
// Initialize the driver API
cuInit(0);
// Get a handle to the first compute device
cuDeviceGet(&device, 0);
// Create a compute device context
cuCtxCreate(&context, 0, device);
JIT compiling a PTX string to a device binary:
.. code-block:: c++
CUmodule module;
CUfunction function;
// JIT compile a null-terminated PTX string
cuModuleLoadData(&module, (void*)PTXString);
// Get a handle to the "myfunction" kernel function
cuModuleGetFunction(&function, module, "myfunction");
For full examples of executing PTX assembly, please see the `CUDA Samples
<https://developer.nvidia.com/cuda-downloads>`_ distribution.
Common Issues
=============
ptxas complains of undefined function: __nvvm_reflect
-----------------------------------------------------
When linking with libdevice, the ``NVVMReflect`` pass must be used. See
:ref:`libdevice` for more information.
Tutorial: A Simple Compute Kernel
=================================
To start, let us take a look at a simple compute kernel written directly in
LLVM IR. The kernel implements vector addition, where each thread computes one
element of the output vector C from the input vectors A and B. To make this
easier, we also assume that only a single CTA (thread block) will be launched,
and that it will be one dimensional.
The Kernel
----------
.. code-block:: llvm
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
target triple = "nvptx64-nvidia-cuda"
; Intrinsic to read X component of thread ID
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x() readnone nounwind
define void @kernel(ptr addrspace(1) %A,
ptr addrspace(1) %B,
ptr addrspace(1) %C) {
entry:
; What is my ID?
%id = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x() readnone nounwind
; Compute pointers into A, B, and C
%ptrA = getelementptr float, ptr addrspace(1) %A, i32 %id
%ptrB = getelementptr float, ptr addrspace(1) %B, i32 %id
%ptrC = getelementptr float, ptr addrspace(1) %C, i32 %id
; Read A, B
%valA = load float, ptr addrspace(1) %ptrA, align 4
%valB = load float, ptr addrspace(1) %ptrB, align 4
; Compute C = A + B
%valC = fadd float %valA, %valB
; Store back to C
store float %valC, ptr addrspace(1) %ptrC, align 4
ret void
}
!nvvm.annotations = !{!0}
!0 = !{ptr @kernel, !"kernel", i32 1}
We can use the LLVM ``llc`` tool to directly run the NVPTX code generator:
.. code-block:: text
# llc -mcpu=sm_20 kernel.ll -o kernel.ptx
.. note::
If you want to generate 32-bit code, change ``p:64:64:64`` to ``p:32:32:32``
in the module data layout string and use ``nvptx-nvidia-cuda`` as the
target triple.
The output we get from ``llc`` (as of LLVM 3.4):
.. code-block:: text
//
// Generated by LLVM NVPTX Back-End
//
.version 3.1
.target sm_20
.address_size 64
// .globl kernel
// @kernel
.visible .entry kernel(
.param .u64 kernel_param_0,
.param .u64 kernel_param_1,
.param .u64 kernel_param_2
)
{
.reg .f32 %f<4>;
.reg .s32 %r<2>;
.reg .s64 %rl<8>;
// %bb.0: // %entry
ld.param.u64 %rl1, [kernel_param_0];
mov.u32 %r1, %tid.x;
mul.wide.s32 %rl2, %r1, 4;
add.s64 %rl3, %rl1, %rl2;
ld.param.u64 %rl4, [kernel_param_1];
add.s64 %rl5, %rl4, %rl2;
ld.param.u64 %rl6, [kernel_param_2];
add.s64 %rl7, %rl6, %rl2;
ld.global.f32 %f1, [%rl3];
ld.global.f32 %f2, [%rl5];
add.f32 %f3, %f1, %f2;
st.global.f32 [%rl7], %f3;
ret;
}
Dissecting the Kernel
---------------------
Now let us dissect the LLVM IR that makes up this kernel.
Data Layout
^^^^^^^^^^^
The data layout string determines the size in bits of common data types, their
ABI alignment, and their storage size. For NVPTX, you should use one of the
following:
32-bit PTX:
.. code-block:: llvm
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
64-bit PTX:
.. code-block:: llvm
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
Target Intrinsics
^^^^^^^^^^^^^^^^^
In this example, we use the ``@llvm.nvvm.read.ptx.sreg.tid.x`` intrinsic to
read the X component of the current thread's ID, which corresponds to a read
of register ``%tid.x`` in PTX. The NVPTX back-end supports a large set of
intrinsics. A short list is shown below; please see
``include/llvm/IR/IntrinsicsNVVM.td`` for the full list.
================================================ ====================
Intrinsic CUDA Equivalent
================================================ ====================
``i32 @llvm.nvvm.read.ptx.sreg.tid.{x,y,z}`` threadIdx.{x,y,z}
``i32 @llvm.nvvm.read.ptx.sreg.ctaid.{x,y,z}`` blockIdx.{x,y,z}
``i32 @llvm.nvvm.read.ptx.sreg.ntid.{x,y,z}`` blockDim.{x,y,z}
``i32 @llvm.nvvm.read.ptx.sreg.nctaid.{x,y,z}`` gridDim.{x,y,z}
``void @llvm.nvvm.barrier0()`` __syncthreads()
================================================ ====================
Address Spaces
^^^^^^^^^^^^^^
You may have noticed that all of the pointer types in the LLVM IR example had
an explicit address space specifier. What is address space 1? NVIDIA GPU
devices (generally) have four types of memory:
- Global: Large, off-chip memory
- Shared: Small, on-chip memory shared among all threads in a CTA
- Local: Per-thread, private memory
- Constant: Read-only memory shared across all threads
These different types of memory are represented in LLVM IR as address spaces.
There is also a fifth address space used by the NVPTX code generator that
corresponds to the "generic" address space. This address space can represent
addresses in any other address space (with a few exceptions). This allows
users to write IR functions that can load/store memory using the same
instructions. Intrinsics are provided to convert pointers between the generic
and non-generic address spaces.
See :ref:`address_spaces` and :ref:`nvptx_intrinsics` for more information.
Kernel Metadata
^^^^^^^^^^^^^^^
In PTX, a function can be either a `kernel` function (callable from the host
program), or a `device` function (callable only from GPU code). You can think
of `kernel` functions as entry-points in the GPU program. To mark an LLVM IR
function as a `kernel` function, we make use of special LLVM metadata. The
NVPTX back-end will look for a named metadata node called
``nvvm.annotations``. This named metadata must contain a list of metadata that
describe the IR. For our purposes, we need to declare a metadata node that
assigns the "kernel" attribute to the LLVM IR function that should be emitted
as a PTX `kernel` function. These metadata nodes take the form:
.. code-block:: text
!{<function ref>, metadata !"kernel", i32 1}
For the previous example, we have:
.. code-block:: llvm
!nvvm.annotations = !{!0}
!0 = !{ptr @kernel, !"kernel", i32 1}
Here, we have a single metadata declaration in ``nvvm.annotations``. This
metadata annotates our ``@kernel`` function with the ``kernel`` attribute.
Running the Kernel
------------------
Generating PTX from LLVM IR is all well and good, but how do we execute it on
a real GPU device? The CUDA Driver API provides a convenient mechanism for
loading and JIT compiling PTX to a native GPU device, and launching a kernel.
The API is similar to OpenCL. A simple example showing how to load and
execute our vector addition code is shown below. Note that for brevity this
code does not perform much error checking!
.. note::
You can also use the ``ptxas`` tool provided by the CUDA Toolkit to offline
compile PTX to machine code (SASS) for a specific GPU architecture. Such
binaries can be loaded by the CUDA Driver API in the same way as PTX. This
can be useful for reducing startup time by precompiling the PTX kernels.
.. code-block:: c++
#include <iostream>
#include <fstream>
#include <cassert>
#include "cuda.h"
void checkCudaErrors(CUresult err) {
assert(err == CUDA_SUCCESS);
}
/// main - Program entry point
int main(int argc, char **argv) {
CUdevice device;
CUmodule cudaModule;
CUcontext context;
CUfunction function;
CUlinkState linker;
int devCount;
// CUDA initialization
checkCudaErrors(cuInit(0));
checkCudaErrors(cuDeviceGetCount(&devCount));
checkCudaErrors(cuDeviceGet(&device, 0));
char name[128];
checkCudaErrors(cuDeviceGetName(name, 128, device));
std::cout << "Using CUDA Device [0]: " << name << "\n";
int devMajor, devMinor;
checkCudaErrors(cuDeviceComputeCapability(&devMajor, &devMinor, device));
std::cout << "Device Compute Capability: "
<< devMajor << "." << devMinor << "\n";
if (devMajor < 2) {
std::cerr << "ERROR: Device 0 is not SM 2.0 or greater\n";
return 1;
}
std::ifstream t("kernel.ptx");
if (!t.is_open()) {
std::cerr << "kernel.ptx not found\n";
return 1;
}
std::string str((std::istreambuf_iterator<char>(t)),
std::istreambuf_iterator<char>());
// Create driver context
checkCudaErrors(cuCtxCreate(&context, 0, device));
// Create module from the PTX; the driver JIT compiles it for this device
checkCudaErrors(cuModuleLoadDataEx(&cudaModule, str.c_str(), 0, 0, 0));
// Get kernel function
checkCudaErrors(cuModuleGetFunction(&function, cudaModule, "kernel"));
// Device data
CUdeviceptr devBufferA;
CUdeviceptr devBufferB;
CUdeviceptr devBufferC;
checkCudaErrors(cuMemAlloc(&devBufferA, sizeof(float)*16));
checkCudaErrors(cuMemAlloc(&devBufferB, sizeof(float)*16));
checkCudaErrors(cuMemAlloc(&devBufferC, sizeof(float)*16));
float* hostA = new float[16];
float* hostB = new float[16];
float* hostC = new float[16];
// Populate input
for (unsigned i = 0; i != 16; ++i) {
hostA[i] = (float)i;
hostB[i] = (float)(2*i);
hostC[i] = 0.0f;
}
checkCudaErrors(cuMemcpyHtoD(devBufferA, &hostA[0], sizeof(float)*16));
checkCudaErrors(cuMemcpyHtoD(devBufferB, &hostB[0], sizeof(float)*16));
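// Launch a single CTA of 16 threads: one thread per element of the
// 16-element arrays, since the kernel indexes purely by thread ID.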
unsigned blockSizeX = 16;
unsigned blockSizeY = 1;
unsigned blockSizeZ = 1;
unsigned gridSizeX = 1;
unsigned gridSizeY = 1;
unsigned gridSizeZ = 1;
// Kernel parameters: cuLaunchKernel expects an array of pointers, one per argument
void *KernelParams[] = { &devBufferA, &devBufferB, &devBufferC };
std::cout << "Launching kernel\n";
// Kernel launch
checkCudaErrors(cuLaunchKernel(function, gridSizeX, gridSizeY, gridSizeZ,
blockSizeX, blockSizeY, blockSizeZ,
0, NULL, KernelParams, NULL));
// Retrieve device data
checkCudaErrors(cuMemcpyDtoH(&hostC[0], devBufferC, sizeof(float)*16));
std::cout << "Results:\n";
for (unsigned i = 0; i != 16; ++i) {
std::cout << hostA[i] << " + " << hostB[i] << " = " << hostC[i] << "\n";
}
// Clean up host memory
delete [] hostA;
delete [] hostB;
delete [] hostC;
// Clean up device resources
checkCudaErrors(cuMemFree(devBufferA));
checkCudaErrors(cuMemFree(devBufferB));
checkCudaErrors(cuMemFree(devBufferC));
checkCudaErrors(cuModuleUnload(cudaModule));
checkCudaErrors(cuCtxDestroy(context));
return 0;
}
You will need to link against the CUDA driver library and tell the compiler
where to find ``cuda.h``.
.. code-block:: text
# clang++ sample.cpp -o sample -O2 -g -I/usr/local/cuda-5.5/include -lcuda
We don't need to specify a path to ``libcuda.so`` since this is installed in a
system location by the driver, not the CUDA toolkit.
If everything goes as planned, you should see the following output when
running the compiled program:
.. code-block:: text
Using CUDA Device [0]: GeForce GTX 680
Device Compute Capability: 3.0
Launching kernel
Results:
0 + 0 = 0
1 + 2 = 3
2 + 4 = 6
3 + 6 = 9
4 + 8 = 12
5 + 10 = 15
6 + 12 = 18
7 + 14 = 21
8 + 16 = 24
9 + 18 = 27
10 + 20 = 30
11 + 22 = 33
12 + 24 = 36
13 + 26 = 39
14 + 28 = 42
15 + 30 = 45
.. note::
You will likely see a different device identifier based on your hardware.
Tutorial: Linking with Libdevice
================================
In this tutorial, we show a simple example of linking LLVM IR with the
libdevice library. We will use the same kernel as the previous tutorial,
except that we will compute ``C = pow(A, B)`` instead of ``C = A + B``.
Libdevice provides an ``__nv_powf`` function that we will use.
.. code-block:: llvm
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
target triple = "nvptx64-nvidia-cuda"
; Intrinsic to read X component of thread ID
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x() readnone nounwind
; libdevice function
declare float @__nv_powf(float, float)
define void @kernel(ptr addrspace(1) %A,
ptr addrspace(1) %B,
ptr addrspace(1) %C) {
entry:
; What is my ID?
%id = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x() readnone nounwind
; Compute pointers into A, B, and C
%ptrA = getelementptr float, ptr addrspace(1) %A, i32 %id
%ptrB = getelementptr float, ptr addrspace(1) %B, i32 %id
%ptrC = getelementptr float, ptr addrspace(1) %C, i32 %id
; Read A, B
%valA = load float, ptr addrspace(1) %ptrA, align 4
%valB = load float, ptr addrspace(1) %ptrB, align 4
; Compute C = pow(A, B)
%valC = call float @__nv_powf(float %valA, float %valB)
; Store back to C
store float %valC, ptr addrspace(1) %ptrC, align 4
ret void
}
!nvvm.annotations = !{!0}
!0 = !{ptr @kernel, !"kernel", i32 1}
To compile this kernel, we perform the following steps:
1. Link with libdevice
2. Internalize all but the public kernel function
3. Run ``NVVMReflect`` and set ``__CUDA_FTZ`` to 0
4. Optimize the linked module
5. Codegen the module
These steps can be performed by the LLVM ``llvm-link``, ``opt``, and ``llc``
tools. In a complete compiler, these steps can also be performed entirely
programmatically by setting up an appropriate pass configuration (see
:ref:`libdevice`).
.. code-block:: text
# llvm-link t2.bc libdevice.compute_20.10.bc -o t2.linked.bc
# opt -internalize -internalize-public-api-list=kernel -nvvm-reflect-list=__CUDA_FTZ=0 -nvvm-reflect -O3 t2.linked.bc -o t2.opt.bc
# llc -mcpu=sm_20 t2.opt.bc -o t2.ptx
.. note::
The ``-nvvm-reflect-list=__CUDA_FTZ=0`` option is not strictly required, as
any undefined variables will default to zero. It is shown here for
illustration.
This gives us the following PTX (excerpt):
.. code-block:: text
//
// Generated by LLVM NVPTX Back-End
//
.version 3.1
.target sm_20
.address_size 64
// .globl kernel
// @kernel
.visible .entry kernel(
.param .u64 kernel_param_0,
.param .u64 kernel_param_1,
.param .u64 kernel_param_2
)
{
.reg .pred %p<30>;
.reg .f32 %f<111>;
.reg .s32 %r<21>;
.reg .s64 %rl<8>;
// %bb.0: // %entry
ld.param.u64 %rl2, [kernel_param_0];
mov.u32 %r3, %tid.x;
ld.param.u64 %rl3, [kernel_param_1];
mul.wide.s32 %rl4, %r3, 4;
add.s64 %rl5, %rl2, %rl4;
ld.param.u64 %rl6, [kernel_param_2];
add.s64 %rl7, %rl3, %rl4;
add.s64 %rl1, %rl6, %rl4;
ld.global.f32 %f1, [%rl5];
ld.global.f32 %f2, [%rl7];
setp.eq.f32 %p1, %f1, 0f3F800000;
setp.eq.f32 %p2, %f2, 0f00000000;
or.pred %p3, %p1, %p2;
@%p3 bra BB0_1;
bra.uni BB0_2;
BB0_1:
mov.f32 %f110, 0f3F800000;
st.global.f32 [%rl1], %f110;
ret;
BB0_2: // %__nv_isnanf.exit.i
abs.f32 %f4, %f1;
setp.gtu.f32 %p4, %f4, 0f7F800000;
@%p4 bra BB0_4;
// %bb.3: // %__nv_isnanf.exit5.i
abs.f32 %f5, %f2;
setp.le.f32 %p5, %f5, 0f7F800000;
@%p5 bra BB0_5;
BB0_4: // %.critedge1.i
add.f32 %f110, %f1, %f2;
st.global.f32 [%rl1], %f110;
ret;
BB0_5: // %__nv_isinff.exit.i
...
BB0_26: // %__nv_truncf.exit.i.i.i.i.i
mul.f32 %f90, %f107, 0f3FB8AA3B;
cvt.rzi.f32.f32 %f91, %f90;
mov.f32 %f92, 0fBF317200;
fma.rn.f32 %f93, %f91, %f92, %f107;
mov.f32 %f94, 0fB5BFBE8E;
fma.rn.f32 %f95, %f91, %f94, %f93;
mul.f32 %f89, %f95, 0f3FB8AA3B;
// inline asm
ex2.approx.ftz.f32 %f88,%f89;
// inline asm
add.f32 %f96, %f91, 0f00000000;
ex2.approx.f32 %f97, %f96;
mul.f32 %f98, %f88, %f97;
setp.lt.f32 %p15, %f107, 0fC2D20000;
selp.f32 %f99, 0f00000000, %f98, %p15;
setp.gt.f32 %p16, %f107, 0f42D20000;
selp.f32 %f110, 0f7F800000, %f99, %p16;
setp.eq.f32 %p17, %f110, 0f7F800000;
@%p17 bra BB0_28;
// %bb.27:
fma.rn.f32 %f110, %f110, %f108, %f110;
BB0_28: // %__internal_accurate_powf.exit.i
setp.lt.f32 %p18, %f1, 0f00000000;
setp.eq.f32 %p19, %f3, 0f3F800000;
and.pred %p20, %p18, %p19;
@!%p20 bra BB0_30;
bra.uni BB0_29;
BB0_29:
mov.b32 %r9, %f110;
xor.b32 %r10, %r9, -2147483648;
mov.b32 %f110, %r10;
BB0_30: // %__nv_powf.exit
st.global.f32 [%rl1], %f110;
ret;
}