aboutsummaryrefslogtreecommitdiff
path: root/offload/libomptarget/KernelLanguage/API.cpp
blob: ef1aad829e7bd7b66e181af6282f09330000d68e (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
//===------ API.cpp - Kernel Language (CUDA/HIP) entry points ----- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Entry points for CUDA/HIP-style kernel launch configuration and dispatch.
//===----------------------------------------------------------------------===//

#include "Shared/APITypes.h"

#include <cstdio>

// Mirror of the CUDA/HIP `dim3` type describing a 3-D launch extent.
// All dimensions default to 0.
struct dim3 {
  unsigned x = 0;
  unsigned y = 0;
  unsigned z = 0;
};

// Per-thread record of the most recently pushed kernel launch configuration;
// written by __llvmPushCallConfiguration and read back by
// __llvmPopCallConfiguration below.
struct __omp_kernel_t {
  dim3 __grid_size;       // Blocks/teams per dimension.
  dim3 __block_size;      // Threads per block, per dimension.
  size_t __shared_memory; // Dynamic shared memory (bytes) for the launch.

  void *__stream;         // Opaque stream handle; stored but not otherwise
                          // interpreted here.
};

// One configuration slot per OpenMP thread — push/pop pairs are assumed to
// execute on the same thread.
static __omp_kernel_t __current_kernel = {};
#pragma omp threadprivate(__current_kernel);

extern "C" {

// TODO: There is little reason we need to keep these names or the way calls are
// issued. For now we do to avoid modifying Clang's CUDA codegen. Unclear when
// we actually need to push/pop configurations.
// Stash the launch configuration in this thread's slot so the matching pop
// (emitted by Clang around the kernel stub) can retrieve it. Always reports
// success.
unsigned __llvmPushCallConfiguration(dim3 __grid_size, dim3 __block_size,
                                     size_t __shared_memory, void *__stream) {
  __current_kernel.__grid_size = __grid_size;
  __current_kernel.__block_size = __block_size;
  __current_kernel.__shared_memory = __shared_memory;
  __current_kernel.__stream = __stream;
  return 0;
}

// Hand back the configuration stored by the matching push. Per the codegen
// contract, __stream is declared void* but actually points at a void* slot,
// hence the cast before storing through it. Always reports success.
unsigned __llvmPopCallConfiguration(dim3 *__grid_size, dim3 *__block_size,
                                    size_t *__shared_memory, void *__stream) {
  const __omp_kernel_t &__kernel = __current_kernel;
  *__grid_size = __kernel.__grid_size;
  *__block_size = __kernel.__block_size;
  *__shared_memory = __kernel.__shared_memory;
  *static_cast<void **>(__stream) = __kernel.__stream;
  return 0;
}

// Forward declaration of the libomptarget kernel-launch entry point; the
// definition lives elsewhere in the runtime.
int __tgt_target_kernel(void *Loc, int64_t DeviceId, int32_t NumTeams,
                        int32_t ThreadLimit, const void *HostPtr,
                        KernelArgsTy *Args);

// Translate a CUDA/HIP-style kernel launch into a libomptarget kernel launch.
// NOTE(review): the `stream` parameter is accepted but not used here — the
// launch goes through __tgt_target_kernel without it; confirm intended.
unsigned llvmLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
                          void *args, size_t sharedMem, void *stream) {
  KernelArgsTy Args = {};
  Args.DynCGroupMem = sharedMem;

  // Map the 3-D grid/block extents onto the teams/thread-limit triples.
  const unsigned GridDims[3] = {gridDim.x, gridDim.y, gridDim.z};
  const unsigned BlockDims[3] = {blockDim.x, blockDim.y, blockDim.z};
  for (int Dim = 0; Dim < 3; ++Dim) {
    Args.NumTeams[Dim] = GridDims[Dim];
    Args.ThreadLimit[Dim] = BlockDims[Dim];
  }

  Args.ArgPtrs = reinterpret_cast<void **>(args);
  Args.Flags.IsCUDA = true;
  // Null source location, device 0; the scalar team/thread counts mirror the
  // x dimension of the launch.
  return __tgt_target_kernel(nullptr, 0, gridDim.x, blockDim.x, func, &Args);
}
}