1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
|
/* { dg-do run } */
/* Test calloc with omp_alloc. */
#include <omp.h>
#pragma omp requires dynamic_allocators
void
test (int n, omp_allocator_handle_t allocator)
{
#pragma omp target map(to:n) map(to:allocator)
{
int *a;
a = (int *) omp_calloc (n, sizeof (int), allocator);
for (int i = 0; i < n; i++)
if (a[i] != 0)
{
__builtin_printf ("memory not zeroed at %i\n", i);
__builtin_abort ();
}
#pragma omp parallel
for (int i = 0; i < n; i++)
a[i] = i;
for (int i = 0; i < n; i++)
if (a[i] != i)
{
__builtin_printf ("data mismatch at %i\n", i);
__builtin_abort ();
}
omp_free (a, allocator);
}
}
/* Drive TEST over every predefined allocator plus a device-constructed
   low-latency allocator, first with a small size and then with a large one.  */
int
main ()
{
  /* omp_low_lat_mem_alloc doesn't actually get low-latency memory on GPU. */
  omp_alloctrait_t traits[1] = { { omp_atk_access, omp_atv_cgroup } };
  omp_allocator_handle_t gpu_lowlat;
#pragma omp target map(from:gpu_lowlat)
  gpu_lowlat = omp_init_allocator (omp_low_lat_mem_space, 1, traits);

  omp_allocator_handle_t allocators[] = {
    omp_default_mem_alloc,
    omp_large_cap_mem_alloc,
    omp_const_mem_alloc,
    omp_high_bw_mem_alloc,
    omp_low_lat_mem_alloc,
    gpu_lowlat,
    omp_cgroup_mem_alloc,
    omp_pteam_mem_alloc,
    omp_thread_mem_alloc
  };
  int num_allocators = sizeof (allocators) / sizeof (allocators[0]);
  /* First smaller than the low-latency memory limit, then larger.  */
  int sizes[] = { 10, 100000 };

  for (int s = 0; s < 2; s++)
    for (int i = 0; i < num_allocators; i++)
      test (sizes[s], allocators[i]);
  return 0;
}
|