author		Andrew Stubbs <ams@codesourcery.com>	2022-12-01 17:30:21 +0000
committer	Andrew Stubbs <ams@codesourcery.com>	2023-02-02 11:47:03 +0000
commit		f6fff8a6fcd8375aa1056671fcd8de76304e8973 (patch)
tree		5c5cd81a56864b4601ec8a13cd5e6fa728113c75	/libgomp
parent		8da7476c5fa8870c2fcded48d3de95978434c1be (diff)
amdgcn, libgomp: Manually allocated stacks
Switch from using stacks in the "private segment" to using a memory block
allocated on the host side.  The primary reason is to permit the reverse
offload implementation to access values located on the device stack, but
there may also be performance benefits, especially with repeated kernel
invocations.

This implementation unifies the stacks with the "team arena" optimization
feature, and now allows both to have run-time configurable sizes.

A new ABI is needed, so all libraries must be rebuilt, and newlib must be
version 4.3.0.20230120 or newer.

gcc/ChangeLog:

	* config/gcn/gcn-run.cc: Include libgomp-gcn.h.
	(struct kernargs): Replace the common content with kernargs_abi.
	(struct heap): Delete.
	(main): Read GCN_STACK_SIZE envvar.  Allocate space for the device
	stacks.  Write the new kernargs fields.
	* config/gcn/gcn.cc (gcn_option_override): Remove stack_size_opt.
	(default_requested_args): Remove PRIVATE_SEGMENT_BUFFER_ARG and
	PRIVATE_SEGMENT_WAVE_OFFSET_ARG.
	(gcn_addr_space_convert): Mask the QUEUE_PTR_ARG content.
	(gcn_expand_prologue): Move the TARGET_PACKED_WORK_ITEMS to the top.
	Set up the stacks from the values in the kernargs, not private.
	(gcn_expand_builtin_1): Match the stack configuration in the
	prologue.
	(gcn_hsa_declare_function_name): Turn off the private segment.
	(gcn_conditional_register_usage): Ensure QUEUE_PTR is fixed.
	* config/gcn/gcn.h (FIXED_REGISTERS): Fix the QUEUE_PTR register.
	* config/gcn/gcn.opt (mstack-size): Change the description.

include/ChangeLog:

	* gomp-constants.h (GOMP_VERSION_GCN): Bump.

libgomp/ChangeLog:

	* config/gcn/libgomp-gcn.h (DEFAULT_GCN_STACK_SIZE): New define.
	(DEFAULT_TEAM_ARENA_SIZE): New define.
	(struct heap): Move to this file.
	(struct kernargs_abi): Likewise.
	* config/gcn/team.c (gomp_gcn_enter_kernel): Use team arena size
	from the kernargs.
	* libgomp.h: Include libgomp-gcn.h.
	(TEAM_ARENA_SIZE): Remove.
	(team_malloc): Update the error message.
	* plugin/plugin-gcn.c (struct kernargs): Move common content to
	struct kernargs_abi.
	(struct agent_info): Rename team arenas to ephemeral memories.
	(struct team_arena_list): Rename ....
	(struct ephemeral_memories_list): ... to this.
	(struct heap): Delete.
	(team_arena_size): New variable.
	(stack_size): New variable.
	(print_kernel_dispatch): Update debug messages.
	(init_environment_variables): Read GCN_TEAM_ARENA_SIZE.
	Read GCN_STACK_SIZE.
	(get_team_arena): Rename ...
	(configure_ephemeral_memories): ... to this, and set up stacks.
	(release_team_arena): Rename ...
	(release_ephemeral_memories): ... to this.
	(destroy_team_arenas): Rename ...
	(destroy_ephemeral_memories): ... to this.
	(create_kernel_dispatch): Add num_threads parameter.
	Adjust for kernargs_abi refactor and ephemeral memories.
	(release_kernel_dispatch): Adjust for ephemeral memories.
	(run_kernel): Pass thread-count to create_kernel_dispatch.
	(GOMP_OFFLOAD_init_device): Adjust for ephemeral memories.
	(GOMP_OFFLOAD_fini_device): Adjust for ephemeral memories.

gcc/testsuite/ChangeLog:

	* gcc.c-torture/execute/pr47237.c: Xfail on amdgcn.
	* gcc.dg/builtin-apply3.c: Xfail for amdgcn.
	* gcc.dg/builtin-apply4.c: Xfail for amdgcn.
	* gcc.dg/torture/stackalign/builtin-apply-3.c: Xfail for amdgcn.
	* gcc.dg/torture/stackalign/builtin-apply-4.c: Xfail for amdgcn.
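Note: all ephemeral memory for a launch now lives in a single host-side
allocation, with the team arenas first and the per-wavefront stacks after
them.  The standalone sketch below only illustrates that size and offset
arithmetic; the struct and function names here are invented for the
example, while the real logic is in configure_ephemeral_memories in the
plugin-gcn.c diff further down.

/* Illustrative sketch only: mirrors the size/offset arithmetic done by
   configure_ephemeral_memories in plugin-gcn.c.  */
#include <stdio.h>

struct ephemeral_layout
{
  size_t total_size;     /* One HSA allocation covers both areas.  */
  size_t stack_offset;   /* Stacks follow all of the team arenas.  */
};

static struct ephemeral_layout
compute_layout (int num_teams, int num_threads, int arena_size_per_team,
		int stack_size_per_thread, int is_openmp)
{
  struct ephemeral_layout l;
  /* OpenACC kernels do not use the team arena, only the stacks.  */
  size_t arena_total
    = (size_t) (is_openmp ? arena_size_per_team : 0) * num_teams;
  l.stack_offset = arena_total;
  l.total_size = arena_total
		 + (size_t) num_teams * num_threads * stack_size_per_thread;
  return l;
}

int
main (void)
{
  /* Defaults from libgomp-gcn.h: 64KB arena per team, 32KB stack per
     thread (wavefront).  */
  struct ephemeral_layout l = compute_layout (4, 16, 64 * 1024, 32 * 1024, 1);
  printf ("total %zu bytes, stacks begin at offset %zu\n",
	  l.total_size, l.stack_offset);
  return 0;
}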
Diffstat (limited to 'libgomp')
-rw-r--r--	libgomp/config/gcn/libgomp-gcn.h	|  34
-rw-r--r--	libgomp/config/gcn/team.c	|   8
-rw-r--r--	libgomp/libgomp.h	|   5
-rw-r--r--	libgomp/plugin/plugin-gcn.c	| 252
4 files changed, 179 insertions, 120 deletions
diff --git a/libgomp/config/gcn/libgomp-gcn.h b/libgomp/config/gcn/libgomp-gcn.h
index cc0fc13..f62b7dd 100644
--- a/libgomp/config/gcn/libgomp-gcn.h
+++ b/libgomp/config/gcn/libgomp-gcn.h
@@ -30,6 +30,40 @@
#ifndef LIBGOMP_GCN_H
#define LIBGOMP_GCN_H 1
+#define DEFAULT_GCN_STACK_SIZE (32*1024)
+#define DEFAULT_TEAM_ARENA_SIZE (64*1024)
+
+struct heap
+{
+ int64_t size;
+ char data[0];
+};
+
+/* This struct defines the (unofficial) ABI-defined values the compiler
+ expects to find in the first bytes of the kernargs space.
+ The plugin may choose to place additional data later in the kernargs
+ memory allocation, but those are not in any fixed location. */
+struct kernargs_abi {
+ /* Leave space for the real kernel arguments.
+ OpenACC and OpenMP only use one pointer. */
+ int64_t dummy1;
+ int64_t dummy2;
+
+ /* A pointer to struct output, below, for console output data. */
+ int64_t out_ptr; /* Offset 16. */
+
+ /* A pointer to struct heap. */
+ int64_t heap_ptr; /* Offset 24. */
+
+ /* A pointer to the ephemeral memory areas.
+ The team arena is only needed for OpenMP.
+ Each should have enough space for all the teams and threads. */
+ int64_t arena_ptr; /* Offset 32. */
+ int64_t stack_ptr; /* Offset 40. */
+ int arena_size_per_team; /* Offset 48. */
+ int stack_size_per_thread; /* Offset 52. */
+};
+
/* This struct is also used in Newlib's libc/sys/amdgcn/write.c. */
struct output
{
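Note: the device-side prologue changes that consume these fields are in
gcc/config/gcn/gcn.cc and are not part of this libgomp diff.  Purely as
an illustration, a wavefront's private stack could be derived from the
fields above roughly as sketched below; this is a hypothetical example,
and the team/thread indexing scheme is an assumption, not taken from the
compiler.

/* Hypothetical sketch only: how a wavefront might locate its stack slice
   from the kernargs fields above.  Assumes <stdint.h> and the struct
   kernargs_abi definition; the indexing is an assumption made for this
   example, not the actual prologue code.  */
static inline char *
example_stack_base (const struct kernargs_abi *kernargs, int team_id,
		    int thread_id, int threads_per_team)
{
  int64_t slot = (int64_t) team_id * threads_per_team + thread_id;
  return (char *) (uintptr_t) kernargs->stack_ptr
	 + slot * kernargs->stack_size_per_thread;
}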
diff --git a/libgomp/config/gcn/team.c b/libgomp/config/gcn/team.c
index 527aa08..f03207c 100644
--- a/libgomp/config/gcn/team.c
+++ b/libgomp/config/gcn/team.c
@@ -60,14 +60,16 @@ gomp_gcn_enter_kernel (void)
/* Initialize the team arena for optimized memory allocation.
The arena has been allocated on the host side, and the address
passed in via the kernargs. Each team takes a small slice of it. */
- void **kernargs = (void**) __builtin_gcn_kernarg_ptr ();
- void *team_arena = (kernargs[4] + TEAM_ARENA_SIZE*teamid);
+ struct kernargs_abi *kernargs =
+ (struct kernargs_abi*) __builtin_gcn_kernarg_ptr ();
+ void *team_arena = ((void*)kernargs->arena_ptr
+ + kernargs->arena_size_per_team * teamid);
void * __lds *arena_start = (void * __lds *)TEAM_ARENA_START;
void * __lds *arena_free = (void * __lds *)TEAM_ARENA_FREE;
void * __lds *arena_end = (void * __lds *)TEAM_ARENA_END;
*arena_start = team_arena;
*arena_free = team_arena;
- *arena_end = team_arena + TEAM_ARENA_SIZE;
+ *arena_end = team_arena + kernargs->arena_size_per_team;
/* Allocate and initialize the team-local-storage data. */
struct gomp_thread *thrs = team_malloc_cleared (sizeof (*thrs)
diff --git a/libgomp/libgomp.h b/libgomp/libgomp.h
index e7e409f..ba8fe34 100644
--- a/libgomp/libgomp.h
+++ b/libgomp/libgomp.h
@@ -112,8 +112,8 @@ extern void gomp_aligned_free (void *);
/* Optimized allocators for team-specific data that will die with the team. */
#ifdef __AMDGCN__
+#include "libgomp-gcn.h"
/* The arena is initialized in config/gcn/team.c. */
-#define TEAM_ARENA_SIZE 64*1024 /* Must match the value in plugin-gcn.c. */
#define TEAM_ARENA_START 16 /* LDS offset of free pointer. */
#define TEAM_ARENA_FREE 24 /* LDS offset of free pointer. */
#define TEAM_ARENA_END 32 /* LDS offset of end pointer. */
@@ -135,7 +135,8 @@ team_malloc (size_t size)
{
/* While this is experimental, let's make sure we know when OOM
happens. */
- const char msg[] = "GCN team arena exhausted\n";
+ const char msg[] = "GCN team arena exhausted;"
+ " configure with GCN_TEAM_ARENA_SIZE=bytes\n";
write (2, msg, sizeof(msg)-1);
/* Fall back to using the heap (slowly). */
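Note: team_malloc itself (defined just above the hunk shown here) is a
simple bump allocator over the per-team arena, which is why exhausting
the arena is treated as a slow-path event.  The sketch below is a
simplified model of that behaviour using ordinary pointers; it is not the
libgomp source, which keeps the free/end pointers at the TEAM_ARENA_* LDS
offsets listed above.

/* Simplified model of the team-arena bump allocator.  The real code
   keeps these pointers in LDS and falls back to the heap (printing the
   message above) when the arena runs out.  */
#include <stddef.h>

static char *arena_free, *arena_end;

static void
model_arena_init (void *base, size_t size)
{
  arena_free = (char *) base;
  arena_end = (char *) base + size;
}

static void *
model_team_malloc (size_t size)
{
  size = (size + 7) & ~(size_t) 7;   /* Keep 8-byte alignment.  */
  if (arena_free + size > arena_end)
    return NULL;		     /* Real code falls back to the heap.  */
  void *result = arena_free;
  arena_free += size;
  return result;
}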
diff --git a/libgomp/plugin/plugin-gcn.c b/libgomp/plugin/plugin-gcn.c
index b5d9dac..a7b3505 100644
--- a/libgomp/plugin/plugin-gcn.c
+++ b/libgomp/plugin/plugin-gcn.c
@@ -237,20 +237,7 @@ struct kernel_dispatch
in libgomp target code. */
struct kernargs {
- /* Leave space for the real kernel arguments.
- OpenACC and OpenMP only use one pointer. */
- int64_t dummy1;
- int64_t dummy2;
-
- /* A pointer to struct output, below, for console output data. */
- int64_t out_ptr;
-
- /* A pointer to struct heap, below. */
- int64_t heap_ptr;
-
- /* A pointer to an ephemeral memory arena.
- Only needed for OpenMP. */
- int64_t arena_ptr;
+ struct kernargs_abi abi;
/* Output data. */
struct output output_data;
@@ -426,9 +413,9 @@ struct agent_info
/* The HSA memory region from which to allocate device data. */
hsa_region_t data_region;
- /* Allocated team arenas. */
- struct team_arena_list *team_arena_list;
- pthread_mutex_t team_arena_write_lock;
+ /* Allocated ephemeral memories (team arena and stack space). */
+ struct ephemeral_memories_list *ephemeral_memories_list;
+ pthread_mutex_t ephemeral_memories_write_lock;
/* Read-write lock that protects kernels which are running or about to be run
from interference with loading and unloading of images. Needs to be
@@ -510,17 +497,18 @@ struct module_info
};
/* A linked list of memory arenas allocated on the device.
- These are only used by OpenMP, as a means to optimize per-team malloc. */
+ These are used by OpenMP, as a means to optimize per-team malloc,
+ and for host-accessible stack space. */
-struct team_arena_list
+struct ephemeral_memories_list
{
- struct team_arena_list *next;
+ struct ephemeral_memories_list *next;
- /* The number of teams determines the size of the allocation. */
- int num_teams;
- /* The device address of the arena itself. */
- void *arena;
- /* A flag to prevent two asynchronous kernels trying to use the same arena.
+ /* The size is determined by the number of teams and threads. */
+ size_t size;
+ /* The device address of the allocated memory. */
+ void *address;
+ /* A flag to prevent two asynchronous kernels trying to use the same memory.
The mutex is locked until the kernel exits. */
pthread_mutex_t in_use;
};
@@ -539,15 +527,6 @@ struct hsa_context_info
char driver_version_s[30];
};
-/* Format of the on-device heap.
-
- This must match the definition in Newlib and gcn-run. */
-
-struct heap {
- int64_t size;
- char data[0];
-};
-
/* }}} */
/* {{{ Global variables */
@@ -565,6 +544,11 @@ static struct hsa_runtime_fn_info hsa_fns;
static size_t gcn_kernel_heap_size = DEFAULT_GCN_HEAP_SIZE;
+/* Ephemeral memory sizes for each kernel launch. */
+
+static int team_arena_size = DEFAULT_TEAM_ARENA_SIZE;
+static int stack_size = DEFAULT_GCN_STACK_SIZE;
+
/* Flag to decide whether print to stderr information about what is going on.
Set in init_debug depending on environment variables. */
@@ -1020,9 +1004,13 @@ print_kernel_dispatch (struct kernel_dispatch *dispatch, unsigned indent)
fprintf (stderr, "%*squeue: %p\n", indent, "", dispatch->queue);
fprintf (stderr, "%*skernarg_address: %p\n", indent, "", kernargs);
fprintf (stderr, "%*sheap address: %p\n", indent, "",
- (void*)kernargs->heap_ptr);
- fprintf (stderr, "%*sarena address: %p\n", indent, "",
- (void*)kernargs->arena_ptr);
+ (void*)kernargs->abi.heap_ptr);
+ fprintf (stderr, "%*sarena address: %p (%d bytes per workgroup)\n", indent,
+ "", (void*)kernargs->abi.arena_ptr,
+ kernargs->abi.arena_size_per_team);
+ fprintf (stderr, "%*sstack address: %p (%d bytes per wavefront)\n", indent,
+ "", (void*)kernargs->abi.stack_ptr,
+ kernargs->abi.stack_size_per_thread);
fprintf (stderr, "%*sobject: %lu\n", indent, "", dispatch->object);
fprintf (stderr, "%*sprivate_segment_size: %u\n", indent, "",
dispatch->private_segment_size);
@@ -1082,6 +1070,22 @@ init_environment_variables (void)
if (tmp)
gcn_kernel_heap_size = tmp;
}
+
+ const char *arena = secure_getenv ("GCN_TEAM_ARENA_SIZE");
+ if (arena)
+ {
+ int tmp = atoi (arena);
+ if (tmp)
+ team_arena_size = tmp;;
+ }
+
+ const char *stack = secure_getenv ("GCN_STACK_SIZE");
+ if (stack)
+ {
+ int tmp = atoi (stack);
+ if (tmp)
+ stack_size = tmp;;
+ }
}
/* Return malloc'd string with name of SYMBOL. */
@@ -1693,85 +1697,103 @@ isa_code(const char *isa) {
/* }}} */
/* {{{ Run */
-/* Create or reuse a team arena.
+/* Create or reuse a team arena and stack space.
Team arenas are used by OpenMP to avoid calling malloc multiple times
while setting up each team. This is purely a performance optimization.
- Allocating an arena also costs performance, albeit on the host side, so
- this function will reuse an existing arena if a large enough one is idle.
- The arena is released, but not deallocated, when the kernel exits. */
+ The stack space is used by all kernels. We must allocate it in such a
+ way that the reverse offload implementation can access the data.
-static void *
-get_team_arena (struct agent_info *agent, int num_teams)
+ Allocating this memory costs performance, so this function will reuse an
+ existing allocation if a large enough one is idle.
+ The memory lock is released, but not deallocated, when the kernel exits. */
+
+static void
+configure_ephemeral_memories (struct kernel_info *kernel,
+ struct kernargs_abi *kernargs, int num_teams,
+ int num_threads)
{
- struct team_arena_list **next_ptr = &agent->team_arena_list;
- struct team_arena_list *item;
+ struct agent_info *agent = kernel->agent;
+ struct ephemeral_memories_list **next_ptr = &agent->ephemeral_memories_list;
+ struct ephemeral_memories_list *item;
+
+ int actual_arena_size = (kernel->kind == KIND_OPENMP
+ ? team_arena_size : 0);
+ int actual_arena_total_size = actual_arena_size * num_teams;
+ size_t size = (actual_arena_total_size
+ + num_teams * num_threads * stack_size);
for (item = *next_ptr; item; next_ptr = &item->next, item = item->next)
{
- if (item->num_teams < num_teams)
+ if (item->size < size)
continue;
- if (pthread_mutex_trylock (&item->in_use))
- continue;
-
- return item->arena;
+ if (pthread_mutex_trylock (&item->in_use) == 0)
+ break;
}
- GCN_DEBUG ("Creating a new arena for %d teams\n", num_teams);
-
- if (pthread_mutex_lock (&agent->team_arena_write_lock))
+ if (!item)
{
- GOMP_PLUGIN_error ("Could not lock a GCN agent program mutex");
- return false;
- }
- item = malloc (sizeof (*item));
- item->num_teams = num_teams;
- item->next = NULL;
- *next_ptr = item;
+ GCN_DEBUG ("Creating a new %sstack for %d teams with %d threads"
+ " (%zd bytes)\n", (actual_arena_size ? "arena and " : ""),
+ num_teams, num_threads, size);
- if (pthread_mutex_init (&item->in_use, NULL))
- {
- GOMP_PLUGIN_error ("Failed to initialize a GCN team arena write mutex");
- return false;
- }
- if (pthread_mutex_lock (&item->in_use))
- {
- GOMP_PLUGIN_error ("Could not lock a GCN agent program mutex");
- return false;
- }
- if (pthread_mutex_unlock (&agent->team_arena_write_lock))
- {
- GOMP_PLUGIN_error ("Could not unlock a GCN agent program mutex");
- return false;
- }
+ if (pthread_mutex_lock (&agent->ephemeral_memories_write_lock))
+ {
+ GOMP_PLUGIN_error ("Could not lock a GCN agent program mutex");
+ return;
+ }
+ item = malloc (sizeof (*item));
+ item->size = size;
+ item->next = NULL;
+ *next_ptr = item;
- const int TEAM_ARENA_SIZE = 64*1024; /* Must match libgomp.h. */
- hsa_status_t status;
- status = hsa_fns.hsa_memory_allocate_fn (agent->data_region,
- TEAM_ARENA_SIZE*num_teams,
- &item->arena);
- if (status != HSA_STATUS_SUCCESS)
- hsa_fatal ("Could not allocate memory for GCN kernel arena", status);
- status = hsa_fns.hsa_memory_assign_agent_fn (item->arena, agent->id,
- HSA_ACCESS_PERMISSION_RW);
- if (status != HSA_STATUS_SUCCESS)
- hsa_fatal ("Could not assign arena memory to device", status);
+ if (pthread_mutex_init (&item->in_use, NULL))
+ {
+ GOMP_PLUGIN_error ("Failed to initialize a GCN memory write mutex");
+ return;
+ }
+ if (pthread_mutex_lock (&item->in_use))
+ {
+ GOMP_PLUGIN_error ("Could not lock a GCN agent program mutex");
+ return;
+ }
+ if (pthread_mutex_unlock (&agent->ephemeral_memories_write_lock))
+ {
+ GOMP_PLUGIN_error ("Could not unlock a GCN agent program mutex");
+ return;
+ }
+
+ hsa_status_t status;
+ status = hsa_fns.hsa_memory_allocate_fn (agent->data_region, size,
+ &item->address);
+ if (status != HSA_STATUS_SUCCESS)
+ hsa_fatal ("Could not allocate memory for GCN kernel arena", status);
+ status = hsa_fns.hsa_memory_assign_agent_fn (item->address, agent->id,
+ HSA_ACCESS_PERMISSION_RW);
+ if (status != HSA_STATUS_SUCCESS)
+ hsa_fatal ("Could not assign arena & stack memory to device", status);
+ }
- return item->arena;
+ kernargs->arena_ptr = (actual_arena_total_size
+ ? (uint64_t)item->address
+ : 0);
+ kernargs->stack_ptr = (uint64_t)item->address + actual_arena_total_size;
+ kernargs->arena_size_per_team = actual_arena_size;
+ kernargs->stack_size_per_thread = stack_size;
}
-/* Mark a team arena available for reuse. */
+/* Mark an ephemeral memory space available for reuse. */
static void
-release_team_arena (struct agent_info* agent, void *arena)
+release_ephemeral_memories (struct agent_info* agent, void *address)
{
- struct team_arena_list *item;
+ struct ephemeral_memories_list *item;
- for (item = agent->team_arena_list; item; item = item->next)
+ for (item = agent->ephemeral_memories_list; item; item = item->next)
{
- if (item->arena == arena)
+ if (item->address == address)
{
if (pthread_mutex_unlock (&item->in_use))
GOMP_PLUGIN_error ("Could not unlock a GCN agent program mutex");
@@ -1784,22 +1806,22 @@ release_team_arena (struct agent_info* agent, void *arena)
/* Clean up all the allocated team arenas. */
static bool
-destroy_team_arenas (struct agent_info *agent)
+destroy_ephemeral_memories (struct agent_info *agent)
{
- struct team_arena_list *item, *next;
+ struct ephemeral_memories_list *item, *next;
- for (item = agent->team_arena_list; item; item = next)
+ for (item = agent->ephemeral_memories_list; item; item = next)
{
next = item->next;
- hsa_fns.hsa_memory_free_fn (item->arena);
+ hsa_fns.hsa_memory_free_fn (item->address);
if (pthread_mutex_destroy (&item->in_use))
{
- GOMP_PLUGIN_error ("Failed to destroy a GCN team arena mutex");
+ GOMP_PLUGIN_error ("Failed to destroy a GCN memory mutex");
return false;
}
free (item);
}
- agent->team_arena_list = NULL;
+ agent->ephemeral_memories_list = NULL;
return true;
}
@@ -1871,7 +1893,8 @@ alloc_by_agent (struct agent_info *agent, size_t size)
the necessary device signals and memory allocations. */
static struct kernel_dispatch *
-create_kernel_dispatch (struct kernel_info *kernel, int num_teams)
+create_kernel_dispatch (struct kernel_info *kernel, int num_teams,
+ int num_threads)
{
struct agent_info *agent = kernel->agent;
struct kernel_dispatch *shadow
@@ -1906,7 +1929,7 @@ create_kernel_dispatch (struct kernel_info *kernel, int num_teams)
struct kernargs *kernargs = shadow->kernarg_address;
/* Zero-initialize the output_data (minimum needed). */
- kernargs->out_ptr = (int64_t)&kernargs->output_data;
+ kernargs->abi.out_ptr = (int64_t)&kernargs->output_data;
kernargs->output_data.next_output = 0;
for (unsigned i = 0;
i < (sizeof (kernargs->output_data.queue)
@@ -1916,13 +1939,10 @@ create_kernel_dispatch (struct kernel_info *kernel, int num_teams)
kernargs->output_data.consumed = 0;
/* Pass in the heap location. */
- kernargs->heap_ptr = (int64_t)kernel->module->heap;
+ kernargs->abi.heap_ptr = (int64_t)kernel->module->heap;
- /* Create an arena. */
- if (kernel->kind == KIND_OPENMP)
- kernargs->arena_ptr = (int64_t)get_team_arena (agent, num_teams);
- else
- kernargs->arena_ptr = 0;
+ /* Create the ephemeral memory spaces. */
+ configure_ephemeral_memories (kernel, &kernargs->abi, num_teams, num_threads);
/* Ensure we can recognize unset return values. */
kernargs->output_data.return_value = 0xcafe0000;
@@ -2006,9 +2026,10 @@ release_kernel_dispatch (struct kernel_dispatch *shadow)
GCN_DEBUG ("Released kernel dispatch: %p\n", shadow);
struct kernargs *kernargs = shadow->kernarg_address;
- void *arena = (void *)kernargs->arena_ptr;
- if (arena)
- release_team_arena (shadow->agent, arena);
+ void *addr = (void *)kernargs->abi.arena_ptr;
+ if (!addr)
+ addr = (void *)kernargs->abi.stack_ptr;
+ release_ephemeral_memories (shadow->agent, addr);
hsa_fns.hsa_memory_free_fn (shadow->kernarg_address);
@@ -2238,7 +2259,8 @@ run_kernel (struct kernel_info *kernel, void *vars,
packet->workgroup_size_z);
struct kernel_dispatch *shadow
- = create_kernel_dispatch (kernel, packet->grid_size_x);
+ = create_kernel_dispatch (kernel, packet->grid_size_x,
+ packet->grid_size_z);
shadow->queue = command_q;
if (debug)
@@ -3280,14 +3302,14 @@ GOMP_OFFLOAD_init_device (int n)
GOMP_PLUGIN_error ("Failed to initialize a GCN agent queue mutex");
return false;
}
- if (pthread_mutex_init (&agent->team_arena_write_lock, NULL))
+ if (pthread_mutex_init (&agent->ephemeral_memories_write_lock, NULL))
{
GOMP_PLUGIN_error ("Failed to initialize a GCN team arena write mutex");
return false;
}
agent->async_queues = NULL;
agent->omp_async_queue = NULL;
- agent->team_arena_list = NULL;
+ agent->ephemeral_memories_list = NULL;
uint32_t queue_size;
hsa_status_t status;
@@ -3640,7 +3662,7 @@ GOMP_OFFLOAD_fini_device (int n)
agent->module = NULL;
}
- if (!destroy_team_arenas (agent))
+ if (!destroy_ephemeral_memories (agent))
return false;
if (!destroy_hsa_program (agent))
@@ -3666,9 +3688,9 @@ GOMP_OFFLOAD_fini_device (int n)
GOMP_PLUGIN_error ("Failed to destroy a GCN agent queue mutex");
return false;
}
- if (pthread_mutex_destroy (&agent->team_arena_write_lock))
+ if (pthread_mutex_destroy (&agent->ephemeral_memories_write_lock))
{
- GOMP_PLUGIN_error ("Failed to destroy a GCN team arena mutex");
+ GOMP_PLUGIN_error ("Failed to destroy a GCN memory mutex");
return false;
}
agent->initialized = false;