author     Andrew Stubbs <ams@codesourcery.com>   2022-12-01 17:30:21 +0000
committer  Andrew Stubbs <ams@codesourcery.com>   2023-02-02 11:47:03 +0000
commit     f6fff8a6fcd8375aa1056671fcd8de76304e8973
tree       5c5cd81a56864b4601ec8a13cd5e6fa728113c75 /gcc
parent     8da7476c5fa8870c2fcded48d3de95978434c1be
amdgcn, libgomp: Manually allocated stacks
Switch from using stacks in the "private segment" to using a memory block
allocated on the host side. The primary reason is to permit the reverse
offload implementation to access values located on the device stack, but
there may also be performance benefits, especially with repeated kernel
invocations.
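
The host-side flow is roughly as follows. This is a minimal sketch modelled on the gcn-run.cc hunk in the diff below, where device_malloc, heap_region and struct kernargs already exist; the libgomp plugin sets up the same ABI fields through its ephemeral-memories code.

/* Sketch only: allocate one device-visible block for the stacks and
   advertise it to the kernel through the new kernargs ABI fields.
   Assumes the gcn-run.cc declarations (struct kernargs, device_malloc,
   hsa_region_t) are in scope.  */
#include <stdlib.h>
#include <stdint.h>

static void
setup_device_stack (struct kernargs *kernargs, hsa_region_t heap_region)
{
  int stack_size = 1 * 1024 * 1024;		/* 1MB default.  */
  char *envvar = getenv ("GCN_STACK_SIZE");
  if (envvar && atoi (envvar) > 0)
    stack_size = atoi (envvar);

  void *stack = device_malloc (stack_size, heap_region);

  kernargs->abi.stack_ptr = (int64_t) stack;
  kernargs->abi.stack_size_per_thread = stack_size;
  kernargs->abi.arena_ptr = 0;	/* The stand-alone runner has no team arena.  */
}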
This implementation unifies the stacks with the "team arena" optimization
feature, and now allows both to have run-time configurable sizes.
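
The run-time sizing is controlled by two environment variables, GCN_TEAM_ARENA_SIZE and GCN_STACK_SIZE, presumably falling back to the new DEFAULT_TEAM_ARENA_SIZE and DEFAULT_GCN_STACK_SIZE values in libgomp-gcn.h. A hedged sketch of the plugin-side selection follows; the real hook is init_environment_variables in plugin-gcn.c, and init_ephemeral_sizes is a made-up name used only for illustration.

/* Sketch of the run-time size selection.  DEFAULT_TEAM_ARENA_SIZE and
   DEFAULT_GCN_STACK_SIZE come from libgomp-gcn.h; the exact values are
   not spelled out in this commit message.  */
#include <stdlib.h>

static int team_arena_size = DEFAULT_TEAM_ARENA_SIZE;
static int stack_size = DEFAULT_GCN_STACK_SIZE;

static void
init_ephemeral_sizes (void)
{
  const char *env = getenv ("GCN_TEAM_ARENA_SIZE");
  if (env && atoi (env) > 0)
    team_arena_size = atoi (env);

  env = getenv ("GCN_STACK_SIZE");
  if (env && atoi (env) > 0)
    stack_size = atoi (env);
}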
A new ABI is needed, so all libraries must be rebuilt, and newlib must be
version 4.3.0.20230120 or newer.
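
On the device side, the prologue no longer takes a wave offset from the private-segment setup; it derives each wave's stack base from the launch geometry and the kernargs values. Written as plain C rather than the RTL the prologue actually emits, the computation is roughly:

/* Sketch of the per-wave stack base computation (see gcn_expand_prologue
   in the diff below).  group_id, wave_id and waves_per_group come from the
   dispatch geometry; stack_ptr and stack_size_per_thread from the kernargs
   region.  Each wave gets a disjoint slice of the host-allocated block.  */
#include <stdint.h>

static inline char *
wave_stack_base (int64_t stack_ptr, int stack_size_per_thread,
		 int group_id, int wave_id, int waves_per_group)
{
  int thread_id = group_id * waves_per_group + wave_id;
  int wave_offset = stack_size_per_thread * thread_id;
  return (char *) stack_ptr + wave_offset;
}

The gcn_expand_builtin_1 hunk performs the same calculation and then adds stack_size_per_thread once more to form the per-wave stack limit.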
gcc/ChangeLog:
* config/gcn/gcn-run.cc: Include libgomp-gcn.h.
(struct kernargs): Replace the common content with kernargs_abi.
(struct heap): Delete.
(main): Read GCN_STACK_SIZE envvar.
Allocate space for the device stacks.
Write the new kernargs fields.
* config/gcn/gcn.cc (gcn_option_override): Remove stack_size_opt.
(default_requested_args): Remove PRIVATE_SEGMENT_BUFFER_ARG and
PRIVATE_SEGMENT_WAVE_OFFSET_ARG.
(gcn_addr_space_convert): Mask the QUEUE_PTR_ARG content.
(gcn_expand_prologue): Move the TARGET_PACKED_WORK_ITEMS to the top.
Set up the stacks from the values in the kernargs, not the private segment.
(gcn_expand_builtin_1): Match the stack configuration in the prologue.
(gcn_hsa_declare_function_name): Turn off the private segment.
(gcn_conditional_register_usage): Ensure QUEUE_PTR is fixed.
* config/gcn/gcn.h (FIXED_REGISTERS): Fix the QUEUE_PTR register.
* config/gcn/gcn.opt (mstack-size): Change the description.
include/ChangeLog:
* gomp-constants.h (GOMP_VERSION_GCN): Bump.
libgomp/ChangeLog:
* config/gcn/libgomp-gcn.h (DEFAULT_GCN_STACK_SIZE): New define.
(DEFAULT_TEAM_ARENA_SIZE): New define.
(struct heap): Move to this file.
(struct kernargs_abi): Likewise.
* config/gcn/team.c (gomp_gcn_enter_kernel): Use team arena size from
the kernargs.
* libgomp.h: Include libgomp-gcn.h.
(TEAM_ARENA_SIZE): Remove.
(team_malloc): Update the error message.
* plugin/plugin-gcn.c (struct kernargs): Move common content to
struct kernargs_abi.
(struct agent_info): Rename team arenas to ephemeral memories.
(struct team_arena_list): Rename ...
(struct ephemeral_memories_list): ... to this.
(struct heap): Delete.
(team_arena_size): New variable.
(stack_size): New variable.
(print_kernel_dispatch): Update debug messages.
(init_environment_variables): Read GCN_TEAM_ARENA_SIZE.
Read GCN_STACK_SIZE.
(get_team_arena): Rename ...
(configure_ephemeral_memories): ... to this, and set up stacks.
(release_team_arena): Rename ...
(release_ephemeral_memories): ... to this.
(destroy_team_arenas): Rename ...
(destroy_ephemeral_memories): ... to this.
(create_kernel_dispatch): Add num_threads parameter.
Adjust for kernargs_abi refactor and ephemeral memories.
(release_kernel_dispatch): Adjust for ephemeral memories.
(run_kernel): Pass thread-count to create_kernel_dispatch.
(GOMP_OFFLOAD_init_device): Adjust for ephemeral memories.
(GOMP_OFFLOAD_fini_device): Adjust for ephemeral memories.
gcc/testsuite/ChangeLog:
* gcc.c-torture/execute/pr47237.c: Xfail on amdgcn.
* gcc.dg/builtin-apply3.c: Xfail for amdgcn.
* gcc.dg/builtin-apply4.c: Xfail for amdgcn.
* gcc.dg/torture/stackalign/builtin-apply-3.c: Xfail for amdgcn.
* gcc.dg/torture/stackalign/builtin-apply-4.c: Xfail for amdgcn.
Diffstat (limited to 'gcc')
 gcc/config/gcn/gcn-run.cc                                 |  62
 gcc/config/gcn/gcn.cc                                     | 192
 gcc/config/gcn/gcn.h                                      |   2
 gcc/config/gcn/gcn.opt                                    |   2
 gcc/testsuite/gcc.c-torture/execute/pr47237.c             |   2
 gcc/testsuite/gcc.dg/builtin-apply3.c                     |   1
 gcc/testsuite/gcc.dg/builtin-apply4.c                     |   1
 gcc/testsuite/gcc.dg/torture/stackalign/builtin-apply-3.c |   1
 gcc/testsuite/gcc.dg/torture/stackalign/builtin-apply-4.c |   1
 9 files changed, 152 insertions(+), 112 deletions(-)
diff --git a/gcc/config/gcn/gcn-run.cc b/gcc/config/gcn/gcn-run.cc
index 606772e..4232a17 100644
--- a/gcc/config/gcn/gcn-run.cc
+++ b/gcc/config/gcn/gcn-run.cc
@@ -35,6 +35,7 @@
 #include <signal.h>
 
 #include "hsa.h"
+#include "../../../libgomp/config/gcn/libgomp-gcn.h"
 
 #ifndef HSA_RUNTIME_LIB
 #define HSA_RUNTIME_LIB "libhsa-runtime64.so.1"
@@ -487,39 +488,16 @@ device_malloc (size_t size, hsa_region_t region)
    automatically assign the exit value to *return_value.  */
 
 struct kernargs
 {
-  /* Kernargs.  */
-  int32_t argc;
-  int64_t argv;
-  int64_t out_ptr;
-  int64_t heap_ptr;
-
-  /* Output data.  */
-  struct output
-  {
-    int return_value;
-    unsigned int next_output;
-    struct printf_data
-    {
-      int written;
-      char msg[128];
-      int type;
-      union
-      {
-	int64_t ivalue;
-	double dvalue;
-	char text[128];
-      };
-    } queue[1024];
-    unsigned int consumed;
-  } output_data;
+  union {
+    struct {
+      int32_t argc;
+      int64_t argv;
+    } args;
+    struct kernargs_abi abi;
+  };
+  struct output output_data;
 };
 
-struct heap
-{
-  int64_t size;
-  char data[0];
-} heap;
-
 /* Print any console output from the kernel.
    We print all entries from "consumed" to the next entry without a "written"
    flag, or "next_output" is reached.  The buffer is circular, but the
@@ -687,6 +665,16 @@ main (int argc, char *argv[])
   for (int i = 0; i < kernel_argc; i++)
     args_size += strlen (kernel_argv[i]) + 1;
 
+  /* The device stack can be adjusted via an environment variable.  */
+  char *envvar = getenv ("GCN_STACK_SIZE");
+  int stack_size = 1 * 1024 * 1024;  /* 1MB default.  */
+  if (envvar)
+    {
+      int val = atoi (envvar);
+      if (val)
+	stack_size = val;
+    }
+
   /* Allocate device memory for both function parameters and the argv
      data.  */
   struct kernargs *kernargs = device_malloc (sizeof (*kernargs),
@@ -702,11 +690,12 @@ main (int argc, char *argv[])
   XHSA (hsa_fns.hsa_memory_assign_agent_fn (heap, device,
					     HSA_ACCESS_PERMISSION_RW),
	"Assign heap to device agent");
+  void *stack = device_malloc (stack_size, heap_region);
 
   /* Write the data to the target.  */
-  kernargs->argc = kernel_argc;
-  kernargs->argv = (int64_t) args->argv_data;
-  kernargs->out_ptr = (int64_t) &kernargs->output_data;
+  kernargs->args.argc = kernel_argc;
+  kernargs->args.argv = (int64_t) args->argv_data;
+  kernargs->abi.out_ptr = (int64_t) &kernargs->output_data;
   kernargs->output_data.return_value = 0xcafe0000; /* Default return value.  */
   kernargs->output_data.next_output = 0;
   for (unsigned i = 0; i < (sizeof (kernargs->output_data.queue)
@@ -721,8 +710,11 @@ main (int argc, char *argv[])
       memcpy (&args->strings[offset], kernel_argv[i], arg_len + 1);
       offset += arg_len;
     }
-  kernargs->heap_ptr = (int64_t) heap;
+  kernargs->abi.heap_ptr = (int64_t) heap;
   hsa_fns.hsa_memory_copy_fn (&heap->size, &heap_size, sizeof (heap_size));
+  kernargs->abi.arena_ptr = 0;
+  kernargs->abi.stack_ptr = (int64_t) stack;
+  kernargs->abi.stack_size_per_thread = stack_size;
 
   /* Run constructors on the GPU.  */
   run (init_array_kernel, kernargs);
diff --git a/gcc/config/gcn/gcn.cc b/gcc/config/gcn/gcn.cc
index edde7ba..23ab01e 100644
--- a/gcc/config/gcn/gcn.cc
+++ b/gcc/config/gcn/gcn.cc
@@ -138,21 +138,6 @@ gcn_option_override (void)
	       : ISA_UNKNOWN);
   gcc_assert (gcn_isa != ISA_UNKNOWN);
 
-  /* The default stack size needs to be small for offload kernels because
-     there may be many, many threads.  Also, a smaller stack gives a
-     measureable performance boost.  But, a small stack is insufficient
-     for running the testsuite, so we use a larger default for the stand
-     alone case.  */
-  if (stack_size_opt == -1)
-    {
-      if (flag_openacc || flag_openmp)
-	/* 512 bytes per work item = 32kB total.  */
-	stack_size_opt = 512 * 64;
-      else
-	/* 1MB total.  */
-	stack_size_opt = 1048576;
-    }
-
   /* Reserve 1Kb (somewhat arbitrarily) of LDS space for reduction results and
      worker broadcasts.  */
   if (gang_private_size_opt == -1)
@@ -228,11 +213,9 @@ static const struct gcn_kernel_arg_type
 };
 
 static const long default_requested_args
-  = (1 << PRIVATE_SEGMENT_BUFFER_ARG)
-  | (1 << DISPATCH_PTR_ARG)
+  = (1 << DISPATCH_PTR_ARG)
   | (1 << QUEUE_PTR_ARG)
   | (1 << KERNARG_SEGMENT_PTR_ARG)
-  | (1 << PRIVATE_SEGMENT_WAVE_OFFSET_ARG)
   | (1 << WORKGROUP_ID_X_ARG)
   | (1 << WORK_ITEM_ID_X_ARG)
   | (1 << WORK_ITEM_ID_Y_ARG)
@@ -1865,10 +1848,14 @@ gcn_addr_space_convert (rtx op, tree from_type, tree to_type)
   if (AS_LDS_P (as_from) && AS_FLAT_P (as_to))
     {
-      rtx queue = gen_rtx_REG (DImode,
-			       cfun->machine->args.reg[QUEUE_PTR_ARG]);
+      /* The high bits of the QUEUE_PTR_ARG register are used by
+	 GCN_BUILTIN_FIRST_CALL_THIS_THREAD_P, so mask them out.  */
+      rtx queue_reg = gen_rtx_REG (DImode,
+				   cfun->machine->args.reg[QUEUE_PTR_ARG]);
+      rtx queue_ptr = gen_reg_rtx (DImode);
+      emit_insn (gen_anddi3 (queue_ptr, queue_reg, GEN_INT (0xffffffffffff)));
       rtx group_seg_aperture_hi = gen_rtx_MEM (SImode,
-				       gen_rtx_PLUS (DImode, queue,
+				       gen_rtx_PLUS (DImode, queue_ptr,
						     gen_int_mode (64, SImode)));
       rtx tmp = gen_reg_rtx (DImode);
@@ -2521,6 +2508,11 @@ gcn_conditional_register_usage (void)
       fixed_regs[cfun->machine->args.reg[DISPATCH_PTR_ARG]] = 1;
       fixed_regs[cfun->machine->args.reg[DISPATCH_PTR_ARG] + 1] = 1;
     }
+  if (cfun->machine->args.reg[QUEUE_PTR_ARG] >= 0)
+    {
+      fixed_regs[cfun->machine->args.reg[QUEUE_PTR_ARG]] = 1;
+      fixed_regs[cfun->machine->args.reg[QUEUE_PTR_ARG] + 1] = 1;
+    }
   if (cfun->machine->args.reg[WORKGROUP_ID_X_ARG] >= 0)
     fixed_regs[cfun->machine->args.reg[WORKGROUP_ID_X_ARG]] = 1;
   if (cfun->machine->args.reg[WORK_ITEM_ID_X_ARG] >= 0)
@@ -3346,10 +3338,56 @@ gcn_expand_prologue ()
     }
   else
     {
-      rtx wave_offset = gen_rtx_REG (SImode,
-				     cfun->machine->args.
-				     reg[PRIVATE_SEGMENT_WAVE_OFFSET_ARG]);
+      if (TARGET_PACKED_WORK_ITEMS)
+	{
+	  /* v0 conatins the X, Y and Z dimensions all in one.
+	     Expand them out for ABI compatibility.  */
+	  /* TODO: implement and use zero_extract.  */
+	  rtx v1 = gen_rtx_REG (V64SImode, VGPR_REGNO (1));
+	  emit_insn (gen_andv64si3 (v1, gen_rtx_REG (V64SImode, VGPR_REGNO (0)),
+				    gen_rtx_CONST_INT (VOIDmode, 0x3FF << 10)));
+	  emit_insn (gen_lshrv64si3 (v1, v1, gen_rtx_CONST_INT (VOIDmode, 10)));
+	  emit_insn (gen_prologue_use (v1));
+
+	  rtx v2 = gen_rtx_REG (V64SImode, VGPR_REGNO (2));
+	  emit_insn (gen_andv64si3 (v2, gen_rtx_REG (V64SImode, VGPR_REGNO (0)),
+				    gen_rtx_CONST_INT (VOIDmode, 0x3FF << 20)));
+	  emit_insn (gen_lshrv64si3 (v2, v2, gen_rtx_CONST_INT (VOIDmode, 20)));
+	  emit_insn (gen_prologue_use (v2));
+	}
+
+      /* We no longer use the private segment for the stack (it's not
+	 accessible to reverse offload), so we must calculate a wave offset
+	 from the grid dimensions and stack size, which is calculated on the
+	 host, and passed in the kernargs region.
+	 See libgomp-gcn.h for details.  */
+      rtx wave_offset = gen_rtx_REG (SImode, FIRST_PARM_REG);
+
+      rtx num_waves_mem = gcn_oacc_dim_size (1);
+      rtx num_waves = gen_rtx_REG (SImode, FIRST_PARM_REG+1);
+      set_mem_addr_space (num_waves_mem, ADDR_SPACE_SCALAR_FLAT);
+      emit_move_insn (num_waves, num_waves_mem);
+
+      rtx workgroup_num = gcn_oacc_dim_pos (0);
+      rtx wave_num = gen_rtx_REG (SImode, FIRST_PARM_REG+2);
+      emit_move_insn(wave_num, gcn_oacc_dim_pos (1));
+      rtx thread_id = gen_rtx_REG (SImode, FIRST_PARM_REG+3);
+      emit_insn (gen_mulsi3 (thread_id, num_waves, workgroup_num));
+      emit_insn (gen_addsi3_scc (thread_id, thread_id, wave_num));
+
+      rtx kernarg_reg = gen_rtx_REG (DImode, cfun->machine->args.reg
				     [KERNARG_SEGMENT_PTR_ARG]);
+      rtx stack_size_mem = gen_rtx_MEM (SImode,
+					gen_rtx_PLUS (DImode, kernarg_reg,
+						      GEN_INT (52)));
+      set_mem_addr_space (stack_size_mem, ADDR_SPACE_SCALAR_FLAT);
+      emit_move_insn (wave_offset, stack_size_mem);
+
+      emit_insn (gen_mulsi3 (wave_offset, wave_offset, thread_id));
+
+      /* The FLAT_SCRATCH_INIT is not usually needed, but can be enabled
+	 via the function attributes.  */
       if (cfun->machine->args.requested & (1 << FLAT_SCRATCH_INIT_ARG))
	{
	  rtx fs_init_lo =
@@ -3386,10 +3424,12 @@ gcn_expand_prologue ()
       HOST_WIDE_INT sp_adjust = (offsets->local_vars
				  + offsets->outgoing_args_size);
 
-      /* Initialise FP and SP from the buffer descriptor in s[0:3].  */
-      emit_move_insn (fp_lo, gen_rtx_REG (SImode, 0));
-      emit_insn (gen_andsi3_scc (fp_hi, gen_rtx_REG (SImode, 1),
-				 gen_int_mode (0xffff, SImode)));
+      /* Initialize FP and SP from space allocated on the host.  */
+      rtx stack_addr_mem = gen_rtx_MEM (DImode,
+					gen_rtx_PLUS (DImode, kernarg_reg,
+						      GEN_INT (40)));
+      set_mem_addr_space (stack_addr_mem, ADDR_SPACE_SCALAR_FLAT);
+      emit_move_insn (fp, stack_addr_mem);
 
       rtx scc = gen_rtx_REG (BImode, SCC_REG);
       emit_insn (gen_addsi3_scalar_carry (fp_lo, fp_lo, wave_offset, scc));
       emit_insn (gen_addcsi3_scalar_zero (fp_hi, fp_hi, scc));
@@ -3445,25 +3485,6 @@ gcn_expand_prologue ()
       emit_insn (gen_prologue_use (gen_rtx_REG (SImode, M0_REG)));
     }
 
-  if (TARGET_PACKED_WORK_ITEMS
-      && cfun && cfun->machine && !cfun->machine->normal_function)
-    {
-      /* v0 conatins the X, Y and Z dimensions all in one.
-	 Expand them out for ABI compatibility.  */
-      /* TODO: implement and use zero_extract.  */
-      rtx v1 = gen_rtx_REG (V64SImode, VGPR_REGNO (1));
-      emit_insn (gen_andv64si3 (v1, gen_rtx_REG (V64SImode, VGPR_REGNO (0)),
-				gen_rtx_CONST_INT (VOIDmode, 0x3FF << 10)));
-      emit_insn (gen_lshrv64si3 (v1, v1, gen_rtx_CONST_INT (VOIDmode, 10)));
-      emit_insn (gen_prologue_use (v1));
-
-      rtx v2 = gen_rtx_REG (V64SImode, VGPR_REGNO (2));
-      emit_insn (gen_andv64si3 (v2, gen_rtx_REG (V64SImode, VGPR_REGNO (0)),
-				gen_rtx_CONST_INT (VOIDmode, 0x3FF << 20)));
-      emit_insn (gen_lshrv64si3 (v2, v2, gen_rtx_CONST_INT (VOIDmode, 20)));
-      emit_insn (gen_prologue_use (v2));
-    }
-
   if (cfun && cfun->machine && !cfun->machine->normal_function && flag_openmp)
     {
       /* OpenMP kernels have an implicit call to gomp_gcn_enter_kernel.  */
@@ -4504,26 +4525,53 @@ gcn_expand_builtin_1 (tree exp, rtx target, rtx /*subtarget */ ,
	 cf. struct hsa_kernel_dispatch_packet_s in the HSA doc.  */
      rtx ptr;
      if (cfun->machine->args.reg[DISPATCH_PTR_ARG] >= 0
-	  && cfun->machine->args.reg[PRIVATE_SEGMENT_BUFFER_ARG] >= 0)
+	  && cfun->machine->args.reg[KERNARG_SEGMENT_PTR_ARG] >= 0)
	{
-	  rtx size_rtx = gen_rtx_REG (DImode,
-				      cfun->machine->args.reg[DISPATCH_PTR_ARG]);
-	  size_rtx = gen_rtx_MEM (SImode,
-				  gen_rtx_PLUS (DImode, size_rtx,
-						GEN_INT (6*2 + 3*4)));
-	  size_rtx = gen_rtx_MULT (SImode, size_rtx, GEN_INT (64));
-
-	  ptr = gen_rtx_REG (DImode,
-			     cfun->machine->args.reg[PRIVATE_SEGMENT_BUFFER_ARG]);
-	  ptr = gen_rtx_AND (DImode, ptr, GEN_INT (0x0000ffffffffffff));
-	  ptr = gen_rtx_PLUS (DImode, ptr, size_rtx);
-
-	  if (cfun->machine->args.reg[PRIVATE_SEGMENT_WAVE_OFFSET_ARG] >= 0)
-	    {
-	      rtx off;
-	      off = gen_rtx_REG (SImode,
-				 cfun->machine->args.reg[PRIVATE_SEGMENT_WAVE_OFFSET_ARG]);
-	      ptr = gen_rtx_PLUS (DImode, ptr, off);
-	    }
+	  rtx num_waves_mem = gcn_oacc_dim_size (1);
+	  rtx num_waves = gen_reg_rtx (SImode);
+	  set_mem_addr_space (num_waves_mem, ADDR_SPACE_SCALAR_FLAT);
+	  emit_move_insn (num_waves, num_waves_mem);
+
+	  rtx workgroup_num = gcn_oacc_dim_pos (0);
+	  rtx wave_num = gen_reg_rtx (SImode);
+	  emit_move_insn(wave_num, gcn_oacc_dim_pos (1));
+
+	  rtx thread_id = gen_reg_rtx (SImode);
+	  emit_insn (gen_mulsi3 (thread_id, num_waves, workgroup_num));
+	  emit_insn (gen_addsi3_scc (thread_id, thread_id, wave_num));
+
+	  rtx kernarg_reg = gen_rtx_REG (DImode, cfun->machine->args.reg
					 [KERNARG_SEGMENT_PTR_ARG]);
+	  rtx stack_size_mem = gen_rtx_MEM (SImode,
+					    gen_rtx_PLUS (DImode,
							  kernarg_reg,
							  GEN_INT (52)));
+	  set_mem_addr_space (stack_size_mem, ADDR_SPACE_SCALAR_FLAT);
+	  rtx stack_size = gen_reg_rtx (SImode);
+	  emit_move_insn (stack_size, stack_size_mem);
+
+	  rtx wave_offset = gen_reg_rtx (SImode);
+	  emit_insn (gen_mulsi3 (wave_offset, stack_size, thread_id));
+
+	  rtx stack_limit_offset = gen_reg_rtx (SImode);
+	  emit_insn (gen_addsi3 (stack_limit_offset, wave_offset,
				 stack_size));
+
+	  rtx stack_limit_offset_di = gen_reg_rtx (DImode);
+	  emit_move_insn (gen_rtx_SUBREG (SImode, stack_limit_offset_di, 4),
			  const0_rtx);
+	  emit_move_insn (gen_rtx_SUBREG (SImode, stack_limit_offset_di, 0),
			  stack_limit_offset);
+
+	  rtx stack_addr_mem = gen_rtx_MEM (DImode,
+					    gen_rtx_PLUS (DImode,
							  kernarg_reg,
							  GEN_INT (40)));
+	  set_mem_addr_space (stack_addr_mem, ADDR_SPACE_SCALAR_FLAT);
+	  rtx stack_addr = gen_reg_rtx (DImode);
+	  emit_move_insn (stack_addr, stack_addr_mem);
+
+	  ptr = gen_rtx_PLUS (DImode, stack_addr, stack_limit_offset_di);
	}
      else
	{
@@ -4551,11 +4599,11 @@ gcn_expand_builtin_1 (tree exp, rtx target, rtx /*subtarget */ ,
	 whether it was the first call.  */
      rtx result = gen_reg_rtx (BImode);
      emit_move_insn (result, const0_rtx);
-     if (cfun->machine->args.reg[PRIVATE_SEGMENT_BUFFER_ARG] >= 0)
+     if (cfun->machine->args.reg[QUEUE_PTR_ARG] >= 0)
	{
	  rtx not_first = gen_label_rtx ();
	  rtx reg = gen_rtx_REG (DImode,
-			cfun->machine->args.reg[PRIVATE_SEGMENT_BUFFER_ARG]);
+			cfun->machine->args.reg[QUEUE_PTR_ARG]);
	  reg = gcn_operand_part (DImode, reg, 1);
	  rtx cmp = force_reg (SImode,
			       gen_rtx_LSHIFTRT (SImode, reg, GEN_INT (16)));
@@ -6041,16 +6089,13 @@ gcn_hsa_declare_function_name (FILE *file, const char *name, tree)
	   "\t  .amdhsa_reserve_vcc\t1\n"
	   "\t  .amdhsa_reserve_flat_scratch\t0\n"
	   "\t  .amdhsa_reserve_xnack_mask\t%i\n"
-	   "\t  .amdhsa_private_segment_fixed_size\t%i\n"
+	   "\t  .amdhsa_private_segment_fixed_size\t0\n"
	   "\t  .amdhsa_group_segment_fixed_size\t%u\n"
	   "\t  .amdhsa_float_denorm_mode_32\t3\n"
	   "\t  .amdhsa_float_denorm_mode_16_64\t3\n",
	   vgpr, sgpr, xnack_enabled,
-	   /* workitem_private_segment_bytes_size needs to be
-	      one 64th the wave-front stack size.  */
-	   stack_size_opt / 64,
	   LDS_SIZE);
   if (gcn_arch == PROCESSOR_GFX90a)
     fprintf (file,
@@ -6075,7 +6120,7 @@ gcn_hsa_declare_function_name (FILE *file, const char *name, tree)
	   "        .kernarg_segment_size: %i\n"
	   "        .kernarg_segment_align: %i\n"
	   "        .group_segment_fixed_size: %u\n"
-	   "        .private_segment_fixed_size: %i\n"
+	   "        .private_segment_fixed_size: 0\n"
	   "        .wavefront_size: 64\n"
	   "        .sgpr_count: %i\n"
	   "        .vgpr_count: %i\n"
@@ -6083,7 +6128,6 @@ gcn_hsa_declare_function_name (FILE *file, const char *name, tree)
	   cfun->machine->kernarg_segment_byte_size,
	   cfun->machine->kernarg_segment_alignment,
	   LDS_SIZE,
-	   stack_size_opt / 64,
	   sgpr, vgpr);
   if (gcn_arch == PROCESSOR_GFX90a)
     fprintf (file, "          .agpr_count: 0\n"); // AGPRs are not used, yet
diff --git a/gcc/config/gcn/gcn.h b/gcc/config/gcn/gcn.h
index 19ad521..4ff9a5d 100644
--- a/gcc/config/gcn/gcn.h
+++ b/gcc/config/gcn/gcn.h
@@ -183,7 +183,7 @@
 #define FIXED_REGISTERS {		\
    /* Scalars.  */			\
-    1, 1, 0, 0, 1, 1, 1, 1, 1, 1,	\
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	\
    /* fp sp lr.  */			\
     1, 1, 0, 0, 0, 0, 1, 1, 0, 0,	\
    /* exec_save, cc_save */		\
diff --git a/gcc/config/gcn/gcn.opt b/gcc/config/gcn/gcn.opt
index e616ea0..c5c32bd 100644
--- a/gcc/config/gcn/gcn.opt
+++ b/gcc/config/gcn/gcn.opt
@@ -69,7 +69,7 @@ int stack_size_opt = -1
 
 mstack-size=
 Target RejectNegative Joined UInteger Var(stack_size_opt) Init(-1)
--mstack-size=<number>	Set the private segment size per wave-front, in bytes.
+Obsolete; use GCN_STACK_SIZE at runtime.
 
 int gang_private_size_opt = -1
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr47237.c b/gcc/testsuite/gcc.c-torture/execute/pr47237.c
index 9812406..944bdb7 100644
--- a/gcc/testsuite/gcc.c-torture/execute/pr47237.c
+++ b/gcc/testsuite/gcc.c-torture/execute/pr47237.c
@@ -1,4 +1,4 @@
-/* { dg-xfail-if "can cause stack underflow" { nios2-*-* } } */
+/* { dg-xfail-run-if "can cause stack underflow" { nios2-*-* amdgcn-*-* } } */
 /* { dg-require-effective-target untyped_assembly } */
 
 #define INTEGER_ARG  5
diff --git a/gcc/testsuite/gcc.dg/builtin-apply3.c b/gcc/testsuite/gcc.dg/builtin-apply3.c
index 37c5209..8fc2003 100644
--- a/gcc/testsuite/gcc.dg/builtin-apply3.c
+++ b/gcc/testsuite/gcc.dg/builtin-apply3.c
@@ -6,6 +6,7 @@
 /* { dg-do run } */
 /* { dg-require-effective-target untyped_assembly } */
+/* { dg-xfail-run-if "can cause stack underflow" { amdgcn-*-* } } */
 
 #define INTEGER_ARG  5
diff --git a/gcc/testsuite/gcc.dg/builtin-apply4.c b/gcc/testsuite/gcc.dg/builtin-apply4.c
index cca9187..aa491c1 100644
--- a/gcc/testsuite/gcc.dg/builtin-apply4.c
+++ b/gcc/testsuite/gcc.dg/builtin-apply4.c
@@ -3,6 +3,7 @@
 /* { dg-additional-options "-mno-mmx" { target { { i?86-*-* x86_64-*-* } && ia32 } } } */
 /* { dg-do run } */
 /* { dg-require-effective-target untyped_assembly } */
+/* { dg-xfail-run-if "can cause stack underflow" { amdgcn-*-* } } */
 
 extern void abort (void);
diff --git a/gcc/testsuite/gcc.dg/torture/stackalign/builtin-apply-3.c b/gcc/testsuite/gcc.dg/torture/stackalign/builtin-apply-3.c
index 37c5209..8fc2003 100644
--- a/gcc/testsuite/gcc.dg/torture/stackalign/builtin-apply-3.c
+++ b/gcc/testsuite/gcc.dg/torture/stackalign/builtin-apply-3.c
@@ -6,6 +6,7 @@
 /* { dg-do run } */
 /* { dg-require-effective-target untyped_assembly } */
+/* { dg-xfail-run-if "can cause stack underflow" { amdgcn-*-* } } */
 
 #define INTEGER_ARG  5
diff --git a/gcc/testsuite/gcc.dg/torture/stackalign/builtin-apply-4.c b/gcc/testsuite/gcc.dg/torture/stackalign/builtin-apply-4.c
index 78b1032..94b2012 100644
--- a/gcc/testsuite/gcc.dg/torture/stackalign/builtin-apply-4.c
+++ b/gcc/testsuite/gcc.dg/torture/stackalign/builtin-apply-4.c
@@ -2,6 +2,7 @@
 /* { dg-do run } */
 /* { dg-additional-options "-fgnu89-inline" } */
 /* { dg-require-effective-target untyped_assembly } */
+/* { dg-xfail-run-if "can cause stack underflow" { amdgcn-*-* } } */
 
 extern void abort (void);