path: root/sim/frv/memory.c
author    Dave Brolley <brolley@redhat.com>  2003-08-29 16:35:47 +0000
committer Dave Brolley <brolley@redhat.com>  2003-08-29 16:35:47 +0000
commit    b34f6357d032f4b39f9c7adb1995956d04339461 (patch)
tree      a77b3c9a47df5e56dc9fc85d3a2792b12cb436e8 /sim/frv/memory.c
parent    60fac5b81a94dedf1997344af7a998e9ea611e55 (diff)
download  gdb-b34f6357d032f4b39f9c7adb1995956d04339461.zip
gdb-b34f6357d032f4b39f9c7adb1995956d04339461.tar.gz
gdb-b34f6357d032f4b39f9c7adb1995956d04339461.tar.bz2
New simulator for Fujitsu frv contributed by Red Hat.
Diffstat (limited to 'sim/frv/memory.c')
-rw-r--r--  sim/frv/memory.c | 750
1 file changed, 750 insertions, 0 deletions
diff --git a/sim/frv/memory.c b/sim/frv/memory.c
new file mode 100644
index 0000000..603f7bb
--- /dev/null
+++ b/sim/frv/memory.c
@@ -0,0 +1,750 @@
+/* frv memory model.
+ Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
+ Contributed by Red Hat.
+
+This file is part of the GNU simulators.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+#define WANT_CPU frvbf
+#define WANT_CPU_FRVBF
+
+#include "sim-main.h"
+#include "cgen-mem.h"
+#include "bfd.h"
+
+/* Check for alignment and access restrictions.  Return the corrected
+   address.  */
+static SI
+fr400_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
+{
+ /* Check access restrictions for double word loads only. */
+ if (align_mask == 7)
+ {
+ if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
+ frv_queue_data_access_error_interrupt (current_cpu, address);
+ }
+ return address;
+}
+
+static SI
+fr500_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
+{
+ if (address & align_mask)
+ {
+ frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
+ address &= ~align_mask;
+ }
+
+  if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
+      || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
+ frv_queue_data_access_error_interrupt (current_cpu, address);
+
+ return address;
+}
+
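+/* Check a data read address on the current architecture.  */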
+static SI
+check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
+{
+ SIM_DESC sd = CPU_STATE (current_cpu);
+ switch (STATE_ARCHITECTURE (sd)->mach)
+ {
+ case bfd_mach_fr400:
+ address = fr400_check_data_read_address (current_cpu, address,
+ align_mask);
+ break;
+ case bfd_mach_frvtomcat:
+ case bfd_mach_fr500:
+ case bfd_mach_frv:
+ address = fr500_check_data_read_address (current_cpu, address,
+ align_mask);
+ break;
+ default:
+ break;
+ }
+
+ return address;
+}
+
+static SI
+fr400_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
+{
+ if (address & align_mask)
+ {
+ /* Make sure that this exception is not masked. */
+ USI isr = GET_ISR ();
+ if (! GET_ISR_EMAM (isr))
+ {
+ /* Bad alignment causes a data_access_error on fr400. */
+ frv_queue_data_access_error_interrupt (current_cpu, address);
+ }
+ address &= ~align_mask;
+ }
+  /* No address range restrictions to check on the fr400. */
+ return address;
+}
+
+static SI
+fr500_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
+{
+  if (((USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff)
+      || ((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff)
+      || ((USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff)
+      || ((USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff))
+ frv_queue_data_access_exception_interrupt (current_cpu);
+
+ return address;
+}
+
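+/* Check an address for any read or write access on the current
+   architecture.  */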
+static SI
+check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
+{
+ SIM_DESC sd = CPU_STATE (current_cpu);
+ switch (STATE_ARCHITECTURE (sd)->mach)
+ {
+ case bfd_mach_fr400:
+ address = fr400_check_readwrite_address (current_cpu, address,
+ align_mask);
+ break;
+ case bfd_mach_frvtomcat:
+ case bfd_mach_fr500:
+ case bfd_mach_frv:
+ address = fr500_check_readwrite_address (current_cpu, address,
+ align_mask);
+ break;
+ default:
+ break;
+ }
+
+ return address;
+}
+
+static PCADDR
+fr400_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
+ int align_mask)
+{
+ if (address & align_mask)
+ {
+ frv_queue_instruction_access_error_interrupt (current_cpu);
+ address &= ~align_mask;
+ }
+ else if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
+ frv_queue_instruction_access_error_interrupt (current_cpu);
+
+ return address;
+}
+
+static PCADDR
+fr500_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
+ int align_mask)
+{
+ if (address & align_mask)
+ {
+ frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
+ address &= ~align_mask;
+ }
+
+  if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
+      || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
+    frv_queue_instruction_access_error_interrupt (current_cpu);
+  else if (((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff)
+      || ((USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff)
+      || ((USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff))
+ frv_queue_instruction_access_exception_interrupt (current_cpu);
+ else
+ {
+ USI hsr0 = GET_HSR0 ();
+ if (! GET_HSR0_RME (hsr0)
+ && (USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff)
+ frv_queue_instruction_access_exception_interrupt (current_cpu);
+ }
+
+ return address;
+}
+
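+/* Check an instruction fetch address on the current architecture.  */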
+static PCADDR
+check_insn_read_address (SIM_CPU *current_cpu, PCADDR address, int align_mask)
+{
+ SIM_DESC sd = CPU_STATE (current_cpu);
+ switch (STATE_ARCHITECTURE (sd)->mach)
+ {
+ case bfd_mach_fr400:
+ address = fr400_check_insn_read_address (current_cpu, address,
+ align_mask);
+ break;
+ case bfd_mach_frvtomcat:
+ case bfd_mach_fr500:
+ case bfd_mach_frv:
+ address = fr500_check_insn_read_address (current_cpu, address,
+ align_mask);
+ break;
+ default:
+ break;
+ }
+
+ return address;
+}
+
+/* Memory reads. */
+QI
+frvbf_read_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address)
+{
+ USI hsr0 = GET_HSR0 ();
+ FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
+
+ /* Check for access exceptions. */
+ address = check_data_read_address (current_cpu, address, 0);
+ address = check_readwrite_address (current_cpu, address, 0);
+
+ /* If we need to count cycles, then the cache operation will be
+ initiated from the model profiling functions.
+ See frvbf_model_.... */
+ if (model_insn)
+ {
+ CPU_LOAD_ADDRESS (current_cpu) = address;
+ CPU_LOAD_LENGTH (current_cpu) = 1;
+ CPU_LOAD_SIGNED (current_cpu) = 1;
+ return 0xb7; /* any random value */
+ }
+
+ if (GET_HSR0_DCE (hsr0))
+ {
+ int cycles;
+ cycles = frv_cache_read (cache, 0, address);
+ if (cycles != 0)
+ return CACHE_RETURN_DATA (cache, 0, address, QI, 1);
+ }
+
+ return GETMEMQI (current_cpu, pc, address);
+}
+
+UQI
+frvbf_read_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address)
+{
+ USI hsr0 = GET_HSR0 ();
+ FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
+
+ /* Check for access exceptions. */
+ address = check_data_read_address (current_cpu, address, 0);
+ address = check_readwrite_address (current_cpu, address, 0);
+
+ /* If we need to count cycles, then the cache operation will be
+ initiated from the model profiling functions.
+ See frvbf_model_.... */
+ if (model_insn)
+ {
+ CPU_LOAD_ADDRESS (current_cpu) = address;
+ CPU_LOAD_LENGTH (current_cpu) = 1;
+ CPU_LOAD_SIGNED (current_cpu) = 0;
+ return 0xb7; /* any random value */
+ }
+
+ if (GET_HSR0_DCE (hsr0))
+ {
+ int cycles;
+ cycles = frv_cache_read (cache, 0, address);
+ if (cycles != 0)
+ return CACHE_RETURN_DATA (cache, 0, address, UQI, 1);
+ }
+
+ return GETMEMUQI (current_cpu, pc, address);
+}
+
+HI
+frvbf_read_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
+{
+ USI hsr0;
+ FRV_CACHE *cache;
+
+ /* Check for access exceptions. */
+ address = check_data_read_address (current_cpu, address, 1);
+ address = check_readwrite_address (current_cpu, address, 1);
+
+ /* If we need to count cycles, then the cache operation will be
+ initiated from the model profiling functions.
+ See frvbf_model_.... */
+ hsr0 = GET_HSR0 ();
+ cache = CPU_DATA_CACHE (current_cpu);
+ if (model_insn)
+ {
+ CPU_LOAD_ADDRESS (current_cpu) = address;
+ CPU_LOAD_LENGTH (current_cpu) = 2;
+ CPU_LOAD_SIGNED (current_cpu) = 1;
+ return 0xb711; /* any random value */
+ }
+
+ if (GET_HSR0_DCE (hsr0))
+ {
+ int cycles;
+ cycles = frv_cache_read (cache, 0, address);
+ if (cycles != 0)
+ return CACHE_RETURN_DATA (cache, 0, address, HI, 2);
+ }
+
+ return GETMEMHI (current_cpu, pc, address);
+}
+
+UHI
+frvbf_read_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address)
+{
+ USI hsr0;
+ FRV_CACHE *cache;
+
+ /* Check for access exceptions. */
+ address = check_data_read_address (current_cpu, address, 1);
+ address = check_readwrite_address (current_cpu, address, 1);
+
+ /* If we need to count cycles, then the cache operation will be
+ initiated from the model profiling functions.
+ See frvbf_model_.... */
+ hsr0 = GET_HSR0 ();
+ cache = CPU_DATA_CACHE (current_cpu);
+ if (model_insn)
+ {
+ CPU_LOAD_ADDRESS (current_cpu) = address;
+ CPU_LOAD_LENGTH (current_cpu) = 2;
+ CPU_LOAD_SIGNED (current_cpu) = 0;
+ return 0xb711; /* any random value */
+ }
+
+ if (GET_HSR0_DCE (hsr0))
+ {
+ int cycles;
+ cycles = frv_cache_read (cache, 0, address);
+ if (cycles != 0)
+ return CACHE_RETURN_DATA (cache, 0, address, UHI, 2);
+ }
+
+ return GETMEMUHI (current_cpu, pc, address);
+}
+
+SI
+frvbf_read_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
+{
+ FRV_CACHE *cache;
+ USI hsr0;
+
+ /* Check for access exceptions. */
+ address = check_data_read_address (current_cpu, address, 3);
+ address = check_readwrite_address (current_cpu, address, 3);
+
+ hsr0 = GET_HSR0 ();
+ cache = CPU_DATA_CACHE (current_cpu);
+ /* If we need to count cycles, then the cache operation will be
+ initiated from the model profiling functions.
+ See frvbf_model_.... */
+ if (model_insn)
+ {
+ CPU_LOAD_ADDRESS (current_cpu) = address;
+ CPU_LOAD_LENGTH (current_cpu) = 4;
+ return 0x37111319; /* any random value */
+ }
+
+ if (GET_HSR0_DCE (hsr0))
+ {
+ int cycles;
+ cycles = frv_cache_read (cache, 0, address);
+ if (cycles != 0)
+ return CACHE_RETURN_DATA (cache, 0, address, SI, 4);
+ }
+
+ return GETMEMSI (current_cpu, pc, address);
+}
+
+SI
+frvbf_read_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address)
+{
+ return frvbf_read_mem_SI (current_cpu, pc, address);
+}
+
+DI
+frvbf_read_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
+{
+ USI hsr0;
+ FRV_CACHE *cache;
+
+ /* Check for access exceptions. */
+ address = check_data_read_address (current_cpu, address, 7);
+ address = check_readwrite_address (current_cpu, address, 7);
+
+ /* If we need to count cycles, then the cache operation will be
+ initiated from the model profiling functions.
+ See frvbf_model_.... */
+ hsr0 = GET_HSR0 ();
+ cache = CPU_DATA_CACHE (current_cpu);
+ if (model_insn)
+ {
+ CPU_LOAD_ADDRESS (current_cpu) = address;
+ CPU_LOAD_LENGTH (current_cpu) = 8;
+ return 0x37111319; /* any random value */
+ }
+
+ if (GET_HSR0_DCE (hsr0))
+ {
+ int cycles;
+ cycles = frv_cache_read (cache, 0, address);
+ if (cycles != 0)
+ return CACHE_RETURN_DATA (cache, 0, address, DI, 8);
+ }
+
+ return GETMEMDI (current_cpu, pc, address);
+}
+
+DF
+frvbf_read_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address)
+{
+ USI hsr0;
+ FRV_CACHE *cache;
+
+ /* Check for access exceptions. */
+ address = check_data_read_address (current_cpu, address, 7);
+ address = check_readwrite_address (current_cpu, address, 7);
+
+ /* If we need to count cycles, then the cache operation will be
+ initiated from the model profiling functions.
+ See frvbf_model_.... */
+ hsr0 = GET_HSR0 ();
+ cache = CPU_DATA_CACHE (current_cpu);
+ if (model_insn)
+ {
+ CPU_LOAD_ADDRESS (current_cpu) = address;
+ CPU_LOAD_LENGTH (current_cpu) = 8;
+ return 0x37111319; /* any random value */
+ }
+
+ if (GET_HSR0_DCE (hsr0))
+ {
+ int cycles;
+ cycles = frv_cache_read (cache, 0, address);
+ if (cycles != 0)
+ return CACHE_RETURN_DATA (cache, 0, address, DF, 8);
+ }
+
+ return GETMEMDF (current_cpu, pc, address);
+}
+
+USI
+frvbf_read_imem_USI (SIM_CPU *current_cpu, PCADDR vpc)
+{
+ USI hsr0;
+ vpc = check_insn_read_address (current_cpu, vpc, 3);
+
+ hsr0 = GET_HSR0 ();
+ if (GET_HSR0_ICE (hsr0))
+ {
+ FRV_CACHE *cache;
+ USI value;
+
+ /* We don't want this to show up in the cache statistics. That read
+ is done in frvbf_simulate_insn_prefetch. So read the cache or memory
+ passively here. */
+ cache = CPU_INSN_CACHE (current_cpu);
+ if (frv_cache_read_passive_SI (cache, vpc, &value))
+ return value;
+ }
+ return sim_core_read_unaligned_4 (current_cpu, vpc, read_map, vpc);
+}
+
+static SI
+fr400_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
+{
+ if (address & align_mask)
+ {
+ /* On the fr400, this causes a data_access_error. */
+ /* Make sure that this exception is not masked. */
+ USI isr = GET_ISR ();
+ if (! GET_ISR_EMAM (isr))
+ {
+ /* Bad alignment causes a data_access_error on fr400. */
+ frv_queue_data_access_error_interrupt (current_cpu, address);
+ }
+ address &= ~align_mask;
+ }
+ if (align_mask == 7
+ && address >= 0xfe800000 && address <= 0xfeffffff)
+ frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);
+
+ return address;
+}
+
+static SI
+fr500_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
+{
+ if (address & align_mask)
+ {
+ struct frv_interrupt_queue_element *item =
+ frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
+ /* Record the correct vliw slot with the interrupt. */
+ if (item != NULL)
+ item->slot = frv_interrupt_state.slot;
+ address &= ~align_mask;
+ }
+  if ((address >= 0xfeff0600 && address <= 0xfeff7fff)
+      || (address >= 0xfe800000 && address <= 0xfefeffff))
+ frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);
+
+ return address;
+}
+
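+/* Check a data write address on the current architecture.  */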
+static SI
+check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
+{
+ SIM_DESC sd = CPU_STATE (current_cpu);
+ switch (STATE_ARCHITECTURE (sd)->mach)
+ {
+ case bfd_mach_fr400:
+ address = fr400_check_write_address (current_cpu, address, align_mask);
+ break;
+ case bfd_mach_frvtomcat:
+ case bfd_mach_fr500:
+ case bfd_mach_frv:
+ address = fr500_check_write_address (current_cpu, address, align_mask);
+ break;
+ default:
+ break;
+ }
+ return address;
+}
+
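+/* Memory writes.  These queue the write in the CPU's write queue rather
+   than storing immediately.  When the data cache is enabled, the queued
+   entry calls the corresponding frvbf_mem_set_* function below so that
+   the store goes through the cache.  */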
+void
+frvbf_write_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
+{
+ USI hsr0;
+ hsr0 = GET_HSR0 ();
+ if (GET_HSR0_DCE (hsr0))
+ sim_queue_fn_mem_qi_write (current_cpu, frvbf_mem_set_QI, address, value);
+ else
+ sim_queue_mem_qi_write (current_cpu, address, value);
+ frv_set_write_queue_slot (current_cpu);
+}
+
+void
+frvbf_write_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address, UQI value)
+{
+ frvbf_write_mem_QI (current_cpu, pc, address, value);
+}
+
+void
+frvbf_write_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
+{
+ USI hsr0;
+ hsr0 = GET_HSR0 ();
+ if (GET_HSR0_DCE (hsr0))
+ sim_queue_fn_mem_hi_write (current_cpu, frvbf_mem_set_HI, address, value);
+ else
+ sim_queue_mem_hi_write (current_cpu, address, value);
+ frv_set_write_queue_slot (current_cpu);
+}
+
+void
+frvbf_write_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address, UHI value)
+{
+ frvbf_write_mem_HI (current_cpu, pc, address, value);
+}
+
+void
+frvbf_write_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
+{
+ USI hsr0;
+ hsr0 = GET_HSR0 ();
+ if (GET_HSR0_DCE (hsr0))
+ sim_queue_fn_mem_si_write (current_cpu, frvbf_mem_set_SI, address, value);
+ else
+ sim_queue_mem_si_write (current_cpu, address, value);
+ frv_set_write_queue_slot (current_cpu);
+}
+
+void
+frvbf_write_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
+{
+ frvbf_write_mem_SI (current_cpu, pc, address, value);
+}
+
+void
+frvbf_write_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
+{
+ USI hsr0;
+ hsr0 = GET_HSR0 ();
+ if (GET_HSR0_DCE (hsr0))
+ sim_queue_fn_mem_di_write (current_cpu, frvbf_mem_set_DI, address, value);
+ else
+ sim_queue_mem_di_write (current_cpu, address, value);
+ frv_set_write_queue_slot (current_cpu);
+}
+
+void
+frvbf_write_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
+{
+ USI hsr0;
+ hsr0 = GET_HSR0 ();
+ if (GET_HSR0_DCE (hsr0))
+ sim_queue_fn_mem_df_write (current_cpu, frvbf_mem_set_DF, address, value);
+ else
+ sim_queue_mem_df_write (current_cpu, address, value);
+ frv_set_write_queue_slot (current_cpu);
+}
+
+/* Memory writes. These do the actual writing through the cache. */
+void
+frvbf_mem_set_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
+{
+ FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
+
+ /* Check for access errors. */
+ address = check_write_address (current_cpu, address, 0);
+ address = check_readwrite_address (current_cpu, address, 0);
+
+ /* If we need to count cycles, then submit the write request to the cache
+ and let it prioritize the request. Otherwise perform the write now. */
+ if (model_insn)
+ {
+ int slot = UNIT_I0;
+ frv_cache_request_store (cache, address, slot, (char *)&value,
+ sizeof (value));
+ }
+ else
+ frv_cache_write (cache, address, (char *)&value, sizeof (value));
+}
+
+void
+frvbf_mem_set_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
+{
+ FRV_CACHE *cache;
+
+ /* Check for access errors. */
+ address = check_write_address (current_cpu, address, 1);
+ address = check_readwrite_address (current_cpu, address, 1);
+
+ /* If we need to count cycles, then submit the write request to the cache
+ and let it prioritize the request. Otherwise perform the write now. */
+ value = H2T_2 (value);
+ cache = CPU_DATA_CACHE (current_cpu);
+ if (model_insn)
+ {
+ int slot = UNIT_I0;
+ frv_cache_request_store (cache, address, slot,
+ (char *)&value, sizeof (value));
+ }
+ else
+ frv_cache_write (cache, address, (char *)&value, sizeof (value));
+}
+
+void
+frvbf_mem_set_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
+{
+ FRV_CACHE *cache;
+
+ /* Check for access errors. */
+ address = check_write_address (current_cpu, address, 3);
+ address = check_readwrite_address (current_cpu, address, 3);
+
+ /* If we need to count cycles, then submit the write request to the cache
+ and let it prioritize the request. Otherwise perform the write now. */
+ cache = CPU_DATA_CACHE (current_cpu);
+ value = H2T_4 (value);
+ if (model_insn)
+ {
+ int slot = UNIT_I0;
+ frv_cache_request_store (cache, address, slot,
+ (char *)&value, sizeof (value));
+ }
+ else
+ frv_cache_write (cache, address, (char *)&value, sizeof (value));
+}
+
+void
+frvbf_mem_set_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
+{
+ FRV_CACHE *cache;
+
+ /* Check for access errors. */
+ address = check_write_address (current_cpu, address, 7);
+ address = check_readwrite_address (current_cpu, address, 7);
+
+ /* If we need to count cycles, then submit the write request to the cache
+ and let it prioritize the request. Otherwise perform the write now. */
+ value = H2T_8 (value);
+ cache = CPU_DATA_CACHE (current_cpu);
+ if (model_insn)
+ {
+ int slot = UNIT_I0;
+ frv_cache_request_store (cache, address, slot,
+ (char *)&value, sizeof (value));
+ }
+ else
+ frv_cache_write (cache, address, (char *)&value, sizeof (value));
+}
+
+void
+frvbf_mem_set_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
+{
+ FRV_CACHE *cache;
+
+ /* Check for access errors. */
+ address = check_write_address (current_cpu, address, 7);
+ address = check_readwrite_address (current_cpu, address, 7);
+
+ /* If we need to count cycles, then submit the write request to the cache
+ and let it prioritize the request. Otherwise perform the write now. */
+ value = H2T_8 (value);
+ cache = CPU_DATA_CACHE (current_cpu);
+ if (model_insn)
+ {
+ int slot = UNIT_I0;
+ frv_cache_request_store (cache, address, slot,
+ (char *)&value, sizeof (value));
+ }
+ else
+ frv_cache_write (cache, address, (char *)&value, sizeof (value));
+}
+
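+/* Store a 16 byte (four word) value through the data cache.  */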
+void
+frvbf_mem_set_XI (SIM_CPU *current_cpu, IADDR pc, SI address, SI *value)
+{
+ int i;
+ FRV_CACHE *cache;
+
+ /* Check for access errors. */
+ address = check_write_address (current_cpu, address, 0xf);
+ address = check_readwrite_address (current_cpu, address, 0xf);
+
+ /* TODO -- reverse word order as well? */
+ for (i = 0; i < 4; ++i)
+ value[i] = H2T_4 (value[i]);
+
+ /* If we need to count cycles, then submit the write request to the cache
+ and let it prioritize the request. Otherwise perform the write now. */
+ cache = CPU_DATA_CACHE (current_cpu);
+ if (model_insn)
+ {
+ int slot = UNIT_I0;
+ frv_cache_request_store (cache, address, slot, (char*)value, 16);
+ }
+ else
+ frv_cache_write (cache, address, (char*)value, 16);
+}
+
+/* Record the current VLIW slot on the element at the top of the write
+   queue.  */
+void
+frv_set_write_queue_slot (SIM_CPU *current_cpu)
+{
+ FRV_VLIW *vliw = CPU_VLIW (current_cpu);
+ int slot = vliw->next_slot - 1;
+ CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
+ int ix = CGEN_WRITE_QUEUE_INDEX (q) - 1;
+ CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix);
+ CGEN_WRITE_QUEUE_ELEMENT_PIPE (item) = (*vliw->current_vliw)[slot];
+}