/* memory.c -- Memory accessor functions for the AArch64 simulator

   Copyright (C) 2015-2016 Free Software Foundation, Inc.

   Contributed by Red Hat.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "libiberty.h"
#include "memory.h"
#include "simulator.h"
#include "sim-core.h"
static inline void
mem_error (sim_cpu *cpu, const char *message, uint64_t addr)
{
  TRACE_MEMORY (cpu, "ERROR: %s: %" PRIx64, message, addr);
}

/* FIXME: AArch64 requires aligned memory access if SCTLR_ELx.A is set,
   but we are not implementing that here.  */
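/* The FETCH_FUNC* macros define aarch64_get_mem_<NAME> readers.  Each one
   reads N bytes from simulated memory with sim_core_read_unaligned_<N>,
   converts the result through ACCESS_TYPE (giving zero- or sign-extension)
   and traces the access before returning the value.  */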
#define FETCH_FUNC64(RETURN_TYPE, ACCESS_TYPE, NAME, N) \
  RETURN_TYPE \
  aarch64_get_mem_##NAME (sim_cpu *cpu, uint64_t address) \
  { \
    RETURN_TYPE val = (RETURN_TYPE) (ACCESS_TYPE) \
      sim_core_read_unaligned_##N (cpu, 0, read_map, address); \
    TRACE_MEMORY (cpu, "read of %" PRIx64 " (%d bytes) from %" PRIx64, \
                  val, N, address); \
\
    return val; \
  }

FETCH_FUNC64 (uint64_t, uint64_t, u64, 8)
FETCH_FUNC64 (int64_t, int64_t, s64, 8)

#define FETCH_FUNC32(RETURN_TYPE, ACCESS_TYPE, NAME, N) \
  RETURN_TYPE \
  aarch64_get_mem_##NAME (sim_cpu *cpu, uint64_t address) \
  { \
    RETURN_TYPE val = (RETURN_TYPE) (ACCESS_TYPE) \
      sim_core_read_unaligned_##N (cpu, 0, read_map, address); \
    TRACE_MEMORY (cpu, "read of %8x (%d bytes) from %" PRIx64, \
                  val, N, address); \
\
    return val; \
  }

FETCH_FUNC32 (uint32_t, uint32_t, u32, 4)
FETCH_FUNC32 (int32_t, int32_t, s32, 4)
FETCH_FUNC32 (uint32_t, uint16_t, u16, 2)
FETCH_FUNC32 (int32_t, int16_t, s16, 2)
FETCH_FUNC32 (uint32_t, uint8_t, u8, 1)
FETCH_FUNC32 (int32_t, int8_t, s8, 1)
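
/* Read a 128-bit (long double) value from ADDRESS as two 64-bit words.  */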
void
aarch64_get_mem_long_double (sim_cpu *cpu, uint64_t address, FRegister *a)
{
  a->v[0] = sim_core_read_unaligned_8 (cpu, 0, read_map, address);
  a->v[1] = sim_core_read_unaligned_8 (cpu, 0, read_map, address + 8);
}

/* FIXME: AArch64 requires aligned memory access if SCTLR_ELx.A is set,
   but we are not implementing that here.  */
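/* The STORE_FUNC macro defines aarch64_set_mem_<NAME> writers.  Each one
   traces the access and then writes the low N bytes of VALUE to simulated
   memory with sim_core_write_unaligned_<N>.  */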
#define STORE_FUNC(TYPE, NAME, N) \
  void \
  aarch64_set_mem_##NAME (sim_cpu *cpu, uint64_t address, TYPE value) \
  { \
    TRACE_MEMORY (cpu, \
                  "write of %" PRIx64 " (%d bytes) to %" PRIx64, \
                  (uint64_t) value, N, address); \
\
    sim_core_write_unaligned_##N (cpu, 0, write_map, address, value); \
  }

STORE_FUNC (uint64_t, u64, 8)
STORE_FUNC (int64_t, s64, 8)
STORE_FUNC (uint32_t, u32, 4)
STORE_FUNC (int32_t, s32, 4)
STORE_FUNC (uint16_t, u16, 2)
STORE_FUNC (int16_t, s16, 2)
STORE_FUNC (uint8_t, u8, 1)
STORE_FUNC (int8_t, s8, 1)
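
/* Write a 128-bit (long double) value to ADDRESS as two 64-bit words.  */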
void
aarch64_set_mem_long_double (sim_cpu *cpu, uint64_t address, FRegister a)
{
  TRACE_MEMORY (cpu,
                "write of long double %" PRIx64 " %" PRIx64 " to %" PRIx64,
                a.v[0], a.v[1], address);

  sim_core_write_unaligned_8 (cpu, 0, write_map, address, a.v[0]);
  sim_core_write_unaligned_8 (cpu, 0, write_map, address + 8, a.v[1]);
}
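
/* Copy LENGTH bytes of simulated memory starting at ADDRESS into BUFFER.
   If the read falls short, zero the buffer, report the failure and halt
   the simulation with SIGBUS.  */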
void
aarch64_get_mem_blk (sim_cpu * cpu,
                     uint64_t address,
                     char * buffer,
                     unsigned length)
{
  unsigned len;

  len = sim_core_read_buffer (CPU_STATE (cpu), cpu, read_map,
                              buffer, address, length);
  if (len == length)
    return;

  memset (buffer, 0, length);
  if (cpu)
    mem_error (cpu, "read of non-existent mem block at", address);

  sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
                   sim_stopped, SIM_SIGBUS);
}
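
/* Return a host pointer to the simulated memory at ADDRESS, halting
   the simulation with SIGBUS if the address cannot be translated.  */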
const char *
aarch64_get_mem_ptr (sim_cpu *cpu, uint64_t address)
{
  char *addr = sim_core_trans_addr (CPU_STATE (cpu), cpu, read_map, address);

  if (addr == NULL)
    {
      mem_error (cpu, "request for non-existent mem addr of", address);
      sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
                       sim_stopped, SIM_SIGBUS);
    }

  return addr;
}

/* We implement a combined stack and heap.  That way the sbrk()
   function in libgloss/aarch64/syscalls.c has a chance to detect
   an out-of-memory condition by noticing a stack/heap collision.

   The heap starts at the end of loaded memory and carries on up
   to an arbitrary 2GB limit.  */
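
/* Return the start of the heap: the address of the 'end' (or '_end')
   symbol, falling back to an address just below the stack top when
   neither symbol can be found.  */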
uint64_t
aarch64_get_heap_start (sim_cpu *cpu)
{
  uint64_t heap = trace_sym_value (CPU_STATE (cpu), "end");

  if (heap == 0)
    heap = trace_sym_value (CPU_STATE (cpu), "_end");
  if (heap == 0)
    {
      heap = STACK_TOP - 0x100000;
      sim_io_eprintf (CPU_STATE (cpu),
                      "Unable to find 'end' symbol - using address based "
                      "on the stack instead: %" PRIx64 "\n",
                      heap);
    }

  return heap;
}
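
/* Return the (fixed) top of the stack, complaining if the heap start
   has already reached it.  */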
uint64_t
aarch64_get_stack_start (sim_cpu *cpu)
{
  if (aarch64_get_heap_start (cpu) >= STACK_TOP)
    mem_error (cpu, "executable is too big", aarch64_get_heap_start (cpu));

  return STACK_TOP;
}