Diffstat (limited to 'softmmu_template.h')
 softmmu_template.h | 114 ++++++++++++++++++++++---------------------
 1 file changed, 56 insertions(+), 58 deletions(-)
diff --git a/softmmu_template.h b/softmmu_template.h
index 16b0852..39f571b 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -168,9 +168,10 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
-WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
- uintptr_t retaddr)
+WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
+ unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
uintptr_t haddr;
@@ -182,12 +183,11 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
+ if ((addr & (DATA_SIZE - 1)) != 0
+ && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
-#endif
if (!VICTIM_TLB_HIT(ADDR_READ)) {
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
@@ -218,16 +218,16 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
-#ifdef ALIGNED_ONLY
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
-#endif
+ if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ mmu_idx, retaddr);
+ }
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
/* Note the adjustment at the beginning of the function.
Undo that for the recursion. */
- res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
- res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
+ res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
+ res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
shift = (addr & (DATA_SIZE - 1)) * 8;
/* Little-endian combine. */
@@ -236,12 +236,11 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
}
/* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
+ if ((addr & (DATA_SIZE - 1)) != 0
+ && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
-#endif
haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
@@ -256,9 +255,10 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
-WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
- uintptr_t retaddr)
+WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
+ unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
uintptr_t haddr;
@@ -270,12 +270,11 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
+ if ((addr & (DATA_SIZE - 1)) != 0
+ && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
-#endif
if (!VICTIM_TLB_HIT(ADDR_READ)) {
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
@@ -306,16 +305,16 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
-#ifdef ALIGNED_ONLY
- cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
- mmu_idx, retaddr);
-#endif
+ if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ mmu_idx, retaddr);
+ }
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
/* Note the adjustment at the beginning of the function.
Undo that for the recursion. */
- res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
- res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
+ res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
+ res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
shift = (addr & (DATA_SIZE - 1)) * 8;
/* Big-endian combine. */
@@ -324,12 +323,11 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
}
/* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
+ if ((addr & (DATA_SIZE - 1)) != 0
+ && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
-#endif
haddr = addr + env->tlb_table[mmu_idx][index].addend;
res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
@@ -341,7 +339,8 @@ DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
int mmu_idx)
{
- return helper_te_ld_name (env, addr, mmu_idx, GETRA());
+ TCGMemOpIdx oi = make_memop_idx(SHIFT, mmu_idx);
+ return helper_te_ld_name (env, addr, oi, GETRA());
}
#ifndef SOFTMMU_CODE_ACCESS
@@ -350,16 +349,16 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr)
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
+ return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}
# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
- int mmu_idx, uintptr_t retaddr)
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
+ return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif
@@ -386,8 +385,9 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
}
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
- int mmu_idx, uintptr_t retaddr)
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
+ unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
uintptr_t haddr;
@@ -398,12 +398,11 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
+ if ((addr & (DATA_SIZE - 1)) != 0
+ && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
-#endif
if (!VICTIM_TLB_HIT(addr_write)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
}
@@ -431,10 +430,10 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>= TARGET_PAGE_SIZE)) {
int i;
do_unaligned_access:
-#ifdef ALIGNED_ONLY
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
-#endif
+ if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
/* XXX: not efficient, but simple */
/* Note: relies on the fact that tlb_fill() does not remove the
* previous page from the TLB cache. */
@@ -444,18 +443,17 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
/* Note the adjustment at the beginning of the function.
Undo that for the recursion. */
glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
- mmu_idx, retaddr + GETPC_ADJ);
+ oi, retaddr + GETPC_ADJ);
}
return;
}
/* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
+ if ((addr & (DATA_SIZE - 1)) != 0
+ && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
-#endif
haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
@@ -467,8 +465,9 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
- int mmu_idx, uintptr_t retaddr)
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
+ unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
uintptr_t haddr;
@@ -479,12 +478,11 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
+ if ((addr & (DATA_SIZE - 1)) != 0
+ && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
-#endif
if (!VICTIM_TLB_HIT(addr_write)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
}
@@ -512,10 +510,10 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>= TARGET_PAGE_SIZE)) {
int i;
do_unaligned_access:
-#ifdef ALIGNED_ONLY
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
-#endif
+ if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
/* XXX: not efficient, but simple */
/* Note: relies on the fact that tlb_fill() does not remove the
* previous page from the TLB cache. */
@@ -525,18 +523,17 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
/* Note the adjustment at the beginning of the function.
Undo that for the recursion. */
glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
- mmu_idx, retaddr + GETPC_ADJ);
+ oi, retaddr + GETPC_ADJ);
}
return;
}
/* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
- if ((addr & (DATA_SIZE - 1)) != 0) {
+ if ((addr & (DATA_SIZE - 1)) != 0
+ && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
-#endif
haddr = addr + env->tlb_table[mmu_idx][index].addend;
glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
@@ -547,7 +544,8 @@ void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
DATA_TYPE val, int mmu_idx)
{
- helper_te_st_name(env, addr, val, mmu_idx, GETRA());
+ TCGMemOpIdx oi = make_memop_idx(SHIFT, mmu_idx);
+ helper_te_st_name(env, addr, val, oi, GETRA());
}
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
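
For reference when reading the patch above: the helpers now take a single TCGMemOpIdx argument ("oi") that packs the memory operation together with the MMU index, and each former "#ifdef ALIGNED_ONLY" block becomes a runtime test of the MO_AMASK bits. Below is a minimal, self-contained sketch of that packing, modeled on QEMU's tcg/tcg.h from this period; the exact bit values (a 4-bit mmu_idx field, MO_AMASK = 16) are assumptions for illustration, not a verbatim copy.

#include <stdint.h>

/* Sketch of the oi packing assumed by the helpers above (not verbatim). */
typedef uint32_t TCGMemOpIdx;

typedef enum TCGMemOp {
    MO_AMASK = 16,        /* alignment-control bit (assumed value) */
    MO_ALIGN = MO_AMASK,  /* unaligned access faults via cpu_unaligned_access */
    MO_UNALN = 0,         /* unaligned access is permitted */
} TCGMemOp;

/* Pack a memory op and an MMU index into one helper parameter. */
static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
{
    return ((TCGMemOpIdx)op << 4) | (idx & 15);  /* mmu_idx in the low 4 bits */
}

/* Recover the memory op from the packed value. */
static inline TCGMemOp get_memop(TCGMemOpIdx oi)
{
    return (TCGMemOp)(oi >> 4);
}

/* Recover the MMU index from the packed value. */
static inline unsigned get_mmuidx(TCGMemOpIdx oi)
{
    return oi & 15;
}

With this packing, alignment enforcement becomes a per-access property rather than a per-target compile-time one: the load/store helpers test (get_memop(oi) & MO_AMASK) == MO_ALIGN where they previously relied on ALIGNED_ONLY, and the unaligned slow paths simply forward the same oi when they recurse.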