aboutsummaryrefslogtreecommitdiff
path: root/target
diff options
context:
space:
mode:
Diffstat (limited to 'target')
-rw-r--r--target/s390x/arch_dump.c22
-rw-r--r--target/s390x/tcg/mem_helper.c287
-rw-r--r--target/s390x/tcg/translate.c480
-rw-r--r--target/s390x/tcg/translate_vx.c.inc45
4 files changed, 323 insertions, 511 deletions
diff --git a/target/s390x/arch_dump.c b/target/s390x/arch_dump.c
index a232914..cb98f48 100644
--- a/target/s390x/arch_dump.c
+++ b/target/s390x/arch_dump.c
@@ -227,28 +227,28 @@ static int s390x_write_elf64_notes(const char *note_name,
DumpState *s,
const NoteFuncDesc *funcs)
{
- Note note, *notep;
+ g_autofree Note *notep = NULL;
const NoteFuncDesc *nf;
- int note_size, content_size;
+ int note_size, prev_size = 0, content_size;
int ret = -1;
- assert(strlen(note_name) < sizeof(note.name));
+ assert(strlen(note_name) < sizeof(notep->name));
for (nf = funcs; nf->note_contents_func; nf++) {
- notep = &note;
if (nf->pvonly && !s390_is_pv()) {
continue;
}
content_size = nf->note_size_func ? nf->note_size_func() : nf->contents_size;
- note_size = sizeof(note) - sizeof(notep->contents) + content_size;
+ note_size = sizeof(Note) - sizeof(notep->contents) + content_size;
- /* Notes with dynamic sizes need to allocate a note */
- if (nf->note_size_func) {
+ if (prev_size < note_size) {
+ g_free(notep);
notep = g_malloc(note_size);
+ prev_size = note_size;
}
- memset(notep, 0, sizeof(note));
+ memset(notep, 0, note_size);
/* Setup note header data */
notep->hdr.n_descsz = cpu_to_be32(content_size);
@@ -258,15 +258,9 @@ static int s390x_write_elf64_notes(const char *note_name,
/* Get contents and write them out */
(*nf->note_contents_func)(notep, cpu, id);
ret = f(notep, note_size, s);
-
- if (nf->note_size_func) {
- g_free(notep);
- }
-
if (ret < 0) {
return -1;
}
-
}
return 0;
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index d6725fd..e51a0db 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -35,6 +35,12 @@
#include "hw/boards.h"
#endif
+#ifdef CONFIG_USER_ONLY
+# define user_or_likely(X) true
+#else
+# define user_or_likely(X) likely(X)
+#endif
+
/*****************************************************************************/
/* Softmmu support */
@@ -114,19 +120,15 @@ static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
typedef struct S390Access {
target_ulong vaddr1;
target_ulong vaddr2;
- char *haddr1;
- char *haddr2;
+ void *haddr1;
+ void *haddr2;
uint16_t size1;
uint16_t size2;
/*
* If we can't access the host page directly, we'll have to do I/O access
* via ld/st helpers. These are internal details, so we store the
* mmu idx to do the access here instead of passing it around in the
- * helpers. Maybe, one day we can get rid of ld/st access - once we can
- * handle TLB_NOTDIRTY differently. We don't expect these special accesses
- * to trigger exceptions - only if we would have TLB_NOTDIRTY on LAP
- * pages, we might trigger a new MMU translation - very unlikely that
- * the mapping changes in between and we would trigger a fault.
+ * helpers.
*/
int mmu_idx;
} S390Access;
@@ -138,23 +140,27 @@ typedef struct S390Access {
 * For !CONFIG_USER_ONLY, the TEC is stored to env->tlb_fill_tec.
* For CONFIG_USER_ONLY, the faulting address is stored to env->__excp_addr.
*/
-static int s390_probe_access(CPUArchState *env, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx,
- bool nonfault, void **phost, uintptr_t ra)
+static inline int s390_probe_access(CPUArchState *env, target_ulong addr,
+ int size, MMUAccessType access_type,
+ int mmu_idx, bool nonfault,
+ void **phost, uintptr_t ra)
{
-#if defined(CONFIG_USER_ONLY)
- return probe_access_flags(env, addr, access_type, mmu_idx,
- nonfault, phost, ra);
-#else
- int flags;
+ int flags = probe_access_flags(env, addr, access_type, mmu_idx,
+ nonfault, phost, ra);
- env->tlb_fill_exc = 0;
- flags = probe_access_flags(env, addr, access_type, mmu_idx, nonfault, phost,
- ra);
- if (env->tlb_fill_exc) {
+ if (unlikely(flags & TLB_INVALID_MASK)) {
+ assert(!nonfault);
+#ifdef CONFIG_USER_ONLY
+ /* Address is in TEC in system mode; see s390_cpu_record_sigsegv. */
+ env->__excp_addr = addr & TARGET_PAGE_MASK;
+ return (page_get_flags(addr) & PAGE_VALID
+ ? PGM_PROTECTION : PGM_ADDRESSING);
+#else
return env->tlb_fill_exc;
+#endif
}
+#ifndef CONFIG_USER_ONLY
if (unlikely(flags & TLB_WATCHPOINT)) {
/* S390 does not presently use transaction attributes. */
cpu_check_watchpoint(env_cpu(env), addr, size,
@@ -162,8 +168,9 @@ static int s390_probe_access(CPUArchState *env, target_ulong addr, int size,
(access_type == MMU_DATA_STORE
? BP_MEM_WRITE : BP_MEM_READ), ra);
}
- return 0;
#endif
+
+ return 0;
}
static int access_prepare_nf(S390Access *access, CPUS390XState *env,
@@ -171,51 +178,46 @@ static int access_prepare_nf(S390Access *access, CPUS390XState *env,
MMUAccessType access_type,
int mmu_idx, uintptr_t ra)
{
- void *haddr1, *haddr2 = NULL;
int size1, size2, exc;
- vaddr vaddr2 = 0;
assert(size > 0 && size <= 4096);
size1 = MIN(size, -(vaddr1 | TARGET_PAGE_MASK)),
size2 = size - size1;
+ memset(access, 0, sizeof(*access));
+ access->vaddr1 = vaddr1;
+ access->size1 = size1;
+ access->size2 = size2;
+ access->mmu_idx = mmu_idx;
+
exc = s390_probe_access(env, vaddr1, size1, access_type, mmu_idx, nonfault,
- &haddr1, ra);
- if (exc) {
+ &access->haddr1, ra);
+ if (unlikely(exc)) {
return exc;
}
if (unlikely(size2)) {
/* The access crosses page boundaries. */
- vaddr2 = wrap_address(env, vaddr1 + size1);
+ vaddr vaddr2 = wrap_address(env, vaddr1 + size1);
+
+ access->vaddr2 = vaddr2;
exc = s390_probe_access(env, vaddr2, size2, access_type, mmu_idx,
- nonfault, &haddr2, ra);
- if (exc) {
+ nonfault, &access->haddr2, ra);
+ if (unlikely(exc)) {
return exc;
}
}
-
- *access = (S390Access) {
- .vaddr1 = vaddr1,
- .vaddr2 = vaddr2,
- .haddr1 = haddr1,
- .haddr2 = haddr2,
- .size1 = size1,
- .size2 = size2,
- .mmu_idx = mmu_idx
- };
return 0;
}
-static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size,
- MMUAccessType access_type, int mmu_idx,
- uintptr_t ra)
+static inline void access_prepare(S390Access *ret, CPUS390XState *env,
+ vaddr vaddr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t ra)
{
- S390Access ret;
- int exc = access_prepare_nf(&ret, env, false, vaddr, size,
+ int exc = access_prepare_nf(ret, env, false, vaddr, size,
access_type, mmu_idx, ra);
assert(!exc);
- return ret;
}
/* Helper to handle memset on a single page. */
@@ -224,28 +226,14 @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
- g_assert(haddr);
memset(haddr, byte, size);
#else
- MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
- int i;
-
if (likely(haddr)) {
memset(haddr, byte, size);
} else {
- /*
- * Do a single access and test if we can then get access to the
- * page. This is especially relevant to speed up TLB_NOTDIRTY.
- */
- g_assert(size > 0);
- cpu_stb_mmu(env, vaddr, byte, oi, ra);
- haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
- if (likely(haddr)) {
- memset(haddr + 1, byte, size - 1);
- } else {
- for (i = 1; i < size; i++) {
- cpu_stb_mmu(env, vaddr + i, byte, oi, ra);
- }
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+ for (int i = 0; i < size; i++) {
+ cpu_stb_mmu(env, vaddr + i, byte, oi, ra);
}
}
#endif
@@ -264,70 +252,43 @@ static void access_memset(CPUS390XState *env, S390Access *desta,
desta->mmu_idx, ra);
}
-static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
- int offset, int mmu_idx, uintptr_t ra)
-{
-#ifdef CONFIG_USER_ONLY
- return ldub_p(*haddr + offset);
-#else
- MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
- uint8_t byte;
-
- if (likely(*haddr)) {
- return ldub_p(*haddr + offset);
- }
- /*
- * Do a single access and test if we can then get access to the
- * page. This is especially relevant to speed up TLB_NOTDIRTY.
- */
- byte = cpu_ldb_mmu(env, vaddr + offset, oi, ra);
- *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx);
- return byte;
-#endif
-}
-
static uint8_t access_get_byte(CPUS390XState *env, S390Access *access,
int offset, uintptr_t ra)
{
- if (offset < access->size1) {
- return do_access_get_byte(env, access->vaddr1, &access->haddr1,
- offset, access->mmu_idx, ra);
- }
- return do_access_get_byte(env, access->vaddr2, &access->haddr2,
- offset - access->size1, access->mmu_idx, ra);
-}
+ target_ulong vaddr = access->vaddr1;
+ void *haddr = access->haddr1;
-static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
- int offset, uint8_t byte, int mmu_idx,
- uintptr_t ra)
-{
-#ifdef CONFIG_USER_ONLY
- stb_p(*haddr + offset, byte);
-#else
- MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+ if (unlikely(offset >= access->size1)) {
+ offset -= access->size1;
+ vaddr = access->vaddr2;
+ haddr = access->haddr2;
+ }
- if (likely(*haddr)) {
- stb_p(*haddr + offset, byte);
- return;
+ if (user_or_likely(haddr)) {
+ return ldub_p(haddr + offset);
+ } else {
+ MemOpIdx oi = make_memop_idx(MO_UB, access->mmu_idx);
+ return cpu_ldb_mmu(env, vaddr + offset, oi, ra);
}
- /*
- * Do a single access and test if we can then get access to the
- * page. This is especially relevant to speed up TLB_NOTDIRTY.
- */
- cpu_stb_mmu(env, vaddr + offset, byte, oi, ra);
- *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
-#endif
}
static void access_set_byte(CPUS390XState *env, S390Access *access,
int offset, uint8_t byte, uintptr_t ra)
{
- if (offset < access->size1) {
- do_access_set_byte(env, access->vaddr1, &access->haddr1, offset, byte,
- access->mmu_idx, ra);
+ target_ulong vaddr = access->vaddr1;
+ void *haddr = access->haddr1;
+
+ if (unlikely(offset >= access->size1)) {
+ offset -= access->size1;
+ vaddr = access->vaddr2;
+ haddr = access->haddr2;
+ }
+
+ if (user_or_likely(haddr)) {
+ stb_p(haddr + offset, byte);
} else {
- do_access_set_byte(env, access->vaddr2, &access->haddr2,
- offset - access->size1, byte, access->mmu_idx, ra);
+ MemOpIdx oi = make_memop_idx(MO_UB, access->mmu_idx);
+ cpu_stb_mmu(env, vaddr + offset, byte, oi, ra);
}
}
@@ -338,16 +299,17 @@ static void access_set_byte(CPUS390XState *env, S390Access *access,
static void access_memmove(CPUS390XState *env, S390Access *desta,
S390Access *srca, uintptr_t ra)
{
+ int len = desta->size1 + desta->size2;
int diff;
- g_assert(desta->size1 + desta->size2 == srca->size1 + srca->size2);
+ assert(len == srca->size1 + srca->size2);
/* Fallback to slow access in case we don't have access to all host pages */
if (unlikely(!desta->haddr1 || (desta->size2 && !desta->haddr2) ||
!srca->haddr1 || (srca->size2 && !srca->haddr2))) {
int i;
- for (i = 0; i < desta->size1 + desta->size2; i++) {
+ for (i = 0; i < len; i++) {
uint8_t byte = access_get_byte(env, srca, i, ra);
access_set_byte(env, desta, i, byte, ra);
@@ -355,20 +317,20 @@ static void access_memmove(CPUS390XState *env, S390Access *desta,
return;
}
- if (srca->size1 == desta->size1) {
+ diff = desta->size1 - srca->size1;
+ if (likely(diff == 0)) {
memmove(desta->haddr1, srca->haddr1, srca->size1);
if (unlikely(srca->size2)) {
memmove(desta->haddr2, srca->haddr2, srca->size2);
}
- } else if (srca->size1 < desta->size1) {
- diff = desta->size1 - srca->size1;
+ } else if (diff > 0) {
memmove(desta->haddr1, srca->haddr1, srca->size1);
memmove(desta->haddr1 + srca->size1, srca->haddr2, diff);
if (likely(desta->size2)) {
memmove(desta->haddr2, srca->haddr2 + diff, desta->size2);
}
} else {
- diff = srca->size1 - desta->size1;
+ diff = -diff;
memmove(desta->haddr1, srca->haddr1, desta->size1);
memmove(desta->haddr2, srca->haddr1 + desta->size1, diff);
if (likely(srca->size2)) {
@@ -407,9 +369,9 @@ static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
/* NC always processes one more byte than specified - maximum is 256 */
l++;
- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
for (i = 0; i < l; i++) {
const uint8_t x = access_get_byte(env, &srca1, i, ra) &
access_get_byte(env, &srca2, i, ra);
@@ -441,9 +403,9 @@ static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
/* XC always processes one more byte than specified - maximum is 256 */
l++;
- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
/* xor with itself is the same as memset(0) */
if (src == dest) {
@@ -482,9 +444,9 @@ static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
/* OC always processes one more byte than specified - maximum is 256 */
l++;
- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
for (i = 0; i < l; i++) {
const uint8_t x = access_get_byte(env, &srca1, i, ra) |
access_get_byte(env, &srca2, i, ra);
@@ -515,8 +477,8 @@ static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
/* MVC always copies one more byte than specified - maximum is 256 */
l++;
- srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
/*
* "When the operands overlap, the result is obtained as if the operands
@@ -554,8 +516,8 @@ void HELPER(mvcrl)(CPUS390XState *env, uint64_t l, uint64_t dest, uint64_t src)
/* MVCRL always copies one more byte than specified - maximum is 256 */
l++;
- srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
for (i = l - 1; i >= 0; i--) {
uint8_t byte = access_get_byte(env, &srca, i, ra);
@@ -575,8 +537,8 @@ void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
l++;
src = wrap_address(env, src - l + 1);
- srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
for (i = 0; i < l; i++) {
const uint8_t x = access_get_byte(env, &srca, l - i - 1, ra);
@@ -595,9 +557,9 @@ void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
/* MVN always copies one more byte than specified - maximum is 256 */
l++;
- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
for (i = 0; i < l; i++) {
const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0x0f) |
(access_get_byte(env, &srca2, i, ra) & 0xf0);
@@ -618,8 +580,8 @@ void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
S390Access srca, desta;
int i, j;
- srca = access_prepare(env, src, len_src, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, len_dest, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca, env, src, len_src, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, len_dest, MMU_DATA_STORE, mmu_idx, ra);
/* Handle rightmost byte */
byte_dest = cpu_ldub_data_ra(env, dest + len_dest - 1, ra);
@@ -651,9 +613,9 @@ void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
/* MVZ always copies one more byte than specified - maximum is 256 */
l++;
- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
for (i = 0; i < l; i++) {
const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0xf0) |
(access_get_byte(env, &srca2, i, ra) & 0x0f);
@@ -997,8 +959,8 @@ uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
* this point). We might over-indicate watchpoints within the pages
* (if we ever care, we have to limit processing to a single byte).
*/
- srca = access_prepare(env, s, len, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, d, len, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca, env, s, len, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, d, len, MMU_DATA_STORE, mmu_idx, ra);
for (i = 0; i < len; i++) {
const uint8_t v = access_get_byte(env, &srca, i, ra);
@@ -1085,19 +1047,19 @@ static inline uint32_t do_mvcl(CPUS390XState *env,
len = MIN(MIN(*srclen, -(*src | TARGET_PAGE_MASK)), len);
*destlen -= len;
*srclen -= len;
- srca = access_prepare(env, *src, len, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca, env, *src, len, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
access_memmove(env, &desta, &srca, ra);
*src = wrap_address(env, *src + len);
*dest = wrap_address(env, *dest + len);
} else if (wordsize == 1) {
/* Pad the remaining area */
*destlen -= len;
- desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&desta, env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
access_memset(env, &desta, pad, ra);
*dest = wrap_address(env, *dest + len);
} else {
- desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&desta, env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
/* The remaining length selects the padding byte. */
for (i = 0; i < len; (*destlen)--, i++) {
@@ -1153,16 +1115,16 @@ uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
while (destlen) {
cur_len = MIN(destlen, -(dest | TARGET_PAGE_MASK));
if (!srclen) {
- desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
- ra);
+ access_prepare(&desta, env, dest, cur_len,
+ MMU_DATA_STORE, mmu_idx, ra);
access_memset(env, &desta, pad, ra);
} else {
cur_len = MIN(MIN(srclen, -(src | TARGET_PAGE_MASK)), cur_len);
- srca = access_prepare(env, src, cur_len, MMU_DATA_LOAD, mmu_idx,
- ra);
- desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
- ra);
+ access_prepare(&srca, env, src, cur_len,
+ MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, cur_len,
+ MMU_DATA_STORE, mmu_idx, ra);
access_memmove(env, &desta, &srca, ra);
src = wrap_address(env, src + cur_len);
srclen -= cur_len;
@@ -2267,8 +2229,8 @@ uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2,
return cc;
}
- srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra);
- desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra);
+ access_prepare(&srca, env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra);
+ access_prepare(&desta, env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra);
access_memmove(env, &desta, &srca, ra);
return cc;
}
@@ -2301,9 +2263,8 @@ uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2,
} else if (!l) {
return cc;
}
-
- srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra);
- desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra);
+ access_prepare(&srca, env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra);
+ access_prepare(&desta, env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra);
access_memmove(env, &desta, &srca, ra);
return cc;
}
@@ -2644,10 +2605,12 @@ uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
/* FIXME: Access using correct keys and AR-mode */
if (len) {
- S390Access srca = access_prepare(env, src, len, MMU_DATA_LOAD,
- mmu_idx_from_as(src_as), ra);
- S390Access desta = access_prepare(env, dest, len, MMU_DATA_STORE,
- mmu_idx_from_as(dest_as), ra);
+ S390Access srca, desta;
+
+ access_prepare(&srca, env, src, len, MMU_DATA_LOAD,
+ mmu_idx_from_as(src_as), ra);
+ access_prepare(&desta, env, dest, len, MMU_DATA_STORE,
+ mmu_idx_from_as(dest_as), ra);
access_memmove(env, &desta, &srca, ra);
}
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index ac5bd98..faa6f73 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -171,8 +171,6 @@ static uint64_t inline_branch_miss[CC_OP_MAX];
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
- TCGv_i64 tmp;
-
if (s->base.tb->flags & FLAG_MASK_32) {
if (s->base.tb->flags & FLAG_MASK_64) {
tcg_gen_movi_i64(out, pc);
@@ -181,9 +179,7 @@ static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
pc |= 0x80000000;
}
assert(!(s->base.tb->flags & FLAG_MASK_64));
- tmp = tcg_const_i64(pc);
- tcg_gen_deposit_i64(out, out, tmp, 0, 32);
- tcg_temp_free_i64(tmp);
+ tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
static TCGv_i64 psw_addr;
@@ -360,11 +356,8 @@ static void per_branch(DisasContext *s, bool to_next)
tcg_gen_movi_i64(gbea, s->base.pc_next);
if (s->base.tb->flags & FLAG_MASK_PER) {
- TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
+ TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
gen_helper_per_branch(cpu_env, gbea, next_pc);
- if (to_next) {
- tcg_temp_free_i64(next_pc);
- }
}
#endif
}
@@ -382,9 +375,8 @@ static void per_branch_cond(DisasContext *s, TCGCond cond,
gen_set_label(lab);
} else {
- TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
+ TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
- tcg_temp_free_i64(pc);
}
#endif
}
@@ -438,23 +430,17 @@ static int get_mem_index(DisasContext *s)
static void gen_exception(int excp)
{
- TCGv_i32 tmp = tcg_const_i32(excp);
- gen_helper_exception(cpu_env, tmp);
- tcg_temp_free_i32(tmp);
+ gen_helper_exception(cpu_env, tcg_constant_i32(excp));
}
static void gen_program_exception(DisasContext *s, int code)
{
- TCGv_i32 tmp;
-
- /* Remember what pgm exception this was. */
- tmp = tcg_const_i32(code);
- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
- tcg_temp_free_i32(tmp);
+ /* Remember what pgm exception this was. */
+ tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
+ offsetof(CPUS390XState, int_pgm_code));
- tmp = tcg_const_i32(s->ilen);
- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
- tcg_temp_free_i32(tmp);
+ tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
+ offsetof(CPUS390XState, int_pgm_ilen));
/* update the psw */
update_psw_addr(s);
@@ -473,9 +459,7 @@ static inline void gen_illegal_opcode(DisasContext *s)
static inline void gen_data_exception(uint8_t dxc)
{
- TCGv_i32 tmp = tcg_const_i32(dxc);
- gen_helper_data_exception(cpu_env, tmp);
- tcg_temp_free_i32(tmp);
+ gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
}
static inline void gen_trap(DisasContext *s)
@@ -596,13 +580,13 @@ static void gen_op_calc_cc(DisasContext *s)
switch (s->cc_op) {
default:
- dummy = tcg_const_i64(0);
+ dummy = tcg_constant_i64(0);
/* FALLTHRU */
case CC_OP_ADD_64:
case CC_OP_SUB_64:
case CC_OP_ADD_32:
case CC_OP_SUB_32:
- local_cc_op = tcg_const_i32(s->cc_op);
+ local_cc_op = tcg_constant_i32(s->cc_op);
break;
case CC_OP_CONST0:
case CC_OP_CONST1:
@@ -675,13 +659,6 @@ static void gen_op_calc_cc(DisasContext *s)
tcg_abort();
}
- if (local_cc_op) {
- tcg_temp_free_i32(local_cc_op);
- }
- if (dummy) {
- tcg_temp_free_i64(dummy);
- }
-
/* We now have cc in cc_op as constant */
set_cc_static(s);
}
@@ -868,7 +845,7 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
c->is_64 = false;
c->u.s32.a = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
- c->u.s32.b = tcg_const_i32(0);
+ c->u.s32.b = tcg_constant_i32(0);
break;
case CC_OP_LTGT_32:
case CC_OP_LTUGTU_32:
@@ -883,7 +860,7 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_NZ:
case CC_OP_FLOGR:
c->u.s64.a = cc_dst;
- c->u.s64.b = tcg_const_i64(0);
+ c->u.s64.b = tcg_constant_i64(0);
c->g1 = true;
break;
case CC_OP_LTGT_64:
@@ -897,14 +874,14 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_TM_64:
case CC_OP_ICM:
c->u.s64.a = tcg_temp_new_i64();
- c->u.s64.b = tcg_const_i64(0);
+ c->u.s64.b = tcg_constant_i64(0);
tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
break;
case CC_OP_ADDU:
case CC_OP_SUBU:
c->is_64 = true;
- c->u.s64.b = tcg_const_i64(0);
+ c->u.s64.b = tcg_constant_i64(0);
c->g1 = true;
switch (mask) {
case 8 | 2:
@@ -927,65 +904,65 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
switch (mask) {
case 0x8 | 0x4 | 0x2: /* cc != 3 */
cond = TCG_COND_NE;
- c->u.s32.b = tcg_const_i32(3);
+ c->u.s32.b = tcg_constant_i32(3);
break;
case 0x8 | 0x4 | 0x1: /* cc != 2 */
cond = TCG_COND_NE;
- c->u.s32.b = tcg_const_i32(2);
+ c->u.s32.b = tcg_constant_i32(2);
break;
case 0x8 | 0x2 | 0x1: /* cc != 1 */
cond = TCG_COND_NE;
- c->u.s32.b = tcg_const_i32(1);
+ c->u.s32.b = tcg_constant_i32(1);
break;
case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
cond = TCG_COND_EQ;
c->g1 = false;
c->u.s32.a = tcg_temp_new_i32();
- c->u.s32.b = tcg_const_i32(0);
+ c->u.s32.b = tcg_constant_i32(0);
tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
break;
case 0x8 | 0x4: /* cc < 2 */
cond = TCG_COND_LTU;
- c->u.s32.b = tcg_const_i32(2);
+ c->u.s32.b = tcg_constant_i32(2);
break;
case 0x8: /* cc == 0 */
cond = TCG_COND_EQ;
- c->u.s32.b = tcg_const_i32(0);
+ c->u.s32.b = tcg_constant_i32(0);
break;
case 0x4 | 0x2 | 0x1: /* cc != 0 */
cond = TCG_COND_NE;
- c->u.s32.b = tcg_const_i32(0);
+ c->u.s32.b = tcg_constant_i32(0);
break;
case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
cond = TCG_COND_NE;
c->g1 = false;
c->u.s32.a = tcg_temp_new_i32();
- c->u.s32.b = tcg_const_i32(0);
+ c->u.s32.b = tcg_constant_i32(0);
tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
break;
case 0x4: /* cc == 1 */
cond = TCG_COND_EQ;
- c->u.s32.b = tcg_const_i32(1);
+ c->u.s32.b = tcg_constant_i32(1);
break;
case 0x2 | 0x1: /* cc > 1 */
cond = TCG_COND_GTU;
- c->u.s32.b = tcg_const_i32(1);
+ c->u.s32.b = tcg_constant_i32(1);
break;
case 0x2: /* cc == 2 */
cond = TCG_COND_EQ;
- c->u.s32.b = tcg_const_i32(2);
+ c->u.s32.b = tcg_constant_i32(2);
break;
case 0x1: /* cc == 3 */
cond = TCG_COND_EQ;
- c->u.s32.b = tcg_const_i32(3);
+ c->u.s32.b = tcg_constant_i32(3);
break;
default:
/* CC is masked by something else: (8 >> cc) & mask. */
cond = TCG_COND_NE;
c->g1 = false;
- c->u.s32.a = tcg_const_i32(8);
- c->u.s32.b = tcg_const_i32(0);
- tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
+ c->u.s32.a = tcg_temp_new_i32();
+ c->u.s32.b = tcg_constant_i32(0);
+ tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
break;
}
@@ -1300,9 +1277,9 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
Most commonly we're single-stepping or some other condition that
disables all use of goto_tb. Just update the PC and exit. */
- TCGv_i64 next = tcg_const_i64(s->pc_tmp);
+ TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
if (is_imm) {
- cdest = tcg_const_i64(dest);
+ cdest = tcg_constant_i64(dest);
}
if (c->is_64) {
@@ -1312,21 +1289,15 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
} else {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i64 t1 = tcg_temp_new_i64();
- TCGv_i64 z = tcg_const_i64(0);
+ TCGv_i64 z = tcg_constant_i64(0);
tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
tcg_gen_extu_i32_i64(t1, t0);
tcg_temp_free_i32(t0);
tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
per_branch_cond(s, TCG_COND_NE, t1, z);
tcg_temp_free_i64(t1);
- tcg_temp_free_i64(z);
}
- if (is_imm) {
- tcg_temp_free_i64(cdest);
- }
- tcg_temp_free_i64(next);
-
ret = DISAS_PC_UPDATED;
}
@@ -1410,10 +1381,9 @@ static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
compute_carry(s);
- TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
- tcg_temp_free_i64(zero);
return DISAS_NEXT;
}
@@ -1649,7 +1619,7 @@ static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
tcg_gen_subi_i64(t, regs[r1], 1);
store_reg32_i64(r1, t);
c.u.s32.a = tcg_temp_new_i32();
- c.u.s32.b = tcg_const_i32(0);
+ c.u.s32.b = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(c.u.s32.a, t);
tcg_temp_free_i64(t);
@@ -1673,7 +1643,7 @@ static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
tcg_gen_subi_i64(t, t, 1);
store_reg32h_i64(r1, t);
c.u.s32.a = tcg_temp_new_i32();
- c.u.s32.b = tcg_const_i32(0);
+ c.u.s32.b = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(c.u.s32.a, t);
tcg_temp_free_i64(t);
@@ -1694,7 +1664,7 @@ static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
tcg_gen_subi_i64(regs[r1], regs[r1], 1);
c.u.s64.a = regs[r1];
- c.u.s64.b = tcg_const_i64(0);
+ c.u.s64.b = tcg_constant_i64(0);
return help_branch(s, &c, is_imm, imm, o->in2);
}
@@ -1820,7 +1790,7 @@ static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
return NULL;
}
- return tcg_const_i32(deposit32(m3, 4, 4, m4));
+ return tcg_constant_i32(deposit32(m3, 4, 4, m4));
}
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
@@ -1831,7 +1801,6 @@ static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1844,7 +1813,6 @@ static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1857,7 +1825,6 @@ static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1870,7 +1837,6 @@ static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1883,7 +1849,6 @@ static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1896,7 +1861,6 @@ static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1909,7 +1873,6 @@ static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1922,7 +1885,6 @@ static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1935,7 +1897,6 @@ static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1948,7 +1909,6 @@ static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1961,7 +1921,6 @@ static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1974,7 +1933,6 @@ static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1987,7 +1945,6 @@ static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cegb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -1999,7 +1956,6 @@ static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2011,7 +1967,6 @@ static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2023,7 +1978,6 @@ static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_celgb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2035,7 +1989,6 @@ static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2047,7 +2000,6 @@ static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2092,9 +2044,8 @@ static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
break;
default:
- vl = tcg_const_i32(l);
+ vl = tcg_constant_i32(l);
gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
- tcg_temp_free_i32(vl);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2114,11 +2065,9 @@ static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- t1 = tcg_const_i32(r1);
- t2 = tcg_const_i32(r2);
+ t1 = tcg_constant_i32(r1);
+ t2 = tcg_constant_i32(r2);
gen_helper_clcl(cc_op, cpu_env, t1, t2);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2135,11 +2084,9 @@ static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- t1 = tcg_const_i32(r1);
- t3 = tcg_const_i32(r3);
+ t1 = tcg_constant_i32(r1);
+ t3 = tcg_constant_i32(r3);
gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t3);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2156,24 +2103,22 @@ static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- t1 = tcg_const_i32(r1);
- t3 = tcg_const_i32(r3);
+ t1 = tcg_constant_i32(r1);
+ t3 = tcg_constant_i32(r3);
gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t3);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
- TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
+ TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
TCGv_i32 t1 = tcg_temp_new_i32();
+
tcg_gen_extrl_i64_i32(t1, o->in1);
gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
set_cc_static(s);
tcg_temp_free_i32(t1);
- tcg_temp_free_i32(m3);
return DISAS_NEXT;
}
@@ -2251,14 +2196,13 @@ static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
int r3 = get_field(s, r3);
- TCGv_i32 t_r3 = tcg_const_i32(r3);
+ TCGv_i32 t_r3 = tcg_constant_i32(r3);
if (tb_cflags(s->base.tb) & CF_PARALLEL) {
gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
} else {
gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
}
- tcg_temp_free_i32(t_r3);
set_cc_static(s);
return DISAS_NEXT;
@@ -2356,9 +2300,9 @@ static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
m3 = 0;
}
- tr1 = tcg_const_i32(r1);
- tr2 = tcg_const_i32(r2);
- chk = tcg_const_i32(m3);
+ tr1 = tcg_constant_i32(r1);
+ tr2 = tcg_constant_i32(r2);
+ chk = tcg_constant_i32(m3);
switch (s->insn->data) {
case 12:
@@ -2383,9 +2327,6 @@ static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
g_assert_not_reached();
}
- tcg_temp_free_i32(tr1);
- tcg_temp_free_i32(tr2);
- tcg_temp_free_i32(chk);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2393,15 +2334,11 @@ static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
- TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+ TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
gen_helper_diag(cpu_env, r1, r3, func_code);
-
- tcg_temp_free_i32(func_code);
- tcg_temp_free_i32(r3);
- tcg_temp_free_i32(r1);
return DISAS_NEXT;
}
#endif
@@ -2512,18 +2449,13 @@ static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
update_cc_op(s);
if (r1 == 0) {
- v1 = tcg_const_i64(0);
+ v1 = tcg_constant_i64(0);
} else {
v1 = regs[r1];
}
- ilen = tcg_const_i32(s->ilen);
+ ilen = tcg_constant_i32(s->ilen);
gen_helper_ex(cpu_env, ilen, v1, o->in2);
- tcg_temp_free_i32(ilen);
-
- if (r1 == 0) {
- tcg_temp_free_i64(v1);
- }
return DISAS_PC_CC_UPDATED;
}
@@ -2536,7 +2468,6 @@ static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_fieb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2548,7 +2479,6 @@ static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_fidb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2560,7 +2490,6 @@ static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2674,12 +2603,11 @@ static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
TCGv_i32 m4;
if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
- m4 = tcg_const_i32(get_field(s, m4));
+ m4 = tcg_constant_i32(get_field(s, m4));
} else {
- m4 = tcg_const_i32(0);
+ m4 = tcg_constant_i32(0);
}
gen_helper_idte(cpu_env, o->in1, o->in2, m4);
- tcg_temp_free_i32(m4);
return DISAS_NEXT;
}
@@ -2688,12 +2616,11 @@ static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
TCGv_i32 m4;
if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
- m4 = tcg_const_i32(get_field(s, m4));
+ m4 = tcg_constant_i32(get_field(s, m4));
} else {
- m4 = tcg_const_i32(0);
+ m4 = tcg_constant_i32(0);
}
gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
- tcg_temp_free_i32(m4);
return DISAS_NEXT;
}
@@ -2749,16 +2676,12 @@ static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
g_assert_not_reached();
};
- t_r1 = tcg_const_i32(r1);
- t_r2 = tcg_const_i32(r2);
- t_r3 = tcg_const_i32(r3);
- type = tcg_const_i32(s->insn->data);
+ t_r1 = tcg_constant_i32(r1);
+ t_r2 = tcg_constant_i32(r2);
+ t_r3 = tcg_constant_i32(r3);
+ type = tcg_constant_i32(s->insn->data);
gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
set_cc_static(s);
- tcg_temp_free_i32(t_r1);
- tcg_temp_free_i32(t_r2);
- tcg_temp_free_i32(t_r3);
- tcg_temp_free_i32(type);
return DISAS_NEXT;
}
@@ -2841,7 +2764,6 @@ static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_ledb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2853,7 +2775,6 @@ static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2865,7 +2786,6 @@ static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -3017,10 +2937,9 @@ static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
tcg_gen_extu_i32_i64(t, t32);
tcg_temp_free_i32(t32);
- z = tcg_const_i64(0);
+ z = tcg_constant_i64(0);
tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
tcg_temp_free_i64(t);
- tcg_temp_free_i64(z);
}
return DISAS_NEXT;
@@ -3029,11 +2948,10 @@ static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+
gen_helper_lctl(cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
/* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
s->exit_to_mainloop = true;
return DISAS_TOO_MANY;
@@ -3041,11 +2959,10 @@ static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+
gen_helper_lctlg(cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
/* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
s->exit_to_mainloop = true;
return DISAS_TOO_MANY;
@@ -3105,11 +3022,10 @@ static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+
gen_helper_lam(cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
return DISAS_NEXT;
}
@@ -3319,9 +3235,6 @@ static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
-#if !defined(CONFIG_USER_ONLY)
- TCGv_i32 i2;
-#endif
const uint16_t monitor_class = get_field(s, i2);
if (monitor_class & 0xff00) {
@@ -3330,9 +3243,8 @@ static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
}
#if !defined(CONFIG_USER_ONLY)
- i2 = tcg_const_i32(monitor_class);
- gen_helper_monitor_call(cpu_env, o->addr1, i2);
- tcg_temp_free_i32(i2);
+ gen_helper_monitor_call(cpu_env, o->addr1,
+ tcg_constant_i32(monitor_class));
#endif
/* Defaults to a NOP. */
return DISAS_NEXT;
@@ -3396,9 +3308,9 @@ static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
@@ -3410,9 +3322,9 @@ static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
@@ -3428,11 +3340,9 @@ static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- t1 = tcg_const_i32(r1);
- t2 = tcg_const_i32(r2);
+ t1 = tcg_constant_i32(r1);
+ t2 = tcg_constant_i32(r2);
gen_helper_mvcl(cc_op, cpu_env, t1, t2);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3449,11 +3359,9 @@ static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- t1 = tcg_const_i32(r1);
- t3 = tcg_const_i32(r3);
+ t1 = tcg_constant_i32(r1);
+ t3 = tcg_constant_i32(r3);
gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t3);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3470,11 +3378,9 @@ static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- t1 = tcg_const_i32(r1);
- t3 = tcg_const_i32(r3);
+ t1 = tcg_constant_i32(r1);
+ t3 = tcg_constant_i32(r3);
gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t3);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3509,49 +3415,45 @@ static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
- TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
- TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
gen_helper_mvst(cc_op, cpu_env, t1, t2);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
@@ -3637,13 +3539,12 @@ static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
- TCGv_i64 z, n;
- z = tcg_const_i64(0);
- n = tcg_temp_new_i64();
+ TCGv_i64 z = tcg_constant_i64(0);
+ TCGv_i64 n = tcg_temp_new_i64();
+
tcg_gen_neg_i64(n, o->in2);
tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
tcg_temp_free_i64(n);
- tcg_temp_free_i64(z);
return DISAS_NEXT;
}
@@ -3668,9 +3569,9 @@ static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3702,9 +3603,9 @@ static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3754,9 +3655,9 @@ static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_pack(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
@@ -3770,9 +3671,8 @@ static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
gen_program_exception(s, PGM_SPECIFICATION);
return DISAS_NORETURN;
}
- l = tcg_const_i32(l2);
+ l = tcg_constant_i32(l2);
gen_helper_pka(cpu_env, o->addr1, o->in2, l);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
@@ -3786,9 +3686,8 @@ static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
gen_program_exception(s, PGM_SPECIFICATION);
return DISAS_NORETURN;
}
- l = tcg_const_i32(l2);
+ l = tcg_constant_i32(l2);
gen_helper_pku(cpu_env, o->addr1, o->in2, l);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
@@ -4035,9 +3934,8 @@ static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
}
s->pc_tmp &= mask;
- tsam = tcg_const_i64(sam);
+ tsam = tcg_constant_i64(sam);
tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
- tcg_temp_free_i64(tsam);
/* Always exit the TB, since we (may have) changed execution mode. */
return DISAS_TOO_MANY;
@@ -4096,12 +3994,11 @@ static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+
gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
set_cc_static(s);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
return DISAS_NEXT;
}
#endif
@@ -4370,21 +4267,19 @@ static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+
gen_helper_stctg(cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
return DISAS_NEXT;
}
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+
gen_helper_stctl(cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
return DISAS_NEXT;
}
@@ -4611,11 +4506,10 @@ static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+
gen_helper_stam(cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
return DISAS_NEXT;
}
@@ -4673,7 +4567,7 @@ static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
int r1 = get_field(s, r1);
int r3 = get_field(s, r3);
int size = s->insn->data;
- TCGv_i64 tsize = tcg_const_i64(size);
+ TCGv_i64 tsize = tcg_constant_i64(size);
while (1) {
if (size == 8) {
@@ -4688,7 +4582,6 @@ static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
r1 = (r1 + 1) & 15;
}
- tcg_temp_free_i64(tsize);
return DISAS_NEXT;
}
@@ -4697,8 +4590,8 @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
int r1 = get_field(s, r1);
int r3 = get_field(s, r3);
TCGv_i64 t = tcg_temp_new_i64();
- TCGv_i64 t4 = tcg_const_i64(4);
- TCGv_i64 t32 = tcg_const_i64(32);
+ TCGv_i64 t4 = tcg_constant_i64(4);
+ TCGv_i64 t32 = tcg_constant_i64(32);
while (1) {
tcg_gen_shl_i64(t, regs[r1], t32);
@@ -4711,8 +4604,6 @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
}
tcg_temp_free_i64(t);
- tcg_temp_free_i64(t4);
- tcg_temp_free_i64(t32);
return DISAS_NEXT;
}
@@ -4731,26 +4622,20 @@ static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
gen_helper_srst(cpu_env, r1, r2);
-
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
gen_helper_srstu(cpu_env, r1, r2);
-
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4808,10 +4693,9 @@ static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
* Borrow is {0, -1}, so add to subtract; replicate the
* borrow input to produce 128-bit -1 for the addition.
*/
- TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
- tcg_temp_free_i64(zero);
return DISAS_NEXT;
}
@@ -4823,13 +4707,11 @@ static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
update_psw_addr(s);
update_cc_op(s);
- t = tcg_const_i32(get_field(s, i1) & 0xff);
+ t = tcg_constant_i32(get_field(s, i1) & 0xff);
tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
- tcg_temp_free_i32(t);
- t = tcg_const_i32(s->ilen);
+ t = tcg_constant_i32(s->ilen);
tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
- tcg_temp_free_i32(t);
gen_exception(EXCP_SVC);
return DISAS_NORETURN;
@@ -4886,18 +4768,18 @@ static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
+ TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);
+
gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
- tcg_temp_free_i32(l1);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_tr(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4915,27 +4797,27 @@ static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
- TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
+ TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
TCGv_i32 tst = tcg_temp_new_i32();
int m3 = get_field(s, m3);
@@ -4954,9 +4836,6 @@ static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
}
gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r2);
- tcg_temp_free_i32(sizes);
tcg_temp_free_i32(tst);
set_cc_static(s);
return DISAS_NEXT;
@@ -4964,19 +4843,19 @@ static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
- TCGv_i32 t1 = tcg_const_i32(0xff);
+ TCGv_i32 t1 = tcg_constant_i32(0xff);
+
tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
tcg_gen_extract_i32(cc_op, t1, 7, 1);
- tcg_temp_free_i32(t1);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
@@ -4990,9 +4869,8 @@ static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
gen_program_exception(s, PGM_SPECIFICATION);
return DISAS_NORETURN;
}
- l = tcg_const_i32(l1);
+ l = tcg_constant_i32(l1);
gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
- tcg_temp_free_i32(l);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -5007,9 +4885,8 @@ static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
gen_program_exception(s, PGM_SPECIFICATION);
return DISAS_NORETURN;
}
- l = tcg_const_i32(l1);
+ l = tcg_constant_i32(l1);
gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
- tcg_temp_free_i32(l);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -5028,7 +4905,7 @@ static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
/* If the addresses are identical, this is a store/memset of zero. */
if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
- o->in2 = tcg_const_i64(0);
+ o->in2 = tcg_constant_i64(0);
l++;
while (l >= 8) {
@@ -5061,9 +4938,8 @@ static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
/* But in general we'll defer to a helper. */
o->in2 = get_address(s, 0, b2, d2);
- t32 = tcg_const_i32(l);
+ t32 = tcg_constant_i32(l);
gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
- tcg_temp_free_i32(t32);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -5128,46 +5004,39 @@ static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
gen_helper_clp(cpu_env, r2);
- tcg_temp_free_i32(r2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
gen_helper_pcilg(cpu_env, r1, r2);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
gen_helper_pcistg(cpu_env, r1, r2);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
- tcg_temp_free_i32(ar);
- tcg_temp_free_i32(r1);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -5180,38 +5049,31 @@ static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
gen_helper_rpcit(cpu_env, r1, r2);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
- TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+ TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
- tcg_temp_free_i32(ar);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
- tcg_temp_free_i32(ar);
- tcg_temp_free_i32(r1);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -6378,16 +6240,15 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
if (unlikely(s->ex_value)) {
/* Drop the EX data now, so that it's clear on exception paths. */
- TCGv_i64 zero = tcg_const_i64(0);
- int i;
- tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
- tcg_temp_free_i64(zero);
+ tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
+ offsetof(CPUS390XState, ex_value));
/* Extract the values saved by EXECUTE. */
insn = s->ex_value & 0xffffffffffff0000ull;
ilen = s->ex_value & 0xf;
- /* register insn bytes with translator so plugins work */
- for (i = 0; i < ilen; i++) {
+
+ /* Register insn bytes with translator so plugins work. */
+ for (int i = 0; i < ilen; i++) {
uint8_t byte = extract64(insn, 56 - (i * 8), 8);
translator_fake_ldb(byte, pc + i);
}
@@ -6512,9 +6373,8 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
#ifndef CONFIG_USER_ONLY
if (s->base.tb->flags & FLAG_MASK_PER) {
- TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
+ TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
gen_helper_per_ifetch(cpu_env, addr);
- tcg_temp_free_i64(addr);
}
#endif
diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc
index d39ee81..3fadc82 100644
--- a/target/s390x/tcg/translate_vx.c.inc
+++ b/target/s390x/tcg/translate_vx.c.inc
@@ -319,12 +319,10 @@ static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a,
static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
uint64_t b)
{
- TCGv_i64 bl = tcg_const_i64(b);
- TCGv_i64 bh = tcg_const_i64(0);
+ TCGv_i64 bl = tcg_constant_i64(b);
+ TCGv_i64 bh = tcg_constant_i64(0);
tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
- tcg_temp_free_i64(bl);
- tcg_temp_free_i64(bh);
}
static DisasJumpType op_vbperm(DisasContext *s, DisasOps *o)
@@ -609,9 +607,8 @@ static DisasJumpType op_vlei(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- tmp = tcg_const_i64((int16_t)get_field(s, i2));
+ tmp = tcg_constant_i64((int16_t)get_field(s, i2));
write_vec_element_i64(tmp, get_field(s, v1), enr, es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1107,11 +1104,13 @@ static DisasJumpType op_vseg(DisasContext *s, DisasOps *o)
static DisasJumpType op_vst(DisasContext *s, DisasOps *o)
{
- TCGv_i64 tmp = tcg_const_i64(16);
+ TCGv_i64 tmp;
/* Probe write access before actually modifying memory */
- gen_helper_probe_write_access(cpu_env, o->addr1, tmp);
+ gen_helper_probe_write_access(cpu_env, o->addr1,
+ tcg_constant_i64(16));
+ tmp = tcg_temp_new_i64();
read_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
@@ -1270,9 +1269,10 @@ static DisasJumpType op_vstm(DisasContext *s, DisasOps *o)
}
/* Probe write access before actually modifying memory */
- tmp = tcg_const_i64((v3 - v1 + 1) * 16);
- gen_helper_probe_write_access(cpu_env, o->addr1, tmp);
+ gen_helper_probe_write_access(cpu_env, o->addr1,
+ tcg_constant_i64((v3 - v1 + 1) * 16));
+ tmp = tcg_temp_new_i64();
for (;; v1++) {
read_vec_element_i64(tmp, v1, 0, ES_64);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
@@ -1359,7 +1359,7 @@ static DisasJumpType op_va(DisasContext *s, DisasOps *o)
static void gen_acc(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es)
{
const uint8_t msb_bit_nr = NUM_VEC_ELEMENT_BITS(es) - 1;
- TCGv_i64 msb_mask = tcg_const_i64(dup_const(es, 1ull << msb_bit_nr));
+ TCGv_i64 msb_mask = tcg_constant_i64(dup_const(es, 1ull << msb_bit_nr));
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
@@ -1416,7 +1416,7 @@ static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
{
TCGv_i64 th = tcg_temp_new_i64();
TCGv_i64 tl = tcg_temp_new_i64();
- TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_add2_i64(tl, th, al, zero, bl, zero);
tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
@@ -1425,7 +1425,6 @@ static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
tcg_temp_free_i64(th);
tcg_temp_free_i64(tl);
- tcg_temp_free_i64(zero);
}
static DisasJumpType op_vacc(DisasContext *s, DisasOps *o)
@@ -1455,15 +1454,14 @@ static void gen_ac2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
{
TCGv_i64 tl = tcg_temp_new_i64();
- TCGv_i64 th = tcg_const_i64(0);
+ TCGv_i64 zero = tcg_constant_i64(0);
/* extract the carry only */
tcg_gen_extract_i64(tl, cl, 0, 1);
tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
- tcg_gen_add2_i64(dl, dh, dl, dh, tl, th);
+ tcg_gen_add2_i64(dl, dh, dl, dh, tl, zero);
tcg_temp_free_i64(tl);
- tcg_temp_free_i64(th);
}
static DisasJumpType op_vac(DisasContext *s, DisasOps *o)
@@ -1484,7 +1482,7 @@ static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
{
TCGv_i64 tl = tcg_temp_new_i64();
TCGv_i64 th = tcg_temp_new_i64();
- TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_andi_i64(tl, cl, 1);
tcg_gen_add2_i64(tl, th, tl, zero, al, zero);
@@ -1495,7 +1493,6 @@ static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
tcg_temp_free_i64(tl);
tcg_temp_free_i64(th);
- tcg_temp_free_i64(zero);
}
static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o)
@@ -1597,14 +1594,13 @@ static void gen_avgl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
{
TCGv_i64 dh = tcg_temp_new_i64();
- TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_add2_i64(dl, dh, al, zero, bl, zero);
gen_addi2_i64(dl, dh, dl, dh, 1);
tcg_gen_extract2_i64(dl, dl, dh, 1);
tcg_temp_free_i64(dh);
- tcg_temp_free_i64(zero);
}
static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o)
@@ -2440,7 +2436,7 @@ static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
{
TCGv_i64 th = tcg_temp_new_i64();
TCGv_i64 tl = tcg_temp_new_i64();
- TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_sub2_i64(tl, th, al, zero, bl, zero);
tcg_gen_andi_i64(th, th, 1);
@@ -2452,7 +2448,6 @@ static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
tcg_temp_free_i64(th);
tcg_temp_free_i64(tl);
- tcg_temp_free_i64(zero);
}
static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o)
@@ -2572,11 +2567,12 @@ static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- sumh = tcg_const_i64(0);
+ sumh = tcg_temp_new_i64();
suml = tcg_temp_new_i64();
- zero = tcg_const_i64(0);
+ zero = tcg_constant_i64(0);
tmpl = tcg_temp_new_i64();
+ tcg_gen_mov_i64(sumh, zero);
read_vec_element_i64(suml, get_field(s, v3), max_idx, es);
for (idx = 0; idx <= max_idx; idx++) {
read_vec_element_i64(tmpl, get_field(s, v2), idx, es);
@@ -2587,7 +2583,6 @@ static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o)
tcg_temp_free_i64(sumh);
tcg_temp_free_i64(suml);
- tcg_temp_free_i64(zero);
tcg_temp_free_i64(tmpl);
return DISAS_NEXT;
}