Diffstat (limited to 'tcg')
 tcg/aarch64/tcg-target.c.inc |  40
 tcg/aarch64/tcg-target.h     |   6
 tcg/i386/tcg-target.c.inc    | 123
 tcg/i386/tcg-target.h        |  28
 tcg/tcg-op-ldst.c            |   6
 tcg/tcg.c                    |  14
 6 files changed, 29 insertions(+), 188 deletions(-)
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index bc6b99a..8428366 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -13,12 +13,6 @@
#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"
#include "qemu/bitops.h"
-#ifdef __linux__
-#include <asm/hwcap.h>
-#endif
-#ifdef CONFIG_DARWIN
-#include <sys/sysctl.h>
-#endif
/* We're going to re-use TCGType in setting of the SF bit, which controls
the size of the operation performed. If we know the values match, it
@@ -77,9 +71,6 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
return TCG_REG_X0 + slot;
}
-bool have_lse;
-bool have_lse2;
-
#define TCG_REG_TMP TCG_REG_X30
#define TCG_VEC_TMP TCG_REG_V31
@@ -2878,39 +2869,8 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
}
}
-#ifdef CONFIG_DARWIN
-static bool sysctl_for_bool(const char *name)
-{
- int val = 0;
- size_t len = sizeof(val);
-
- if (sysctlbyname(name, &val, &len, NULL, 0) == 0) {
- return val != 0;
- }
-
- /*
- * We might in the future ask for properties not present in older kernels,
- * but we're only asking about static properties, all of which should be
- * 'int'. So we shouldn't see ENOMEM (val too small), or any of the other
- * more exotic errors.
- */
- assert(errno == ENOENT);
- return false;
-}
-#endif
-
static void tcg_target_init(TCGContext *s)
{
-#ifdef __linux__
- unsigned long hwcap = qemu_getauxval(AT_HWCAP);
- have_lse = hwcap & HWCAP_ATOMICS;
- have_lse2 = hwcap & HWCAP_USCAT;
-#endif
-#ifdef CONFIG_DARWIN
- have_lse = sysctl_for_bool("hw.optional.arm.FEAT_LSE");
- have_lse2 = sysctl_for_bool("hw.optional.arm.FEAT_LSE2");
-#endif
-
tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffffu;
tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffffu;
tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
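
The probing deleted above does not disappear; the series centralizes it behind host/cpuinfo.h. A minimal sketch of what that centralized probe could look like, assuming a util/cpuinfo-aarch64.c home and the cpuinfo_init()/CPUINFO_ALWAYS convention (only CPUINFO_LSE, CPUINFO_LSE2, and the cpuinfo word are visible in the hunks of this diff; everything else here is an assumption):

/*
 * util/cpuinfo-aarch64.c (sketch) -- the hwcap/sysctl logic removed
 * from tcg_target_init() above, relocated so any host code can use it.
 */
#include "qemu/osdep.h"
#include "host/cpuinfo.h"

#ifdef __linux__
#include <asm/hwcap.h>
#endif
#ifdef CONFIG_DARWIN
#include <sys/sysctl.h>
#endif

unsigned cpuinfo;

#ifdef CONFIG_DARWIN
/* Carried over from the deleted hunk: query one boolean sysctl. */
static bool sysctl_for_bool(const char *name)
{
    int val = 0;
    size_t len = sizeof(val);

    if (sysctlbyname(name, &val, &len, NULL, 0) == 0) {
        return val != 0;
    }
    assert(errno == ENOENT);
    return false;
}
#endif

/* Called once at startup; later calls return the cached word. */
unsigned cpuinfo_init(void)
{
    unsigned info = cpuinfo;

    if (info) {
        return info;
    }
    info = CPUINFO_ALWAYS;   /* assumed "probe has run" marker bit */

#ifdef __linux__
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    info |= (hwcap & HWCAP_ATOMICS ? CPUINFO_LSE : 0);
    info |= (hwcap & HWCAP_USCAT ? CPUINFO_LSE2 : 0);
#endif
#ifdef CONFIG_DARWIN
    info |= (sysctl_for_bool("hw.optional.arm.FEAT_LSE") ? CPUINFO_LSE : 0);
    info |= (sysctl_for_bool("hw.optional.arm.FEAT_LSE2") ? CPUINFO_LSE2 : 0);
#endif

    cpuinfo = info;
    return info;
}
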
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index 74ee2ed..d5f7614 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -13,6 +13,8 @@
#ifndef AARCH64_TCG_TARGET_H
#define AARCH64_TCG_TARGET_H
+#include "host/cpuinfo.h"
+
#define TCG_TARGET_INSN_UNIT_SIZE 4
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 24
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
@@ -57,8 +59,8 @@ typedef enum {
#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
-extern bool have_lse;
-extern bool have_lse2;
+#define have_lse (cpuinfo & CPUINFO_LSE)
+#define have_lse2 (cpuinfo & CPUINFO_LSE2)
/* optional instructions */
#define TCG_TARGET_HAS_div_i32 1
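
The new have_lse/have_lse2 macros test bits in a single global word rather than separate booleans. The host/cpuinfo.h interface they rely on would have roughly this shape (the bit positions and the CPUINFO_ALWAYS/cpuinfo_init names are assumptions, not taken from this diff):

/* Assumed shape of host/cpuinfo.h backing the macros above. */
#define CPUINFO_ALWAYS  (1u << 0)   /* nonzero cpuinfo means "initialized" */
#define CPUINFO_LSE     (1u << 1)
#define CPUINFO_LSE2    (1u << 2)

extern unsigned cpuinfo;            /* populated once by cpuinfo_init() */
unsigned cpuinfo_init(void);
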
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 8b9a5f0..bfe9d98 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -158,42 +158,14 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
# define SOFTMMU_RESERVE_REGS 0
#endif
-/* The host compiler should supply <cpuid.h> to enable runtime features
- detection, as we're not going to go so far as our own inline assembly.
- If not available, default values will be assumed. */
-#if defined(CONFIG_CPUID_H)
-#include "qemu/cpuid.h"
-#endif
-
/* For 64-bit, we always know that CMOV is available. */
#if TCG_TARGET_REG_BITS == 64
-# define have_cmov 1
-#elif defined(CONFIG_CPUID_H)
-static bool have_cmov;
-#else
-# define have_cmov 0
-#endif
-
-/* We need these symbols in tcg-target.h, and we can't properly conditionalize
- it there. Therefore we always define the variable. */
-bool have_bmi1;
-bool have_popcnt;
-bool have_avx1;
-bool have_avx2;
-bool have_avx512bw;
-bool have_avx512dq;
-bool have_avx512vbmi2;
-bool have_avx512vl;
-bool have_movbe;
-bool have_atomic16;
-
-#ifdef CONFIG_CPUID_H
-static bool have_bmi2;
-static bool have_lzcnt;
+# define have_cmov true
#else
-# define have_bmi2 0
-# define have_lzcnt 0
+# define have_cmov (cpuinfo & CPUINFO_CMOV)
#endif
+#define have_bmi2 (cpuinfo & CPUINFO_BMI2)
+#define have_lzcnt (cpuinfo & CPUINFO_LZCNT)
static const tcg_insn_unit *tb_ret_addr;
@@ -3961,93 +3933,6 @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
static void tcg_target_init(TCGContext *s)
{
-#ifdef CONFIG_CPUID_H
- unsigned a, b, c, d, b7 = 0, c7 = 0;
- unsigned max = __get_cpuid_max(0, 0);
-
- if (max >= 7) {
- /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */
- __cpuid_count(7, 0, a, b7, c7, d);
- have_bmi1 = (b7 & bit_BMI) != 0;
- have_bmi2 = (b7 & bit_BMI2) != 0;
- }
-
- if (max >= 1) {
- __cpuid(1, a, b, c, d);
-#ifndef have_cmov
- /* For 32-bit, 99% certainty that we're running on hardware that
- supports cmov, but we still need to check. In case cmov is not
- available, we'll use a small forward branch. */
- have_cmov = (d & bit_CMOV) != 0;
-#endif
-
- /* MOVBE is only available on Intel Atom and Haswell CPUs, so we
- need to probe for it. */
- have_movbe = (c & bit_MOVBE) != 0;
- have_popcnt = (c & bit_POPCNT) != 0;
-
- /* There are a number of things we must check before we can be
- sure of not hitting invalid opcode. */
- if (c & bit_OSXSAVE) {
- unsigned bv = xgetbv_low(0);
-
- if ((bv & 6) == 6) {
- have_avx1 = (c & bit_AVX) != 0;
- have_avx2 = (b7 & bit_AVX2) != 0;
-
- /*
- * There are interesting instructions in AVX512, so long
- * as we have AVX512VL, which indicates support for EVEX
- * on sizes smaller than 512 bits. We are required to
- * check that OPMASK and all extended ZMM state are enabled
- * even if we're not using them -- the insns will fault.
- */
- if ((bv & 0xe0) == 0xe0
- && (b7 & bit_AVX512F)
- && (b7 & bit_AVX512VL)) {
- have_avx512vl = true;
- have_avx512bw = (b7 & bit_AVX512BW) != 0;
- have_avx512dq = (b7 & bit_AVX512DQ) != 0;
- have_avx512vbmi2 = (c7 & bit_AVX512VBMI2) != 0;
- }
-
- /*
- * The Intel SDM has added:
- * Processors that enumerate support for Intel® AVX
- * (by setting the feature flag CPUID.01H:ECX.AVX[bit 28])
- * guarantee that the 16-byte memory operations performed
- * by the following instructions will always be carried
- * out atomically:
- * - MOVAPD, MOVAPS, and MOVDQA.
- * - VMOVAPD, VMOVAPS, and VMOVDQA when encoded with VEX.128.
- * - VMOVAPD, VMOVAPS, VMOVDQA32, and VMOVDQA64 when encoded
- * with EVEX.128 and k0 (masking disabled).
- * Note that these instructions require the linear addresses
- * of their memory operands to be 16-byte aligned.
- *
- * AMD has provided an even stronger guarantee that processors
- * with AVX provide 16-byte atomicity for all cachable,
- * naturally aligned single loads and stores, e.g. MOVDQU.
- *
- * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104688
- */
- if (have_avx1) {
- __cpuid(0, a, b, c, d);
- have_atomic16 = (c == signature_INTEL_ecx ||
- c == signature_AMD_ecx);
- }
- }
- }
- }
-
- max = __get_cpuid_max(0x8000000, 0);
- if (max >= 1) {
- __cpuid(0x80000001, a, b, c, d);
- /* LZCNT was introduced with AMD Barcelona and Intel Haswell CPUs. */
- have_lzcnt = (c & bit_LZCNT) != 0;
- }
-#endif /* CONFIG_CPUID_H */
-
tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
if (TCG_TARGET_REG_BITS == 64) {
tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
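
As on aarch64, the cpuid work deleted here is not lost: it presumably moves into a shared probe that sets the same CPUINFO_* bits the new macros test. A condensed sketch under those assumptions (the util/cpuinfo-i386.c file name and cpuinfo_init() do not appear in this diff; the bit_* and signature_* constants come from qemu/cpuid.h, as in the removed code):

/* util/cpuinfo-i386.c (sketch) -- the deleted tcg_target_init() probing. */
#include "qemu/osdep.h"
#include "host/cpuinfo.h"
#include "qemu/cpuid.h"

unsigned cpuinfo;

unsigned cpuinfo_init(void)
{
    unsigned info = cpuinfo;
    unsigned a, b, c, d, b7 = 0, c7 = 0;
    unsigned max = __get_cpuid_max(0, 0);

    if (info) {
        return info;
    }
    info = CPUINFO_ALWAYS;

    if (max >= 7) {
        __cpuid_count(7, 0, a, b7, c7, d);
        info |= (b7 & bit_BMI ? CPUINFO_BMI1 : 0);
        info |= (b7 & bit_BMI2 ? CPUINFO_BMI2 : 0);
    }
    if (max >= 1) {
        __cpuid(1, a, b, c, d);
        info |= (d & bit_CMOV ? CPUINFO_CMOV : 0);
        info |= (c & bit_MOVBE ? CPUINFO_MOVBE : 0);
        info |= (c & bit_POPCNT ? CPUINFO_POPCNT : 0);

        /* AVX and AVX512 additionally require OS support, via XCR0. */
        if (c & bit_OSXSAVE) {
            unsigned bv = xgetbv_low(0);
            if ((bv & 6) == 6) {
                info |= (c & bit_AVX ? CPUINFO_AVX1 : 0);
                info |= (b7 & bit_AVX2 ? CPUINFO_AVX2 : 0);
                /* OPMASK and all extended ZMM state must be enabled. */
                if ((bv & 0xe0) == 0xe0) {
                    info |= (b7 & bit_AVX512F ? CPUINFO_AVX512F : 0);
                    info |= (b7 & bit_AVX512VL ? CPUINFO_AVX512VL : 0);
                    info |= (b7 & bit_AVX512BW ? CPUINFO_AVX512BW : 0);
                    info |= (b7 & bit_AVX512DQ ? CPUINFO_AVX512DQ : 0);
                    info |= (c7 & bit_AVX512VBMI2 ? CPUINFO_AVX512VBMI2 : 0);
                }
                /* 16-byte atomic VMOVDQA, per the vendor notes quoted above. */
                if (info & CPUINFO_AVX1) {
                    __cpuid(0, a, b, c, d);
                    if (c == signature_INTEL_ecx || c == signature_AMD_ecx) {
                        info |= CPUINFO_ATOMIC_VMOVDQA;
                    }
                }
            }
        }
    }

    max = __get_cpuid_max(0x80000000, 0);
    if (max >= 0x80000001) {
        __cpuid(0x80000001, a, b, c, d);
        info |= (c & bit_LZCNT ? CPUINFO_LZCNT : 0);
    }

    cpuinfo = info;
    return info;
}
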
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index 0b5a2c6..0106946 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -25,6 +25,8 @@
#ifndef I386_TCG_TARGET_H
#define I386_TCG_TARGET_H
+#include "host/cpuinfo.h"
+
#define TCG_TARGET_INSN_UNIT_SIZE 1
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 31
@@ -111,16 +113,22 @@ typedef enum {
# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
#endif
-extern bool have_bmi1;
-extern bool have_popcnt;
-extern bool have_avx1;
-extern bool have_avx2;
-extern bool have_avx512bw;
-extern bool have_avx512dq;
-extern bool have_avx512vbmi2;
-extern bool have_avx512vl;
-extern bool have_movbe;
-extern bool have_atomic16;
+#define have_bmi1 (cpuinfo & CPUINFO_BMI1)
+#define have_popcnt (cpuinfo & CPUINFO_POPCNT)
+#define have_avx1 (cpuinfo & CPUINFO_AVX1)
+#define have_avx2 (cpuinfo & CPUINFO_AVX2)
+#define have_movbe (cpuinfo & CPUINFO_MOVBE)
+#define have_atomic16 (cpuinfo & CPUINFO_ATOMIC_VMOVDQA)
+
+/*
+ * There are interesting instructions in AVX512, so long as we have AVX512VL,
+ * which indicates support for EVEX on sizes smaller than 512 bits.
+ */
+#define have_avx512vl ((cpuinfo & CPUINFO_AVX512VL) && \
+ (cpuinfo & CPUINFO_AVX512F))
+#define have_avx512bw ((cpuinfo & CPUINFO_AVX512BW) && have_avx512vl)
+#define have_avx512dq ((cpuinfo & CPUINFO_AVX512DQ) && have_avx512vl)
+#define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl)
/* optional instructions */
#define TCG_TARGET_HAS_div2_i32 1
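
Folding the AVX512VL (and, through it, AVX512F) requirement into each sub-feature macro means call sites need only a single test. A hypothetical guard in backend code:

/* Hypothetical call site: one test covers F+VL+BW, since the macro
   composes them; no separate have_avx512vl check is needed. */
if (have_avx512bw) {
    /* emit an EVEX-encoded byte/word op on a 128- or 256-bit vector */
} else {
    /* fall back to an SSE/AVX sequence */
}
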
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index f4e508c..3d27c34 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -975,13 +975,11 @@ static void tcg_gen_nonatomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
{
if (TCG_TARGET_REG_BITS == 32) {
/* Inline expansion below is simply too large for 32-bit hosts. */
- gen_atomic_cx_i128 gen = ((memop & MO_BSWAP) == MO_LE
- ? gen_helper_nonatomic_cmpxchgo_le
- : gen_helper_nonatomic_cmpxchgo_be);
MemOpIdx oi = make_memop_idx(memop, idx);
TCGv_i64 a64 = maybe_extend_addr64(addr);
- gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
+ gen_helper_nonatomic_cmpxchgo(retv, cpu_env, a64, cmpv, newv,
+ tcg_constant_i32(oi));
maybe_free_addr64(a64);
} else {
TCGv_i128 oldv = tcg_temp_ebb_new_i128();
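
The two byte-order-specific helpers can merge because the MemOpIdx passed as the final argument already carries the MemOp, MO_BSWAP bits included, so the helper can recover the endianness itself. A standalone sketch of that round trip (the exact bit layout below is illustrative, not QEMU's definition):

/* Sketch: a MemOpIdx packs the MemOp above a small mmu-index field,
 * so make_memop_idx()/get_memop() preserve the MO_BSWAP bits. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t MemOpIdx;

enum { MO_BSWAP = 1 << 3 };                /* illustrative bit position */

static MemOpIdx make_memop_idx(unsigned op, unsigned idx)
{
    return (op << 4) | idx;                /* MemOp above, mmu index below */
}

static unsigned get_memop(MemOpIdx oi)
{
    return oi >> 4;
}

int main(void)
{
    MemOpIdx oi = make_memop_idx(MO_BSWAP | 4 /* illustrative size */, 1);
    unsigned mop = get_memop(oi);
    printf("byte-swapped: %s\n", (mop & MO_BSWAP) ? "yes" : "no");
    return 0;
}
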
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 0b0fe9c..ac30d48 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -22,9 +22,6 @@
* THE SOFTWARE.
*/
-/* define it to use liveness analysis (better code) */
-#define USE_TCG_OPTIMIZATIONS
-
#include "qemu/osdep.h"
/* Define to dump the ELF file used to communicate with GDB. */
@@ -1451,7 +1448,6 @@ void tcg_prologue_init(TCGContext *s)
(uintptr_t)s->code_buf, prologue_size);
#endif
-#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
FILE *logfile = qemu_log_trylock();
if (logfile) {
@@ -1483,7 +1479,6 @@ void tcg_prologue_init(TCGContext *s)
qemu_log_unlock(logfile);
}
}
-#endif
#ifndef CONFIG_TCG_INTERPRETER
/*
@@ -5998,7 +5993,6 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
}
#endif
-#ifdef DEBUG_DISAS
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
&& qemu_log_in_addr_range(pc_start))) {
FILE *logfile = qemu_log_trylock();
@@ -6009,7 +6003,6 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
qemu_log_unlock(logfile);
}
}
-#endif
#ifdef CONFIG_DEBUG_TCG
/* Ensure all labels referenced have been emitted. */
@@ -6032,9 +6025,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif
-#ifdef USE_TCG_OPTIMIZATIONS
tcg_optimize(s);
-#endif
#ifdef CONFIG_PROFILER
qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
@@ -6046,7 +6037,6 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
liveness_pass_1(s);
if (s->nb_indirects > 0) {
-#ifdef DEBUG_DISAS
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
&& qemu_log_in_addr_range(pc_start))) {
FILE *logfile = qemu_log_trylock();
@@ -6057,7 +6047,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
qemu_log_unlock(logfile);
}
}
-#endif
+
/* Replace indirect temps with direct temps. */
if (liveness_pass_2(s)) {
/* If changes were made, re-run liveness. */
@@ -6069,7 +6059,6 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif
-#ifdef DEBUG_DISAS
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
&& qemu_log_in_addr_range(pc_start))) {
FILE *logfile = qemu_log_trylock();
@@ -6080,7 +6069,6 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
qemu_log_unlock(logfile);
}
}
-#endif
/* Initialize goto_tb jump offsets. */
tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;