author    Peter Maydell <peter.maydell@linaro.org>  2018-08-23 19:03:53 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2018-08-23 19:03:54 +0100
commit    3c825bb7c1b4289ef05f51b5b77ac0967b6a27fa
tree      44c5189aacbe795b125bfb4b5144a37fe122d2e4
parent    5ccac548faf041ff5229a8e8342e3be14a34c8af
parent    b2e78fac6f27c36938353e477354778896adc08f
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* x86 TCG fixes for 64-bit call gates (Andrew)
* qemu-guest-agent freeze-hook tweak (Christian)
* pm_smbus improvements (Corey)
* Move validation to pre_plug for pc-dimm (David)
* Fix memory leaks (Eduardo, Marc-André)
* synchronization profiler (Emilio)
* Convert the CPU list to RCU (Emilio)
* LSI support for PPR Extended Message (George)
* vhost-scsi support for protection information (Greg)
* Mark mptsas as a storage device in the help (Guenter)
* checkpatch tweak cherry-picked from Linux (me)
* Typos, cleanups and dead-code removal (Julia, Marc-André)
* qemu-pr-helper support for old libmultipath (Murilo)
* Annotate fallthroughs (me)
* MemoryRegionOps cleanup (me, Peter)
* Make s390 qtests independent from libqos, which doesn't actually support it (me)
* Make cpu_get_ticks independent from BQL (me)
* Introspection fixes (Thomas)
* Support QEMU_MODULE_DIR environment variable (ryang)

# gpg: Signature made Thu 23 Aug 2018 17:46:30 BST
# gpg:                using RSA key BFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (69 commits)
  KVM: cleanup unnecessary #ifdef KVM_CAP_...
  target/i386: update MPX flags when CPL changes
  i2c: pm_smbus: Add the ability to force block transfer enable
  i2c: pm_smbus: Don't delay host status register busy bit when interrupts are enabled
  i2c: pm_smbus: Add interrupt handling
  i2c: pm_smbus: Add block transfer capability
  i2c: pm_smbus: Make the I2C block read command read-only
  i2c: pm_smbus: Fix the semantics of block I2C transfers
  i2c: pm_smbus: Clean up some style issues
  pc-dimm: assign and verify the "addr" property during pre_plug
  pc: drop memory region alignment check for 0
  util/oslib-win32: indicate alignment for qemu_anon_ram_alloc()
  pc-dimm: assign and verify the "slot" property during pre_plug
  ipmi: Use proper struct reference for BT vmstate
  vhost-scsi: expose 't10_pi' property for VIRTIO_SCSI_F_T10_PI
  vhost-scsi: unify vhost-scsi get_features implementations
  vhost-user-scsi: move host_features into VHostSCSICommon
  cpus: allow cpu_get_ticks out of BQL
  cpus: protect TimerState writes with a spinlock
  seqlock: add QemuLockable support
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
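The cpu_get_ticks and TimerState entries above are built on QEMU's seqlock (include/qemu/seqlock.h). A minimal read-side sketch, assuming the seqlock_read_begin()/seqlock_read_retry() API from that header; the TimerState layout shown is illustrative, reduced from the real struct in cpus.c:

    #include "qemu/seqlock.h"

    /* Illustrative only: field set reduced from cpus.c's TimerState. */
    typedef struct TimerState {
        QemuSeqLock vm_clock_seqlock;
        int64_t cpu_ticks_offset;
    } TimerState;

    static int64_t read_ticks_offset(TimerState *t)
    {
        unsigned start;
        int64_t val;

        do {
            /* Lock-free read: writers bump a sequence count around
             * updates, and a torn read simply retries. */
            start = seqlock_read_begin(&t->vm_clock_seqlock);
            val = t->cpu_ticks_offset;
        } while (seqlock_read_retry(&t->vm_clock_seqlock, start));
        return val;
    }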
Diffstat (limited to 'target')
-rw-r--r--  target/arm/helper.c         1
-rw-r--r--  target/i386/cpu.c           9
-rw-r--r--  target/i386/cpu.h           7
-rw-r--r--  target/i386/kvm.c           8
-rw-r--r--  target/i386/seg_helper.c  196
-rw-r--r--  target/i386/translate.c     2
-rw-r--r--  target/s390x/cpu_models.c   2
7 files changed, 165 insertions, 60 deletions
diff --git a/target/arm/helper.c b/target/arm/helper.c
index c9bce1e..1b05480 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -12331,6 +12331,7 @@ int arm_rmode_to_sf(int rmode)
/* FIXME: add support for TIEAWAY and ODD */
qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
rmode);
+ /* fall through for now */
case FPROUNDING_TIEEVEN:
default:
rmode = float_round_nearest_even;
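The /* fall through */ comments added by this pull (here and in translate.c below) are the conventional way to tell GCC's -Wimplicit-fallthrough and checkers like checkpatch that the missing break is deliberate. A standalone illustration:

    #include <stdio.h>

    /* GCC 7+ (-Wimplicit-fallthrough) treats a comment of this form as
     * an explicit statement that falling through is intended. */
    static void round_mode(int rmode)
    {
        switch (rmode) {
        case 2:                /* unimplemented mode */
            printf("unimplemented rounding mode: %d\n", rmode);
            /* fall through */
        case 0:                /* ties-to-even */
        default:
            printf("using round-to-nearest-even\n");
            break;
        }
    }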
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 4e4fe8f..f24295e 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -3880,6 +3880,9 @@ arch_query_cpu_model_expansion(CpuModelExpansionType type,
}
props = qdict_new();
+ ret->model = g_new0(CpuModelInfo, 1);
+ ret->model->props = QOBJECT(props);
+ ret->model->has_props = true;
switch (type) {
case CPU_MODEL_EXPANSION_TYPE_STATIC:
@@ -3900,15 +3903,9 @@ arch_query_cpu_model_expansion(CpuModelExpansionType type,
goto out;
}
- if (!props) {
- props = qdict_new();
- }
x86_cpu_to_dict(xc, props);
- ret->model = g_new0(CpuModelInfo, 1);
ret->model->name = g_strdup(base_name);
- ret->model->props = QOBJECT(props);
- ret->model->has_props = true;
out:
object_unref(OBJECT(xc));
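The reordering in this file is one of the leak fixes from the cover letter: ret->model owns props before the switch runs, so error paths that goto out can no longer drop an unreferenced QDict. A minimal sketch of the pattern, assuming QEMU's qdict/error headers; Result, validate() and fill() are placeholders, not QEMU APIs:

    #include "qapi/error.h"       /* Error */
    #include "qapi/qmp/qdict.h"   /* QDict, qdict_new() */
    #include <glib.h>             /* g_new0() */

    typedef struct Result { QDict *props; } Result;

    static bool validate(Error **errp);  /* placeholder */
    static void fill(QDict *props);      /* placeholder */

    static Result *query(Error **errp)
    {
        QDict *props = qdict_new();
        Result *ret = g_new0(Result, 1);

        ret->props = props;     /* ownership transferred before any branch */
        if (!validate(errp)) {
            goto out;           /* early exit can no longer leak props */
        }
        fill(props);
    out:
        return ret;             /* freeing ret now frees props too */
    }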
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 9cad581..b572a8e 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1515,6 +1515,8 @@ int cpu_x86_support_mca_broadcast(CPUX86State *env);
int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
void cpu_set_ferr(CPUX86State *s);
+/* mpx_helper.c */
+void cpu_sync_bndcs_hflags(CPUX86State *env);
/* this function must always be used to load data in the segment
cache: it synchronizes the hflags with the segment cache values */
@@ -1557,6 +1559,8 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
#error HF_CPL_MASK is hardcoded
#endif
env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
+ /* Possibly switch between BNDCFGS and BNDCFGU */
+ cpu_sync_bndcs_hflags(env);
}
new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - HF_SS32_SHIFT);
@@ -1889,9 +1893,6 @@ void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
*/
void x86_cpu_change_kvm_default(const char *prop, const char *value);
-/* mpx_helper.c */
-void cpu_sync_bndcs_hflags(CPUX86State *env);
-
/* Return name of 32-bit register, from a R_* constant */
const char *get_register_name_32(unsigned int reg);
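Moving the cpu_sync_bndcs_hflags() prototype up lets the inline cpu_x86_load_seg_cache() call it on every CPL change. The reason is architectural: MPX takes its bound-range configuration from BNDCFGU at CPL 3 and from the BNDCFGS MSR otherwise, so a privilege transition can silently switch which config is live. A hedged sketch of the selection, mirroring the idea (not the exact code) of mpx_helper.c:

    /* Sketch: which MPX config is active at the current CPL.  Field
     * names follow target/i386/cpu.h of this era, but this is an
     * illustration, not the real cpu_sync_bndcs_hflags(). */
    static uint64_t active_bndcfg(CPUX86State *env)
    {
        if ((env->hflags & HF_CPL_MASK) == 3) {
            return env->bndcs_regs.cfgu;   /* user mode: BNDCFGU */
        }
        return env->msr_bndcfgs;           /* kernel mode: BNDCFGS MSR */
    }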
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 9313602..0b2a07d 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -1381,17 +1381,9 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
int ret;
struct utsname utsname;
-#ifdef KVM_CAP_XSAVE
has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
-#endif
-
-#ifdef KVM_CAP_XCRS
has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
-#endif
-
-#ifdef KVM_CAP_PIT_STATE2
has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
-#endif
hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
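Dropping the #ifdef guards is safe because the kernel headers QEMU requires already define these KVM_CAP_* constants; whether the running kernel supports each capability is a separate, runtime question, and kvm_check_extension() keeps answering it. A standalone sketch of that runtime probe, using the raw ioctl behind it:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Ask the *running* kernel (not the build-time headers) whether a
     * KVM capability is implemented.  > 0 means supported; 0 means the
     * kernel predates or lacks the feature. */
    static int host_has_cap(int kvm_fd, long cap)
    {
        return ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap) > 0;
    }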
diff --git a/target/i386/seg_helper.c b/target/i386/seg_helper.c
index 00301a0..d1cbc6e 100644
--- a/target/i386/seg_helper.c
+++ b/target/i386/seg_helper.c
@@ -518,6 +518,11 @@ static void switch_tss(CPUX86State *env, int tss_selector,
static inline unsigned int get_sp_mask(unsigned int e2)
{
+#ifdef TARGET_X86_64
+ if (e2 & DESC_L_MASK) {
+ return 0;
+ } else
+#endif
if (e2 & DESC_B_MASK) {
return 0xffffffff;
} else {
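Returning 0 for a long-mode (DESC_L_MASK) stack segment works because the SP-update path treats anything other than the 16- and 32-bit masks as a full-register write. A sketch of that consumer, modeled on this file's SET_ESP macro (written as a function here for readability):

    /* How a sp_mask of 0 is consumed downstream: only the 16- and
     * 32-bit masks cause partial updates; everything else writes the
     * whole 64-bit RSP, which is what a DESC_L_MASK stack needs. */
    static void set_sp(CPUX86State *env, target_ulong val, target_ulong sp_mask)
    {
        if (sp_mask == 0xffff) {
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (val & 0xffff);
        } else if (sp_mask == 0xffffffffULL) {
            env->regs[R_ESP] = (uint32_t)val;
        } else {
            env->regs[R_ESP] = val;   /* sp_mask == 0: full RSP */
        }
    }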
@@ -1628,8 +1633,8 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
}
limit = get_seg_limit(e1, e2);
if (new_eip > limit &&
- !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
- raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
get_seg_base(e1, e2), limit, e2);
@@ -1640,6 +1645,14 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
rpl = new_cs & 3;
cpl = env->hflags & HF_CPL_MASK;
type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+
+#ifdef TARGET_X86_64
+ if (env->efer & MSR_EFER_LMA) {
+ if (type != 12) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ }
+#endif
switch (type) {
case 1: /* 286 TSS */
case 9: /* 386 TSS */
@@ -1662,6 +1675,23 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
if (type == 12) {
new_eip |= (e2 & 0xffff0000);
}
+
+#ifdef TARGET_X86_64
+ if (env->efer & MSR_EFER_LMA) {
+ /* load the upper 8 bytes of the 64-bit call gate */
+ if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
+ GETPC());
+ }
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ if (type != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
+ GETPC());
+ }
+ new_eip |= ((target_ulong)e1) << 32;
+ }
+#endif
+
if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
}
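In IA-32e mode a call gate is 16 bytes, and the load_segment_ra() of new_cs + 8 above fetches its upper half: bits 63:32 of the target offset, plus a type field that must read as zero (Intel SDM vol. 3). A standalone sketch of how the full offset is assembled; e1/e2 are the first two descriptor words as QEMU loads them, hi is the word carrying the high offset bits:

    #include <stdint.h>

    static uint64_t gate_offset64(uint32_t e1, uint32_t e2, uint32_t hi)
    {
        return (e1 & 0x0000ffffu)        /* offset 15:0  */
             | (e2 & 0xffff0000u)        /* offset 31:16 */
             | ((uint64_t)hi << 32);     /* offset 63:32 */
    }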
@@ -1675,11 +1705,22 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
(!(e2 & DESC_C_MASK) && (dpl != cpl))) {
raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
}
+#ifdef TARGET_X86_64
+ if (env->efer & MSR_EFER_LMA) {
+ if (!(e2 & DESC_L_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+ }
+ if (e2 & DESC_B_MASK) {
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+ }
+ }
+#endif
if (!(e2 & DESC_P_MASK)) {
raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
}
limit = get_seg_limit(e1, e2);
- if (new_eip > limit) {
+ if (new_eip > limit &&
+ (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
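The long-mode checks added here encode two SDM rules: the code segment a 64-bit gate lands in must have L=1 with D/B=0 (L=1, D=1 is a reserved combination), and 64-bit segments have no limit, which is why the EIP-vs-limit test is bypassed when LMA and L are both set. As a one-line predicate, a hedged sketch:

    /* Sketch of the legality test for a long-mode gate target CS. */
    static bool lma_cs_ok(uint32_t e2)
    {
        return (e2 & DESC_L_MASK) && !(e2 & DESC_B_MASK);
    }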
@@ -1724,12 +1765,12 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
int shift, target_ulong next_eip)
{
int new_stack, i;
- uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
- uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
+ uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
+ uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
uint32_t val, limit, old_sp_mask;
- target_ulong ssp, old_ssp;
+ target_ulong ssp, old_ssp, offset, sp;
- LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
+ LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
if ((new_cs & 0xfffc) == 0) {
raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
@@ -1807,6 +1848,15 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
rpl = new_cs & 3;
+
+#ifdef TARGET_X86_64
+ if (env->efer & MSR_EFER_LMA) {
+ if (type != 12) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ }
+#endif
+
switch (type) {
case 1: /* available 286 TSS */
case 9: /* available 386 TSS */
@@ -1833,8 +1883,23 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
}
selector = e1 >> 16;
- offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
param_count = e2 & 0x1f;
+ offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+#ifdef TARGET_X86_64
+ if (env->efer & MSR_EFER_LMA) {
+ /* load the upper 8 bytes of the 64-bit call gate */
+ if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
+ GETPC());
+ }
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ if (type != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
+ GETPC());
+ }
+ offset |= ((target_ulong)e1) << 32;
+ }
+#endif
if ((selector & 0xfffc) == 0) {
raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
@@ -1849,46 +1914,80 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
if (dpl > cpl) {
raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
}
+#ifdef TARGET_X86_64
+ if (env->efer & MSR_EFER_LMA) {
+ if (!(e2 & DESC_L_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ if (e2 & DESC_B_MASK) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ shift++;
+ }
+#endif
if (!(e2 & DESC_P_MASK)) {
raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
}
if (!(e2 & DESC_C_MASK) && dpl < cpl) {
/* to inner privilege */
- get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
- LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
- TARGET_FMT_lx "\n", ss, sp, param_count,
- env->regs[R_ESP]);
- if ((ss & 0xfffc) == 0) {
- raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
- }
- if ((ss & 3) != dpl) {
- raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
- }
- if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
- raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
- }
- ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
- if (ss_dpl != dpl) {
- raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
- }
- if (!(ss_e2 & DESC_S_MASK) ||
- (ss_e2 & DESC_CS_MASK) ||
- !(ss_e2 & DESC_W_MASK)) {
- raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
- }
- if (!(ss_e2 & DESC_P_MASK)) {
- raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ sp = get_rsp_from_tss(env, dpl);
+ ss = dpl; /* SS = NULL selector with RPL = new CPL */
+ new_stack = 1;
+ sp_mask = 0;
+ ssp = 0; /* SS base is always zero in IA-32e mode */
+ LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
+ TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
+ } else
+#endif
+ {
+ uint32_t sp32;
+ get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
+ LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
+ TARGET_FMT_lx "\n", ss, sp32, param_count,
+ env->regs[R_ESP]);
+ sp = sp32;
+ if ((ss & 0xfffc) == 0) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ if ((ss & 3) != dpl) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+ if (ss_dpl != dpl) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ if (!(ss_e2 & DESC_S_MASK) ||
+ (ss_e2 & DESC_CS_MASK) ||
+ !(ss_e2 & DESC_W_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ if (!(ss_e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+
+ sp_mask = get_sp_mask(ss_e2);
+ ssp = get_seg_base(ss_e1, ss_e2);
}
/* push_size = ((param_count * 2) + 8) << shift; */
old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
old_ssp = env->segs[R_SS].base;
-
- sp_mask = get_sp_mask(ss_e2);
- ssp = get_seg_base(ss_e1, ss_e2);
- if (shift) {
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ /* XXX: verify if new stack address is canonical */
+ PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
+ PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
+ /* parameters aren't supported for 64-bit call gates */
+ } else
+#endif
+ if (shift == 1) {
PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
for (i = param_count - 1; i >= 0; i--) {
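shift now selects the push width: 0 for 16-bit gates, 1 for 32-bit, and 2 for the new 64-bit path, which pushes SS and RSP as 8-byte values and never copies parameters. The commented-out push_size formula in the context above covers the legacy widths; a sketch extending it:

    /* Bytes pushed on the new stack for each gate width (sketch).
     * Legacy gates: SS, (E)SP, param_count params, then CS and (E)IP,
     * each 2 << shift bytes.  64-bit gates: SS, RSP, CS, RIP only. */
    static int gate_push_bytes(int shift, int param_count)
    {
        if (shift == 2) {
            return 4 * 8;   /* no parameter copying for 64-bit gates */
        }
        return ((param_count * 2) + 8) << shift;
    }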
@@ -1917,7 +2016,13 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
new_stack = 0;
}
- if (shift) {
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
+ PUSHQ_RA(sp, next_eip, GETPC());
+ } else
+#endif
+ if (shift == 1) {
PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
} else {
@@ -1928,11 +2033,18 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
/* from this point, not restartable */
if (new_stack) {
- ss = (ss & ~3) | dpl;
- cpu_x86_load_seg_cache(env, R_SS, ss,
- ssp,
- get_seg_limit(ss_e1, ss_e2),
- ss_e2);
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
+ } else
+#endif
+ {
+ ss = (ss & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_SS, ss,
+ ssp,
+ get_seg_limit(ss_e1, ss_e2),
+ ss_e2);
+ }
}
selector = (selector & ~3) | dpl;
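One subtlety in the new_stack path: for a 64-bit inner-privilege transfer the new SS is the null selector with RPL set to the new CPL (ss = dpl above), which IA-32e mode explicitly permits at CPL < 3, so the segment cache is loaded with base, limit and flags all zero. A sketch of that selector encoding:

    /* Null SS for an IA-32e inner-privilege transfer: index 0, TI 0;
     * only the RPL field (bits 1:0) is meaningful and must equal the
     * new CPL. */
    static uint16_t make_null_ss(int new_cpl)
    {
        return (uint16_t)(new_cpl & 3);
    }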
diff --git a/target/i386/translate.c b/target/i386/translate.c
index 07d185e..1f9d1d9 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -4689,6 +4689,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x82:
if (CODE64(s))
goto illegal_op;
+ /* fall through */
case 0x80: /* GRP1 */
case 0x81:
case 0x83:
@@ -8292,6 +8293,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x10e ... 0x10f:
/* 3DNow! instructions, ignore prefixes */
s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
+ /* fall through */
case 0x110 ... 0x117:
case 0x128 ... 0x12f:
case 0x138 ... 0x13a:
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index 12e765b..265d25c 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -1096,7 +1096,7 @@ void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga,
const S390CPUDef *def = s390_find_cpu_def(type, gen, ec_ga, NULL);
g_assert(def);
- g_assert(QTAILQ_EMPTY(&cpus));
+ g_assert(QTAILQ_EMPTY_RCU(&cpus));
/* TCG emulates some features that can usually not be enabled with
* the emulated machine generation. Make sure they can be enabled
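QTAILQ_EMPTY_RCU comes from the series in this pull that converts the global CPU list to RCU, letting readers walk it without the BQL. A hedged read-side sketch: rcu_read_lock()/rcu_read_unlock() are QEMU's real API (include/qemu/rcu.h), while QTAILQ_FOREACH_RCU and the node field name are assumptions about the rcu_queue.h accessors that QTAILQ_EMPTY_RCU belongs to:

    static int count_cpus(void)
    {
        CPUState *cpu;
        int n = 0;

        rcu_read_lock();    /* readers never block, writers publish safely */
        QTAILQ_FOREACH_RCU(cpu, &cpus, node) {   /* assumed accessor */
            n++;
        }
        rcu_read_unlock();
        return n;
    }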